[Scipy-svn] r5126 - in trunk: scipy/cluster/tests scipy/interpolate scipy/interpolate/tests scipy/io/matlab scipy/io/matlab/tests scipy/lib/lapack/tests scipy/linalg/tests scipy/signal scipy/sparse scipy/sparse/linalg/isolve scipy/sparse/linalg/isolve/tests scipy/sparse/linalg/tests scipy/sparse/tests scipy/spatial scipy/spatial/tests scipy/stats scipy/stats/tests tools/win32/build_scripts

Sun Nov 16 03:24:01 CST 2008


Author: jarrod.millman
Date: 2008-11-16 03:23:48 -0600 (Sun, 16 Nov 2008)
New Revision: 5126

Modified:
   trunk/scipy/cluster/tests/test_hierarchy.py
   trunk/scipy/interpolate/interpolate.py
   trunk/scipy/interpolate/interpolate_wrapper.py
   trunk/scipy/interpolate/tests/test_fitpack.py
   trunk/scipy/interpolate/tests/test_interpolate.py
   trunk/scipy/io/matlab/mio.py
   trunk/scipy/io/matlab/mio4.py
   trunk/scipy/io/matlab/mio5.py
   trunk/scipy/io/matlab/miobase.py
   trunk/scipy/io/matlab/tests/test_mio.py
   trunk/scipy/lib/lapack/tests/common.py
   trunk/scipy/lib/lapack/tests/test_esv.py
   trunk/scipy/lib/lapack/tests/test_gesv.py
   trunk/scipy/lib/lapack/tests/test_lapack.py
   trunk/scipy/linalg/tests/test_decomp.py
   trunk/scipy/signal/signaltools.py
   trunk/scipy/sparse/data.py
   trunk/scipy/sparse/lil.py
   trunk/scipy/sparse/linalg/isolve/iterative.py
   trunk/scipy/sparse/linalg/isolve/minres.py
   trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py
   trunk/scipy/sparse/linalg/tests/test_interface.py
   trunk/scipy/sparse/sputils.py
   trunk/scipy/sparse/tests/test_base.py
   trunk/scipy/spatial/distance.py
   trunk/scipy/spatial/kdtree.py
   trunk/scipy/spatial/tests/test_kdtree.py
   trunk/scipy/stats/distributions.py
   trunk/scipy/stats/tests/test_stats.py
   trunk/tools/win32/build_scripts/pavement.py
Log:
ran reindent for upcoming release


Modified: trunk/scipy/cluster/tests/test_hierarchy.py
===================================================================
--- trunk/scipy/cluster/tests/test_hierarchy.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/cluster/tests/test_hierarchy.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -461,7 +461,7 @@
         b = [2, 2, 2]
         self.failUnless(is_isomorphic(a, b) == True)
         self.failUnless(is_isomorphic(b, a) == True)
-        
+
     def test_is_isomorphic_2(self):
         "Tests is_isomorphic on test case #2 (two flat clusters, different labelings)"
         a = [1, 7, 1]
@@ -529,7 +529,7 @@
     def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
         a = np.int_(np.random.rand(nobs) * nclusters)
         b = np.zeros(a.size, dtype=np.int_)
-        q = {}        
+        q = {}
         P = np.random.permutation(nclusters)
         for i in xrange(0, a.shape[0]):
             b[i] = P[a[i]]
@@ -699,4 +699,3 @@
 
 if __name__ == "__main__":
     run_module_suite()
-
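
The hunks above touch tests for scipy.cluster.hierarchy.is_isomorphic, which checks whether two flat cluster assignments describe the same grouping up to a relabeling. A minimal sketch of the behaviour under test, assuming the function is imported from scipy.cluster.hierarchy as the test module does:

    from scipy.cluster.hierarchy import is_isomorphic

    # [1, 1, 2] and [2, 2, 1] are the same clustering under the relabeling
    # 1 <-> 2, so both directions should report True.
    print(is_isomorphic([1, 1, 2], [2, 2, 1]))
    print(is_isomorphic([2, 2, 1], [1, 1, 2]))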

Modified: trunk/scipy/interpolate/interpolate.py
===================================================================
--- trunk/scipy/interpolate/interpolate.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/interpolate/interpolate.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -94,7 +94,7 @@
     def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                  fill_value=np.nan):
         self.x, self.y, self.z = map(ravel, map(asarray, [x, y, z]))
-        
+
         if len(self.z) == len(self.x) * len(self.y):
             self.x, self.y = meshgrid(x,y)
             self.x, self.y = map(ravel, [self.x, self.y])
@@ -130,7 +130,7 @@
         -------
         z : 2D array with shape (len(y), len(x))
             The interpolated values.
-        
+
         """
 
         x = atleast_1d(x)
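
The docstring hunks above belong to interp2d; per the reindented text, calling the interpolator returns a 2-D array of shape (len(y), len(x)). A minimal usage sketch of that interface (the grid and query points are illustrative):

    import numpy as np
    from scipy.interpolate import interp2d

    x = np.linspace(0.0, 4.0, 5)
    y = np.linspace(0.0, 3.0, 4)
    z = np.outer(y, x)                     # shape (len(y), len(x))
    f = interp2d(x, y, z, kind='linear')
    znew = f([0.5, 1.5], [0.25, 2.75])     # shape (len(ynew), len(xnew)) == (2, 2)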

Modified: trunk/scipy/interpolate/interpolate_wrapper.py
===================================================================
--- trunk/scipy/interpolate/interpolate_wrapper.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/interpolate/interpolate_wrapper.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -14,19 +14,19 @@
         and returns corresponding y.
     """
     shifted_x = np.concatenate(( np.array([x[0]-1]) , x[0:-1] ))
-    
+
     midpoints_of_x = atleast_1d_and_contiguous( .5*(x + shifted_x) )
     new_x = atleast_1d_and_contiguous(new_x)
-    
+
     TINY = 1e-10
     indices = np.searchsorted(midpoints_of_x, new_x+TINY)-1
     indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(np.int))
     new_y = np.take(y, indices, axis=-1)
-    
+
     return new_y
-    
-    
 
+
+
 def linear(x, y, new_x):
     """ Linearly interpolates values in new_x based on the values in x and y
 
@@ -80,7 +80,7 @@
         _interpolate.loginterp_dddd(x, y, new_x, new_y)
 
     return new_y
-    
+
 def block_average_above(x, y, new_x):
     """ Linearly interpolates values in new_x based on the values in x and y
 
@@ -102,10 +102,10 @@
     if len(y.shape) == 2:
         new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
         for i in range(len(new_y)):
-            bad_index = _interpolate.block_averave_above_dddd(x, y[i], 
+            bad_index = _interpolate.block_averave_above_dddd(x, y[i],
                                                             new_x, new_y[i])
             if bad_index is not None:
-                break                                                
+                break
     else:
         new_y = np.zeros(len(new_x), np.float64)
         bad_index = _interpolate.block_average_above_dddd(x, y, new_x, new_y)
@@ -115,12 +115,12 @@
               "is out of the x range (%f, %f)" % \
               (bad_index, new_x[bad_index], x[0], x[-1])
         raise ValueError, msg
-              
+
     return new_y
 
 def block(x, y, new_x):
     """ Essentially a step function.
-    
+
         For each new_x[i], finds largest j such that
         x[j] < new_x[j], and returns y[j].
     """

Modified: trunk/scipy/interpolate/tests/test_fitpack.py
===================================================================
--- trunk/scipy/interpolate/tests/test_fitpack.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/interpolate/tests/test_fitpack.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -38,11 +38,11 @@
 
     def test_subclassing(self):
         # See #731
-        
+
         class ZeroSpline(UnivariateSpline):
             def __call__(self, x):
                 return 0*array(x)
-        
+
         sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
         assert_array_equal(sp([1.5, 2.5]), [0., 0.])
 

Modified: trunk/scipy/interpolate/tests/test_interpolate.py
===================================================================
--- trunk/scipy/interpolate/tests/test_interpolate.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/interpolate/tests/test_interpolate.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -14,7 +14,7 @@
 
         v,u = ogrid[0:2:24j, 0:pi:25j]
         assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
-        
+
     def test_interp2d_meshgrid_input(self):
         # Ticket #703
         x = linspace(0, 2, 16)

Modified: trunk/scipy/io/matlab/mio.py
===================================================================
--- trunk/scipy/io/matlab/mio.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/io/matlab/mio.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -51,8 +51,8 @@
 def mat_reader_factory(file_name, appendmat=True, **kwargs):
     """Create reader for matlab .mat format files
 
-    %(file_arg)s                          
-    %(append_arg)s                          
+    %(file_arg)s
+    %(append_arg)s
     %(basename_arg)s
     %(load_args)s
     %(struct_arg)s
@@ -91,14 +91,14 @@
 def loadmat(file_name,  mdict=None, appendmat=True, **kwargs):
     ''' Load Matlab(tm) file
 
-    %(file_arg)s                          
+    %(file_arg)s
     m_dict : dict, optional
         dictionary in which to insert matfile variables
-    %(append_arg)s                          
+    %(append_arg)s
     %(basename_arg)s
     %(load_args)s
     %(struct_arg)s
-    
+
     Notes
     -----
     v4 (Level 1.0), v6 and v7 to 7.2 matfiles are supported.
@@ -124,13 +124,13 @@
 
     file_name : {string, file-like object}
         Name of the mat file (do not need .mat extension if
-    	appendmat==True) Can also pass open file-like object
+        appendmat==True) Can also pass open file-like object
     m_dict : dict
         dictionary from which to save matfile variables
     %(append_arg)s
     format : {'5', '4'} string, optional
         '5' for matlab 5 (up to matlab 7.2)
-        '4' for matlab 4 mat files, 
+        '4' for matlab 4 mat files,
     """
     file_is_string = isinstance(file_name, basestring)
     if file_is_string:
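
The loadmat/savemat docstrings reindented above are the public scipy.io entry points. A minimal round-trip sketch (the file name is illustrative) that also passes the struct_as_record flag documented in miobase.py further down:

    import numpy as np
    from scipy.io import savemat, loadmat

    savemat('example.mat', {'a': np.arange(3.0)}, format='5')   # MATLAB 5 file
    d = loadmat('example.mat', struct_as_record=True)            # avoid the FutureWarning
    print(d['a'])                                                # [[ 0.  1.  2.]]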

Modified: trunk/scipy/io/matlab/mio4.py
===================================================================
--- trunk/scipy/io/matlab/mio4.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/io/matlab/mio4.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -190,7 +190,7 @@
     @filldoc
     def __init__(self, mat_stream, *args, **kwargs):
         ''' Initialize matlab 4 file reader
-        
+
     %(matstream_arg)s
     %(load_args)s
         '''

Modified: trunk/scipy/io/matlab/mio5.py
===================================================================
--- trunk/scipy/io/matlab/mio5.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/io/matlab/mio5.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -196,8 +196,8 @@
     ''' Subclass to signal this is a matlab function '''
     def __new__(cls, input_array):
         obj = np.asarray(input_array).view(cls)
-    
 
+
 class Mat5ArrayReader(MatArrayReader):
     ''' Class to get Mat5 arrays
 
@@ -516,7 +516,7 @@
                  uint16_codec=None
                  ):
         '''Initializer for matlab 5 file format reader
-        
+
     %(matstream_arg)s
     %(load_args)s
     %(struct_arg)s
@@ -527,7 +527,7 @@
         # Deal with deprecations
         if struct_as_record is None:
             warnings.warn("Using struct_as_record default value (False)" +
-                          " This will change to True in future versions", 
+                          " This will change to True in future versions",
                           FutureWarning, stacklevel=2)
             struct_as_record = False
         self.codecs = {}
@@ -629,7 +629,7 @@
         super(Mat5MatrixWriter, self).__init__(file_stream, arr, name)
         self.is_global = is_global
         self.unicode_strings = unicode_strings
-        
+
     def write_dtype(self, arr):
         self.file_stream.write(arr.tostring())
 

Modified: trunk/scipy/io/matlab/miobase.py
===================================================================
--- trunk/scipy/io/matlab/miobase.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/io/matlab/miobase.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -14,14 +14,14 @@
    {'file_arg':
     '''file_name : string
         Name of the mat file (do not need .mat extension if
-    	appendmat==True) If name not a full path name, search for the
-    	file on the sys.path list and use the first one found (the
-    	current directory is searched first).  Can also pass open
-    	file-like object''',
+        appendmat==True) If name not a full path name, search for the
+        file on the sys.path list and use the first one found (the
+        current directory is searched first).  Can also pass open
+        file-like object''',
     'append_arg':
     '''appendmat : {True, False} optional
         True to append the .mat extension to the end of the given
-	filename, if not already present''',
+        filename, if not already present''',
     'basename_arg':
     '''base_name : string, optional, unused
         base name for unnamed variables.  The code no longer uses
@@ -29,12 +29,12 @@
         it in future versions''',
     'load_args':
     '''byte_order : {None, string}, optional
-    	None by default, implying byte order guessed from mat
-	file. Otherwise can be one of ('native', '=', 'little', '<',
-	'BIG', '>')
+        None by default, implying byte order guessed from mat
+        file. Otherwise can be one of ('native', '=', 'little', '<',
+        'BIG', '>')
     mat_dtype : {False, True} optional
          If True, return arrays in same dtype as would be loaded into
-	 matlab (instead of the dtype with which they are saved)
+         matlab (instead of the dtype with which they are saved)
     squeeze_me : {False, True} optional
          whether to squeeze unit matrix dimensions or not
     chars_as_strings : {True, False} optional
@@ -46,15 +46,15 @@
     'struct_arg':
     '''struct_as_record : {False, True} optional
         Whether to load matlab structs as numpy record arrays, or as
-	old-style numpy arrays with dtype=object.  Setting this flag
-	to False replicates the behaviour of scipy version 0.6
-	(returning numpy object arrays).  The preferred setting is
-	True, because it allows easier round-trip load and save of
-	matlab files.  In a future version of scipy, we will change
-	the default setting to True, and following versions may remove
-	this flag entirely.  For now, we set the default to False, for
-	backwards compatibility, but issue a warning.
-	Note that non-record arrays cannot be exported via savemat.''',
+        old-style numpy arrays with dtype=object.  Setting this flag
+        to False replicates the behaviour of scipy version 0.6
+        (returning numpy object arrays).  The preferred setting is
+        True, because it allows easier round-trip load and save of
+        matlab files.  In a future version of scipy, we will change
+        the default setting to True, and following versions may remove
+        this flag entirely.  For now, we set the default to False, for
+        backwards compatibility, but issue a warning.
+        Note that non-record arrays cannot be exported via savemat.''',
     'matstream_arg':
     '''mat_stream : file-like
         object with file API, open for reading'''}
@@ -191,7 +191,7 @@
                  ):
         '''
         Initializer for mat file reader
-        
+
         mat_stream : file-like
             object with file API, open for reading
     %(load_args)s

Modified: trunk/scipy/io/matlab/tests/test_mio.py
===================================================================
--- trunk/scipy/io/matlab/tests/test_mio.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/io/matlab/tests/test_mio.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -210,7 +210,7 @@
     # A field in a record array may not be an ndarray
     # A scalar from a record array will be type np.void
     if not isinstance(expected,
-                      (np.void, np.ndarray, MatlabObject)): 
+                      (np.void, np.ndarray, MatlabObject)):
         assert_equal(expected, actual)
         return
     # This is an ndarray-like thing
@@ -328,8 +328,8 @@
     # This too
     yield assert_raises, FutureWarning, find_mat_file, fname
     # we need kwargs for this one
-    yield (lambda a, k: assert_raises(*a, **k), 
-          (DeprecationWarning, loadmat, fname), 
+    yield (lambda a, k: assert_raises(*a, **k),
+          (DeprecationWarning, loadmat, fname),
           {'struct_as_record':True, 'basename':'raw'})
     warnings.resetwarnings()
 
@@ -337,4 +337,3 @@
 def test_regression_653():
     """Regression test for #653."""
     assert_raises(TypeError, savemat, StringIO(), {'d':{1:2}}, format='5')
-

Modified: trunk/scipy/lib/lapack/tests/common.py
===================================================================
--- trunk/scipy/lib/lapack/tests/common.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/lib/lapack/tests/common.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -2,11 +2,11 @@
 
 from scipy.lib.lapack import flapack, clapack
 
-FUNCS_TP = {'ssygv' : np.float32, 
+FUNCS_TP = {'ssygv' : np.float32,
          'dsygv': np.float,
          'ssygvd' : np.float32,
          'dsygvd' : np.float,
-         'ssyev' : np.float32, 
+         'ssyev' : np.float32,
          'dsyev': np.float,
          'ssyevr' : np.float32,
          'dsyevr' : np.float,
@@ -14,7 +14,7 @@
          'dsyevr' : np.float,
          'sgehrd' : np.float32,
          'dgehrd' : np.float,
-         'sgebal' : np.float32, 
+         'sgebal' : np.float32,
          'dgebal': np.float}
 
 # Test FLAPACK if not empty
@@ -50,4 +50,3 @@
     FUNCS_CLAPACK = None
 
 PREC = {np.float32: 5, np.float: 12}
-

Modified: trunk/scipy/lib/lapack/tests/test_esv.py
===================================================================
--- trunk/scipy/lib/lapack/tests/test_esv.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/lib/lapack/tests/test_esv.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -25,7 +25,7 @@
         assert not info, `info`
         assert_array_almost_equal(w, SYEV_REF, decimal=PREC[tp])
         for i in range(3):
-            assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i], 
+            assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i],
                                       decimal=PREC[tp])
 
     def _test_base_irange(self, func, irange, lang):
@@ -47,7 +47,7 @@
         assert_array_almost_equal(w, SYEV_REF[rslice], decimal=PREC[tp])
 
         for i in range(m):
-            assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i], 
+            assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i],
                                       decimal=PREC[tp])
 
     def _test_base_vrange(self, func, vrange, lang):
@@ -68,7 +68,7 @@
         assert_array_almost_equal(w, ew, decimal=PREC[tp])
 
         for i in range(len(w)):
-            assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i], 
+            assert_array_almost_equal(np.dot(a,v[:,i]), w[i]*v[:,i],
                                       decimal=PREC[tp])
 
     def _test_syevr_ranges(self, func, lang):
@@ -104,32 +104,32 @@
         self._test_syevr_ranges('dsyevr', 'F')
 
     # Clapack tests
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyev"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyev"],
                 "Clapack empty, skip clapack test")
     def test_clapack_ssyev(self):
         self._test_base('ssyev', 'C')
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyev"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyev"],
                 "Clapack empty, skip clapack test")
     def test_clapack_dsyev(self):
         self._test_base('dsyev', 'C')
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyevr"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyevr"],
                 "Clapack empty, skip clapack test")
     def test_clapack_ssyevr(self):
         self._test_base('ssyevr', 'C')
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyevr"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyevr"],
                 "Clapack empty, skip clapack test")
     def test_clapack_dsyevr(self):
         self._test_base('dsyevr', 'C')
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyevr"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssyevr"],
                 "Clapack empty, skip clapack test")
     def test_clapack_ssyevr_ranges(self):
         self._test_syevr_ranges('ssyevr', 'C')
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyevr"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsyevr"],
                 "Clapack empty, skip clapack test")
     def test_clapack_dsyevr_ranges(self):
         self._test_syevr_ranges('dsyevr', 'C')

Modified: trunk/scipy/lib/lapack/tests/test_gesv.py
===================================================================
--- trunk/scipy/lib/lapack/tests/test_gesv.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/lib/lapack/tests/test_gesv.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -31,7 +31,7 @@
                 assert_array_almost_equal(np.dot(a,np.dot(b,v[:,i])), w[i]*v[:,i],
                                           decimal=PREC[tp])
             elif itype == 3:
-                assert_array_almost_equal(np.dot(b,np.dot(a,v[:,i])), 
+                assert_array_almost_equal(np.dot(b,np.dot(a,v[:,i])),
                                           w[i]*v[:,i], decimal=PREC[tp] - 1)
             else:
                 raise ValueError, `itype`
@@ -60,32 +60,32 @@
     def test_dsygv_3(self):
         self._test_base('dsygv', 'F', 3)
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"],
                 "Clapack empty, skip flapack test")
     def test_clapack_ssygv_1(self):
         self._test_base('ssygv', 'C', 1)
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"],
                 "Clapack empty, skip flapack test")
     def test_clapack_ssygv_2(self):
         self._test_base('ssygv', 'C', 2)
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["ssygv"],
                 "Clapack empty, skip flapack test")
     def test_clapack_ssygv_3(self):
         self._test_base('ssygv', 'C', 3)
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"],
                 "Clapack empty, skip flapack test")
     def test_clapack_dsygv_1(self):
         self._test_base('dsygv', 'C', 1)
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"],
                 "Clapack empty, skip flapack test")
     def test_clapack_dsygv_2(self):
         self._test_base('dsygv', 'C', 2)
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dsygv"],
                 "Clapack empty, skip flapack test")
     def test_clapack_dsygv_3(self):
         self._test_base('dsygv', 'C', 3)

Modified: trunk/scipy/lib/lapack/tests/test_lapack.py
===================================================================
--- trunk/scipy/lib/lapack/tests/test_lapack.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/lib/lapack/tests/test_lapack.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -40,7 +40,7 @@
         a = np.array([[-149, -50,-154],
              [ 537, 180, 546],
              [ -27,  -9, -25]]).astype(tp)
-        
+
         if lang == 'C':
             f = FUNCS_CLAPACK[func]
         elif lang == 'F':
@@ -54,35 +54,35 @@
     @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
     def test_sgebal(self):
         self._test_gebal_base('sgebal', 'F')
-                
+
     @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip flapack test")
     def test_dgebal(self):
         self._test_gebal_base('dgebal', 'F')
-                
+
     @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip clapack test")
     def test_sgehrd(self):
         self._test_gehrd_base('sgehrd', 'F')
-                
+
     @dec.skipif(FLAPACK_IS_EMPTY, "Flapack empty, skip clapack test")
     def test_dgehrd(self):
         self._test_gehrd_base('dgehrd', 'F')
 
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgebal"], 
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgebal"],
                 "Clapack empty, skip flapack test")
     def test_clapack_sgebal(self):
         self._test_gebal_base('sgebal', 'C')
-                
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgebal"], 
+
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgebal"],
                 "Clapack empty, skip flapack test")
     def test_clapack_dgebal(self):
         self._test_gebal_base('dgebal', 'C')
-                
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgehrd"], 
+
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["sgehrd"],
                 "Clapack empty, skip flapack test")
     def test_clapack_sgehrd(self):
         self._test_gehrd_base('sgehrd', 'C')
-                
-    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgehrd"], 
+
+    @dec.skipif(CLAPACK_IS_EMPTY or not FUNCS_CLAPACK["dgehrd"],
                 "Clapack empty, skip flapack test")
     def test_clapack_dgehrd(self):
         self._test_gehrd_base('dgehrd', 'C')

Modified: trunk/scipy/linalg/tests/test_decomp.py
===================================================================
--- trunk/scipy/linalg/tests/test_decomp.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/linalg/tests/test_decomp.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -41,12 +41,12 @@
         act = act.dtype
     else:
         act = dtype(act)
-        
+
     if isinstance(des, ndarray):
         des = des.dtype
     else:
         des = dtype(des)
-        
+
     assert act == des, 'dtype mismatch: "%s" (should be "%s") '%(act, des)
 
 # XXX: This function should not be defined here, but somewhere in
@@ -59,10 +59,10 @@
 #      scipy.linalg namespace
 def symrand(dim_or_eigv, dtype="d"):
     """Return a random symmetric (Hermitian) matrix.
-    
+
     If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
         uniformly distributed on (0.1,1].
-        
+
     If 'dim_or_eigv' is  1-D real array 'a', return a matrix whose
                       eigenvalues are sort(a).
     """
@@ -75,7 +75,7 @@
         d = dim_or_eigv
     else:
         raise TypeError("input type not supported.")
-    
+
     v = random_rot(dim, dtype=dtype)
     h = dot(dot(hermitian(v), diag(d)), v)
     # to avoid roundoff errors, symmetrize the matrix (again)
@@ -99,7 +99,7 @@
         D[n-1] = sign(x[0])
         x[0] -= D[n-1]*sqrt((x*x).sum())
         # Householder transformation
-        
+
         Hx = eye(dim-n+1, dtype=dtype) - 2.*outer(x, x)/(x*x).sum()
         mat = eye(dim, dtype=dtype)
         mat[n-1:,n-1:] = Hx
@@ -514,7 +514,7 @@
         self.eigenproblem_standard(DIM, 'f', False, False)
         self.eigenproblem_standard(DIM, 'f', False, True)
         self.eigenproblem_standard(DIM, 'f', True, True)
-    
+
     def test_eigh_complex_standard(self):
         self.eigenproblem_standard(DIM, 'D', False, False)
         self.eigenproblem_standard(DIM, 'D', False, True)
@@ -523,7 +523,7 @@
         self.eigenproblem_standard(DIM, 'F', False, True)
         self.eigenproblem_standard(DIM, 'F', True, True)
 
-    
+
 class TestLU(TestCase):
 
     def __init__(self, *args, **kw):

Modified: trunk/scipy/signal/signaltools.py
===================================================================
--- trunk/scipy/signal/signaltools.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/signal/signaltools.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -942,7 +942,7 @@
     The analytic signal `x_a(t)` of `x(t)` is::
 
         x_a = F^{-1}(F(x) 2U) = x + i y
-    
+
     where ``F`` is the Fourier transform, ``U`` the unit step function,
     and ``y`` the Hilbert transform of ``x``. [1]
 

Modified: trunk/scipy/sparse/data.py
===================================================================
--- trunk/scipy/sparse/data.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/data.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -66,5 +66,3 @@
 
     def _mul_scalar(self, other):
         return self._with_data(self.data * other)
-
-

Modified: trunk/scipy/sparse/lil.py
===================================================================
--- trunk/scipy/sparse/lil.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/lil.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -155,7 +155,7 @@
         return new
 
     def _get1(self, i, j):
-        
+
         if i < 0:
             i += self.shape[0]
         if i < 0 or i >= self.shape[0]:
@@ -165,7 +165,7 @@
             j += self.shape[1]
         if j < 0 or j >= self.shape[1]:
             raise IndexError('column index out of bounds')
-        
+
         row  = self.rows[i]
         data = self.data[i]
 

Modified: trunk/scipy/sparse/linalg/isolve/iterative.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/iterative.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/linalg/isolve/iterative.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -31,7 +31,7 @@
     Maximum number of iterations.  Iteration will stop after maxiter
     steps even if the specified tolerance has not been achieved.
 M : {sparse matrix, dense matrix, LinearOperator}
-    Preconditioner for A.  The preconditioner should approximate the 
+    Preconditioner for A.  The preconditioner should approximate the
     inverse of A.  Effective preconditioning dramatically improves the
     rate of convergence, which implies that fewer iterations are needed
     to reach a given error tolerance.
@@ -53,9 +53,9 @@
 ----------------------
 xtype : {'f','d','F','D'}
     The type of the result.  If None, then it will be determined from
-    A.dtype.char and b.  If A does not have a typecode method then it 
-    will compute A.matvec(x0) to get a typecode.   To save the extra 
-    computation when A does not have a typecode attribute use xtype=0 
+    A.dtype.char and b.  If A does not have a typecode method then it
+    will compute A.matvec(x0) to get a typecode.   To save the extra
+    computation when A does not have a typecode attribute use xtype=0
     for the same type as b or use xtype='f','d','F',or 'D'.
     This parameter has been superceeded by LinearOperator.
 """
@@ -123,7 +123,7 @@
                 ftflag = False
             bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
         ijob = 2
-    
+
     if info > 0 and iter_ == maxiter and resid > tol:
         #info isn't set appropriately otherwise
         info = iter_
@@ -185,7 +185,7 @@
                 ftflag = False
             bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
         ijob = 2
-    
+
     if info > 0 and iter_ == maxiter and resid > tol:
         #info isn't set appropriately otherwise
         info = iter_
@@ -299,24 +299,24 @@
                 ftflag = False
             bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
         ijob = 2
-    
+
     if info > 0 and iter_ == maxiter and resid > tol:
         #info isn't set appropriately otherwise
         info = iter_
 
     return postprocess(x), info
-    
 
+
 def gmres(A, b, x0=None, tol=1e-5, restrt=20, maxiter=None, xtype=None, M=None, callback=None):
     """Use Generalized Minimal RESidual iteration to solve A x = b
-    
+
     Parameters
     ----------
     A : {sparse matrix, dense matrix, LinearOperator}
         The N-by-N matrix of the linear system.
     b : {array, matrix}
         Right hand side of the linear system. Has shape (N,) or (N,1).
-    
+
     Optional Parameters
     -------------------
     x0  : {array, matrix}
@@ -330,14 +330,14 @@
         Maximum number of iterations.  Iteration will stop after maxiter
         steps even if the specified tolerance has not been achieved.
     M : {sparse matrix, dense matrix, LinearOperator}
-        Preconditioner for A.  The preconditioner should approximate the 
+        Preconditioner for A.  The preconditioner should approximate the
         inverse of A.  Effective preconditioning dramatically improves the
         rate of convergence, which implies that fewer iterations are needed
         to reach a given error tolerance.
     callback : function
         User-supplied function to call after each iteration.  It is called
         as callback(rk), where rk is the current residual vector.
-    
+
     Outputs
     -------
     x : {array, matrix}
@@ -347,21 +347,21 @@
             0  : successful exit
             >0 : convergence to tolerance not achieved, number of iterations
             <0 : illegal input or breakdown
-    
+
     See Also
     --------
     LinearOperator
-    
+
     Deprecated Parameters
     ---------------------
     xtype : {'f','d','F','D'}
         The type of the result.  If None, then it will be determined from
-        A.dtype.char and b.  If A does not have a typecode method then it 
-        will compute A.matvec(x0) to get a typecode.   To save the extra 
-        computation when A does not have a typecode attribute use xtype=0 
+        A.dtype.char and b.  If A does not have a typecode method then it
+        will compute A.matvec(x0) to get a typecode.   To save the extra
+        computation when A does not have a typecode attribute use xtype=0
         for the same type as b or use xtype='f','d','F',or 'D'.
         This parameter has been superceeded by LinearOperator.
-    
+
     """
     A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
 
@@ -369,7 +369,7 @@
     if maxiter is None:
         maxiter = n*10
 
-    restrt = min(restrt, n)        
+    restrt = min(restrt, n)
 
     matvec = A.matvec
     psolve = M.matvec
@@ -433,7 +433,7 @@
 
         if iter_num > maxiter:
             break
-    
+
     if info >= 0 and resid > tol:
         #info isn't set appropriately otherwise
         info = maxiter
@@ -450,7 +450,7 @@
         The N-by-N matrix of the linear system.
     b : {array, matrix}
         Right hand side of the linear system. Has shape (N,) or (N,1).
-    
+
     Optional Parameters
     -------------------
     x0  : {array, matrix}
@@ -469,7 +469,7 @@
     callback : function
         User-supplied function to call after each iteration.  It is called
         as callback(xk), where xk is the current solution vector.
-    
+
     Outputs
     -------
     x : {array, matrix}
@@ -479,21 +479,21 @@
             0  : successful exit
             >0 : convergence to tolerance not achieved, number of iterations
             <0 : illegal input or breakdown
-    
+
     See Also
     --------
     LinearOperator
-    
+
     Deprecated Parameters
     ---------------------
     xtype : {'f','d','F','D'}
         The type of the result.  If None, then it will be determined from
-        A.dtype.char and b.  If A does not have a typecode method then it 
-        will compute A.matvec(x0) to get a typecode.   To save the extra 
-        computation when A does not have a typecode attribute use xtype=0 
+        A.dtype.char and b.  If A does not have a typecode method then it
+        will compute A.matvec(x0) to get a typecode.   To save the extra
+        computation when A does not have a typecode attribute use xtype=0
         for the same type as b or use xtype='f','d','F',or 'D'.
         This parameter has been superceeded by LinearOperator.
-    
+
     """
     A_ = A
     A,M,x,b,postprocess = make_system(A,None,x0,b,xtype)
@@ -568,10 +568,9 @@
                 ftflag = False
             bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info)
         ijob = 2
-    
+
     if info > 0 and iter_ == maxiter and resid > tol:
         #info isn't set appropriately otherwise
         info = iter_
 
     return postprocess(x), info
-
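
The docstrings reindented above describe the shared calling convention of the isolve solvers (A, b, optional x0, tol, maxiter, preconditioner M, callback). A minimal sketch of calling gmres on a small sparse system (the matrix values are illustrative):

    import numpy as np
    from scipy.sparse import csr_matrix
    from scipy.sparse.linalg import gmres

    A = csr_matrix(np.array([[4.0, 1.0, 0.0],
                             [1.0, 3.0, 1.0],
                             [0.0, 1.0, 2.0]]))
    b = np.array([1.0, 2.0, 3.0])
    x, info = gmres(A, b, tol=1e-8)       # info == 0 signals successful convergence
    print(info, np.allclose(A * x, b))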

Modified: trunk/scipy/sparse/linalg/isolve/minres.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/minres.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/linalg/isolve/minres.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -6,7 +6,7 @@
 
 __all__ = ['minres']
 
-    
+
 header = \
 """Use MINimum RESidual iteration to solve Ax=b
 
@@ -17,7 +17,7 @@
 """
 
 footer = \
-"""        
+"""
 Notes
 -----
 THIS FUNCTION IS EXPERIMENTAL AND SUBJECT TO CHANGE!

Modified: trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -106,7 +106,7 @@
         """test whether all methods accept a trivial preconditioner"""
 
         tol = 1e-8
-        
+
         def identity(b,which=None):
             """trivial preconditioner"""
             return b
@@ -131,9 +131,9 @@
                     x, info = solver(A, b, M=precond, x0=x0, tol=tol)
                 assert_equal(info,0)
                 assert( norm(b - A*x) < tol*norm(b) )
-                
+
                 A = A.copy()
-                A.psolve  = identity 
+                A.psolve  = identity
                 A.rpsolve = identity
 
                 x, info = solver(A, b, x0=x0, tol=tol)

Modified: trunk/scipy/sparse/linalg/tests/test_interface.py
===================================================================
--- trunk/scipy/sparse/linalg/tests/test_interface.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/linalg/tests/test_interface.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -44,7 +44,7 @@
 
             assert_equal(A.matvec(array([1,2,3])),       [14,32])
             assert_equal(A.matvec(array([[1],[2],[3]])), [[14],[32]])
-            
+
             assert_equal(A * array([1,2,3]),       [14,32])
             assert_equal(A * array([[1],[2],[3]]), [[14],[32]])
 
@@ -52,7 +52,7 @@
             assert_equal(A.rmatvec(array([[1],[2]])), [[9],[12],[15]])
 
             assert_equal(A.matmat(array([[1,4],[2,5],[3,6]])), [[14,32],[32,77]] )
-            
+
             assert_equal(A * array([[1,4],[2,5],[3,6]]), [[14,32],[32,77]] )
 
             if hasattr(M,'dtype'):
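
The assertions above exercise the LinearOperator interface: matvec/rmatvec/matmat plus multiplication via *. A small sketch wrapping the same explicit matrix, assuming the scipy.sparse.linalg.LinearOperator constructor accepts matvec/rmatvec callables:

    import numpy as np
    from scipy.sparse.linalg import LinearOperator

    M = np.array([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])
    A = LinearOperator(M.shape, matvec=lambda v: M.dot(v),
                       rmatvec=lambda v: M.T.dot(v), dtype=M.dtype)
    print(A.matvec(np.array([1.0, 2.0, 3.0])))   # [ 14.  32.]
    print(A.rmatvec(np.array([1.0, 2.0])))       # [  9.  12.  15.]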

Modified: trunk/scipy/sparse/sputils.py
===================================================================
--- trunk/scipy/sparse/sputils.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/sputils.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -106,8 +106,8 @@
             if np.rank(M) == 0 and np.rank(N) == 0:
                 return True
         return False
-        
 
+
 def issequence(t):
     return isinstance(t, (list, tuple))\
            or (isinstance(t, np.ndarray) and (t.ndim == 1))

Modified: trunk/scipy/sparse/tests/test_base.py
===================================================================
--- trunk/scipy/sparse/tests/test_base.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/sparse/tests/test_base.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -581,7 +581,7 @@
         A[-1,-2] = 7
         A[ 0, 1] = 5
         assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
-        
+
         for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
             assert_raises(IndexError, A.__setitem__, ij, 123.0)
 
@@ -597,7 +597,7 @@
         for i in range(-M, M):
             for j in range(-N, N):
                 assert_equal(A[i,j], D[i,j])
-         
+
         for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1)]:
             assert_raises(IndexError, A.__getitem__, ij)
 

Modified: trunk/scipy/spatial/distance.py
===================================================================
--- trunk/scipy/spatial/distance.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/spatial/distance.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -1432,7 +1432,7 @@
                     raise ValueError('Distance matrix must be symmetric.')
             if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
                 if name:
-                   raise ValueError('Distance matrix \'%s\' diagonal must be zero.' % name)
+                    raise ValueError('Distance matrix \'%s\' diagonal must be zero.' % name)
                 else:
                     raise ValueError('Distance matrix diagonal must be zero.')
         else:

Modified: trunk/scipy/spatial/kdtree.py
===================================================================
--- trunk/scipy/spatial/kdtree.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/spatial/kdtree.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -6,7 +6,7 @@
 
 def minkowski_distance_p(x,y,p=2):
     """Compute the pth power of the L**p distance between x and y
-    
+
     For efficiency, this function computes the L**p distance but does
     not extract the pth root. If p is 1 or infinity, this is equal to
     the actual L**p distance.
@@ -45,10 +45,10 @@
     def volume(self):
         """Total volume."""
         return np.prod(self.maxes-self.mins)
-    
+
     def split(self, d, split):
         """Produce two hyperrectangles by splitting along axis d.
-        
+
         In general, if you need to compute maximum and minimum
         distances to the children, it can be done more efficiently
         by updating the maximum and minimum distances to the parent.
@@ -83,24 +83,24 @@
 
     This class provides an index into a set of k-dimensional points
     which can be used to rapidly look up the nearest neighbors of any
-    point. 
+    point.
 
-    The algorithm used is described in Maneewongvatana and Mount 1999. 
+    The algorithm used is described in Maneewongvatana and Mount 1999.
     The general idea is that the kd-tree is a binary trie, each of whose
     nodes represents an axis-aligned hyperrectangle. Each node specifies
     an axis and splits the set of points based on whether their coordinate
-    along that axis is greater than or less than a particular value. 
+    along that axis is greater than or less than a particular value.
 
-    During construction, the axis and splitting point are chosen by the 
+    During construction, the axis and splitting point are chosen by the
     "sliding midpoint" rule, which ensures that the cells do not all
-    become long and thin. 
+    become long and thin.
 
-    The tree can be queried for the r closest neighbors of any given point 
-    (optionally returning only those within some maximum distance of the 
-    point). It can also be queried, with a substantial gain in efficiency, 
+    The tree can be queried for the r closest neighbors of any given point
+    (optionally returning only those within some maximum distance of the
+    point). It can also be queried, with a substantial gain in efficiency,
     for the r approximate closest neighbors.
 
-    For large dimensions (20 is already large) do not expect this to run 
+    For large dimensions (20 is already large) do not expect this to run
     significantly faster than brute force. High-dimensional nearest-neighbor
     queries are a substantial open problem in computer science.
 
@@ -146,7 +146,7 @@
             self.less = less
             self.greater = greater
             self.children = less.children+greater.children
-    
+
     def __build(self, idx, maxes, mins):
         if len(idx)<=self.leafsize:
             return KDTree.leafnode(idx)
@@ -186,12 +186,12 @@
             lessmaxes[d] = split
             greatermins = np.copy(mins)
             greatermins[d] = split
-            return KDTree.innernode(d, split, 
+            return KDTree.innernode(d, split,
                     self.__build(idx[less_idx],lessmaxes,mins),
                     self.__build(idx[greater_idx],maxes,greatermins))
 
     def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
-        
+
         side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
         if p!=np.inf:
             side_distances**=p
@@ -205,7 +205,7 @@
         #  distances between the nearest side of the cell and the target
         #  the head node of the cell
         q = [(min_distance,
-              tuple(side_distances),                   
+              tuple(side_distances),
               self.tree)]
         # priority queue for the nearest neighbors
         # furthest known neighbor first
@@ -237,7 +237,7 @@
                             distance_upper_bound = -neighbors[0][0]
             else:
                 # we don't push cells that are too far onto the queue at all,
-                # but since the distance_upper_bound decreases, we might get 
+                # but since the distance_upper_bound decreases, we might get
                 # here even if the cell's too far
                 if min_distance>distance_upper_bound*epsfac:
                     # since this is the nearest cell, we're done, bail out
@@ -283,11 +283,11 @@
         k : integer
             The number of nearest neighbors to return.
         eps : nonnegative float
-            Return approximate nearest neighbors; the kth returned value 
-            is guaranteed to be no further than (1+eps) times the 
+            Return approximate nearest neighbors; the kth returned value
+            is guaranteed to be no further than (1+eps) times the
             distance to the real kth nearest neighbor.
         p : float, 1<=p<=infinity
-            Which Minkowski p-norm to use. 
+            Which Minkowski p-norm to use.
             1 is the sum-of-absolute-values "Manhattan" distance
             2 is the usual Euclidean distance
             infinity is the maximum-coordinate-difference distance
@@ -299,14 +299,14 @@
 
         Returns:
         ========
-        
+
         d : array of floats
-            The distances to the nearest neighbors. 
-            If x has shape tuple+(self.m,), then d has shape tuple if 
-            k is one, or tuple+(k,) if k is larger than one.  Missing 
-            neighbors are indicated with infinite distances.  If k is None, 
-            then d is an object array of shape tuple, containing lists 
-            of distances. In either case the hits are sorted by distance 
+            The distances to the nearest neighbors.
+            If x has shape tuple+(self.m,), then d has shape tuple if
+            k is one, or tuple+(k,) if k is larger than one.  Missing
+            neighbors are indicated with infinite distances.  If k is None,
+            then d is an object array of shape tuple, containing lists
+            of distances. In either case the hits are sorted by distance
             (nearest first).
         i : array of integers
             The locations of the neighbors in self.data. i is the same
@@ -386,7 +386,7 @@
                 return traverse_checking(node.less, less)+traverse_checking(node.greater, greater)
         def traverse_no_checking(node):
             if isinstance(node, KDTree.leafnode):
-                
+
                 return node.idx.tolist()
             else:
                 return traverse_no_checking(node.less)+traverse_no_checking(node.greater)
@@ -450,7 +450,7 @@
             Approximate search. Branches of the tree are not explored
             if their nearest points are further than r/(1+eps), and branches
             are added in bulk if their furthest points are nearer than r*(1+eps).
-        
+
         Returns
         =======
 
@@ -501,7 +501,7 @@
                           other.tree, Rectangle(other.maxes, other.mins))
         return results
 
-        
+
     def count_neighbors(self, other, r, p=2.):
         """Count how many nearby pairs can be formed.
 
@@ -527,7 +527,7 @@
 
         result : integer or one-dimensional array of integers
             The number of pairs. Note that this is internally stored in a numpy int,
-            and so may overflow if very large (two billion). 
+            and so may overflow if very large (two billion).
         """
 
         def traverse(node1, rect1, node2, rect2, idx):
@@ -577,7 +577,7 @@
             return result
         else:
             raise ValueError("r must be either a single value or a one-dimensional array of values")
-        
+
     def sparse_distance_matrix(self, other, max_distance, p=2.):
         """Compute a sparse distance matrix
 

Modified: trunk/scipy/spatial/tests/test_kdtree.py
===================================================================
--- trunk/scipy/spatial/tests/test_kdtree.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/spatial/tests/test_kdtree.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -13,7 +13,7 @@
         assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
         eps = 1e-8
         assert np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1)>d**2-eps)
-        
+
     def test_m_nearest(self):
         x = self.x
         m = self.m
@@ -73,7 +73,7 @@
         d, i = self.kdtree.query(x, k, eps=eps)
         assert np.all(d<=d_real*(1+eps))
 
-    
+
 class test_random(ConsistencyTests):
     def setUp(self):
         self.n = 100
@@ -279,7 +279,7 @@
     n = 20
     m = 5
     T = KDTree(np.random.randn(n,m))
-    
+
     r = T.query_ball_point(np.random.randn(2,3,m),1)
     assert_equal(r.shape,(2,3))
     assert isinstance(r[0,0],list)
@@ -368,7 +368,7 @@
         m = 2
         self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
         self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
-        
+
     def test_one_radius(self):
         r = 0.2
         assert_equal(self.T1.count_neighbors(self.T2, r),

Modified: trunk/scipy/stats/distributions.py
===================================================================
--- trunk/scipy/stats/distributions.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/stats/distributions.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -19,7 +19,7 @@
      any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isnan, isinf, \
      power
 import numpy
-import numpy as np   
+import numpy as np
 import numpy.random as mtrand
 from numpy import flatnonzero as nonzero
 from scipy.special import gammaln as gamln
@@ -385,9 +385,9 @@
         self._size = 1
         self.m = 0.0
         self.moment_type = momtype
-        
+
         self.expandarr = 1
-        
+
         if not hasattr(self,'numargs'):
             #allows more general subclassing with *args
             cdf_signature = inspect.getargspec(self._cdf.im_func)
@@ -395,7 +395,7 @@
             pdf_signature = inspect.getargspec(self._pdf.im_func)
             numargs2 = len(pdf_signature[0]) - 2
             self.numargs = max(numargs1, numargs2)
-        #nin correction 
+        #nin correction
         self.vecfunc = sgf(self._ppf_single_call,otypes='d')
         self.vecfunc.nin = self.numargs + 1
         self.vecentropy = sgf(self._entropy,otypes='d')
@@ -843,7 +843,7 @@
         def integ(x):
             val = self._pdf(x, *args)
             return val*log(val)
-        
+
         entr = -scipy.integrate.quad(integ,self.a,self.b)[0]
         if not np.isnan(entr):
             return entr
@@ -858,8 +858,8 @@
             else:
                 lower = self.a
             return -scipy.integrate.quad(integ,lower,upper)[0]
-            
 
+
     def entropy(self, *args, **kwds):
         loc,scale=map(kwds.get,['loc','scale'])
         args, loc, scale = self._fix_loc_scale(args, loc, scale)
@@ -2804,7 +2804,7 @@
 class powerlognorm_gen(rv_continuous):
     def _pdf(self, x, c, s):
         return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
-    
+
     def _cdf(self, x, c, s):
         return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
     def _ppf(self, q, c, s):
@@ -2957,7 +2957,7 @@
         isqx = 1.0/sqrt(x)
         return 1.0-norm.cdf(isqx*trm1)-exp(2.0/mu)*norm.cdf(-isqx*trm2)
     # xb=50 or something large is necessary for stats to converge without exception
-recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss',    
+recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss',
                                   longname="A reciprocal inverse Gaussian",
                                   shapes="mu", extradoc="""
 
@@ -3329,19 +3329,19 @@
     #pos = self.a
     pos = max(0, self.a)
     count = 0
-    #handle cases with infinite support 
+    #handle cases with infinite support
     ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
     llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
-    
+
     while (pos <= self.b) and ((pos <= ulimit) or \
                                (diff > self.moment_tol)):
-        diff = pos**n * self.pmf(pos,*args) 
+        diff = pos**n * self.pmf(pos,*args)
         # use pmf because _pmf does not check support in randint
         #     and there might be problems ? with correct self.a, self.b at this stage
         tot += diff
         pos += self.inc
         count += 1
-        
+
     if self.a < 0: #handle case when self.a = -inf
         diff = 1e100
         pos = -self.inc
@@ -3384,11 +3384,11 @@
     #testcase: return wrong number at lower index
     #python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
     #python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
-    #python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"            
+    #python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
             if qa > q:
                 return a
             else:
-                return b     
+                return b
         c = int((a+b)/2.0)
         qc = self._cdf(c, *args)
         if (qc < q):
@@ -3529,11 +3529,11 @@
             self._vecppf = new.instancemethod(_vppf,
                                               self, rv_discrete)
 
-            
 
+
         #now that self.numargs is defined, we can adjust nin
         self._cdfvec.nin = self.numargs + 1
-        
+
         if longname is None:
             if name[0] in ['aeiouAEIOU']: hstr = "An "
             else: hstr = "A "
@@ -3714,7 +3714,7 @@
             goodargs = argsreduce(cond, *((q,)+args+(loc,)))
             loc, goodargs = goodargs[-1], goodargs[:-1]
             place(output,cond,self._ppf(*goodargs) + loc)
-            
+
         if output.ndim == 0:
             return output[()]
         return output
@@ -3744,17 +3744,17 @@
         #typecode 'd' to handle nin and inf
         place(output,(1-cond0)*(cond1==cond1), self.badvalue)
         place(output,cond2,self.a-1)
-        
 
+
         #same problem as with ppf
 
-        
+
         # call place only if at least 1 valid argument
         if any(cond):
             goodargs = argsreduce(cond, *((q,)+args+(loc,)))
             loc, goodargs = goodargs[-1], goodargs[:-1]
             place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
-            
+
         if output.ndim == 0:
             return output[()]
         return output
@@ -4382,7 +4382,7 @@
         # variance mu2 does not aggree with sample variance,
         #   nor with direct calculation using pmf
         # remove for now because generic calculation works
-        #   except it does not show nice zeros for mean and skew(?) 
+        #   except it does not show nice zeros for mean and skew(?)
         ea = exp(-a)
         e2a = exp(-2*a)
         e3a = exp(-3*a)

Modified: trunk/scipy/stats/tests/test_stats.py
===================================================================
--- trunk/scipy/stats/tests/test_stats.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/scipy/stats/tests/test_stats.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -903,11 +903,11 @@
         assert_array_almost_equal(p, self.P1_2)
 
 def test_scoreatpercentile():
-        assert_equal(stats.scoreatpercentile(range(10),50),4.5)
-        assert_equal(stats.scoreatpercentile(range(10),50,(2,7)),4.5)
-        assert_equal(stats.scoreatpercentile(range(100),50,(1,8)),4.5)
-        assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]),50,(10,100)), 55)
-        assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]),50,(1,10)), 5.5)
+    assert_equal(stats.scoreatpercentile(range(10),50),4.5)
+    assert_equal(stats.scoreatpercentile(range(10),50,(2,7)),4.5)
+    assert_equal(stats.scoreatpercentile(range(100),50,(1,8)),4.5)
+    assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]),50,(10,100)), 55)
+    assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]),50,(1,10)), 5.5)
 
 if __name__ == "__main__":
     run_module_suite()

Modified: trunk/tools/win32/build_scripts/pavement.py
===================================================================
--- trunk/tools/win32/build_scripts/pavement.py	2008-11-16 07:44:16 UTC (rev 5125)
+++ trunk/tools/win32/build_scripts/pavement.py	2008-11-16 09:23:48 UTC (rev 5126)
@@ -191,7 +191,7 @@
 
 def get_svn_version(chdir):
     out = subprocess.Popen(['svn', 'info'],
-                           stdout = subprocess.PIPE, 
+                           stdout = subprocess.PIPE,
                            cwd = chdir).communicate()[0]
     r = re.compile('Revision: ([0-9]+)')
     svnver = None
@@ -321,7 +321,7 @@
 
 def raw_build_nsis(pyver):
     bdir = bootstrap_dir(options.pyver)
-    st = subprocess.call(['makensis', 'scipy-superinstaller.nsi'], 
+    st = subprocess.call(['makensis', 'scipy-superinstaller.nsi'],
                          cwd=bdir)
     if st:
         raise RuntimeError("Error while executing makensis command")


