[Numpy-svn] r5055 - in trunk/numpy: . core core/tests distutils doc/newdtype_example lib lib/tests linalg linalg/tests ma ma/tests oldnumeric testing

numpy-svn@scip...
Sun Apr 20 06:49:52 CDT 2008


Author: jarrod.millman
Date: 2008-04-20 06:49:35 -0500 (Sun, 20 Apr 2008)
New Revision: 5055

Modified:
   trunk/numpy/__init__.py
   trunk/numpy/add_newdocs.py
   trunk/numpy/core/defmatrix.py
   trunk/numpy/core/fromnumeric.py
   trunk/numpy/core/memmap.py
   trunk/numpy/core/numerictypes.py
   trunk/numpy/core/tests/test_numerictypes.py
   trunk/numpy/core/tests/test_regression.py
   trunk/numpy/core/tests/test_scalarmath.py
   trunk/numpy/distutils/cpuinfo.py
   trunk/numpy/doc/newdtype_example/example.py
   trunk/numpy/doc/newdtype_example/setup.py
   trunk/numpy/lib/financial.py
   trunk/numpy/lib/function_base.py
   trunk/numpy/lib/index_tricks.py
   trunk/numpy/lib/io.py
   trunk/numpy/lib/tests/test__datasource.py
   trunk/numpy/lib/tests/test_function_base.py
   trunk/numpy/lib/tests/test_io.py
   trunk/numpy/lib/tests/test_regression.py
   trunk/numpy/lib/twodim_base.py
   trunk/numpy/lib/utils.py
   trunk/numpy/linalg/linalg.py
   trunk/numpy/linalg/tests/test_regression.py
   trunk/numpy/ma/core.py
   trunk/numpy/ma/extras.py
   trunk/numpy/ma/morestats.py
   trunk/numpy/ma/mrecords.py
   trunk/numpy/ma/mstats.py
   trunk/numpy/ma/tests/test_core.py
   trunk/numpy/ma/tests/test_extras.py
   trunk/numpy/ma/tests/test_mrecords.py
   trunk/numpy/ma/testutils.py
   trunk/numpy/oldnumeric/compat.py
   trunk/numpy/oldnumeric/ma.py
   trunk/numpy/testing/utils.py
Log:
ran reindent in preparation for the 1.1 release


Modified: trunk/numpy/__init__.py
===================================================================
--- trunk/numpy/__init__.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/__init__.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -1,7 +1,7 @@
 """
 NumPy
 ==========
-Provides 
+Provides
    1) An array object of arbitrary homogeneous items
    2) Fast mathematical operations over arrays
    3) Linear Algebra, Fourier Transforms, Random Number Generation

Modified: trunk/numpy/add_newdocs.py
===================================================================
--- trunk/numpy/add_newdocs.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/add_newdocs.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -1314,10 +1314,10 @@
     Notes
     -----
     The standard deviation is the square root of the average of the squared
-    deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).  
-    The computed standard deviation is computed by dividing by the number of 
-    elements, N-ddof. The option ddof defaults to zero, that is, a 
-    biased estimate. Note that for complex numbers std takes the absolute 
+    deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).
+    The computed standard deviation is computed by dividing by the number of
+    elements, N-ddof. The option ddof defaults to zero, that is, a
+    biased estimate. Note that for complex numbers std takes the absolute
     value before squaring, so that the result is always real and nonnegative.
 
     """))
@@ -1503,10 +1503,10 @@
     Notes
     -----
     The variance is the average of the squared deviations from the mean,
-    i.e.  var = mean(abs(x - x.mean())**2).  The mean is computed by 
-    dividing by N-ddof, where N is the number of elements. The argument 
-    ddof defaults to zero; for an unbiased estimate supply ddof=1. Note 
-    that for complex numbers the absolute value is taken before squaring, 
+    i.e.  var = mean(abs(x - x.mean())**2).  The mean is computed by
+    dividing by N-ddof, where N is the number of elements. The argument
+    ddof defaults to zero; for an unbiased estimate supply ddof=1. Note
+    that for complex numbers the absolute value is taken before squaring,
     so that the result is always real and nonnegative.
 
     """))

Modified: trunk/numpy/core/defmatrix.py
===================================================================
--- trunk/numpy/core/defmatrix.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/defmatrix.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -390,11 +390,11 @@
         -----
         The standard deviation is the square root of the
         average of the squared deviations from the mean, i.e. var =
-        sqrt(mean(abs(x - x.mean())**2)).  The computed standard 
-        deviation is computed by dividing by the number of elements, 
-        N-ddof. The option ddof defaults to zero, that is, a biased 
-        estimate. Note that for complex numbers std takes the absolute 
-        value before squaring, so that the result is always real 
+        sqrt(mean(abs(x - x.mean())**2)).  The computed standard
+        deviation is computed by dividing by the number of elements,
+        N-ddof. The option ddof defaults to zero, that is, a biased
+        estimate. Note that for complex numbers std takes the absolute
+        value before squaring, so that the result is always real
         and nonnegative.
 
         """
@@ -439,11 +439,11 @@
         -----
 
         The variance is the average of the squared deviations from the
-        mean, i.e.  var = mean(abs(x - x.mean())**2).  The mean is 
-        computed by dividing by N-ddof, where N is the number of elements. 
-        The argument ddof defaults to zero; for an unbiased estimate 
-        supply ddof=1. Note that for complex numbers the absolute value 
-        is taken before squaring, so that the result is always real 
+        mean, i.e.  var = mean(abs(x - x.mean())**2).  The mean is
+        computed by dividing by N-ddof, where N is the number of elements.
+        The argument ddof defaults to zero; for an unbiased estimate
+        supply ddof=1. Note that for complex numbers the absolute value
+        is taken before squaring, so that the result is always real
         and nonnegative.
         """
         return N.ndarray.var(self, axis, dtype, out)._align(axis)

Modified: trunk/numpy/core/fromnumeric.py
===================================================================
--- trunk/numpy/core/fromnumeric.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/fromnumeric.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -1671,10 +1671,10 @@
     Notes
     -----
     The standard deviation is the square root of the average of the squared
-    deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).  
-    The computed standard deviation is computed by dividing by the number of 
-    elements, N-ddof. The option ddof defaults to zero, that is, a 
-    biased estimate. Note that for complex numbers std takes the absolute 
+    deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).
+    The computed standard deviation is computed by dividing by the number of
+    elements, N-ddof. The option ddof defaults to zero, that is, a
+    biased estimate. Note that for complex numbers std takes the absolute
     value before squaring, so that the result is always real and nonnegative.
 
     Examples

Modified: trunk/numpy/core/memmap.py
===================================================================
--- trunk/numpy/core/memmap.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/memmap.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -20,7 +20,7 @@
 
     Memory-mapped files are used for accessing small segments of large files
     on disk, without reading the entire file into memory.  Numpy's memmaps are
-    array-like objects.  This differs from python's mmap module, which produces
+    array-like objects.  This differs from python's mmap module, which produces
     file-like objects.
 
     Parameters
@@ -250,4 +250,3 @@
                 # flush any changes to disk, even if it's a view
                 self.flush()
                 self._close()
-

Modified: trunk/numpy/core/numerictypes.py
===================================================================
--- trunk/numpy/core/numerictypes.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/numerictypes.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -606,8 +606,8 @@
             return newdtype
         thisind += 1
     return None
-    
 
+
 def find_common_type(array_types, scalar_types):
     """Determine common type following standard coercion rules
 
@@ -617,13 +617,13 @@
         A list of dtype convertible objects representing arrays
     scalar_types : sequence
         A list of dtype convertible objects representing scalars
-        
+
     Returns
     -------
     datatype : dtype
         The common data-type which is the maximum of the array_types
         ignoring the scalar_types unless the maximum of the scalar_types
-        is of a different kind. 
+        is of a different kind.
 
         If the kinds are not understood, then None is returned.
     """
@@ -646,7 +646,7 @@
         index_sc = _kind_list.index(maxsc.kind)
     except ValueError:
         return None
-    
+
     if index_sc > index_a:
         return _find_common_coerce(maxsc,maxa)
     else:
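
The coercion rule spelled out in this docstring (scalar types only take part when their kind outranks the arrays' kind) can be checked with the standard examples; a minimal sketch, assuming the function is exposed as numpy.find_common_type as in the test file below:

    import numpy as np

    # int64 scalars are ignored: their kind does not outrank the float arrays
    assert np.find_common_type([np.float32], [np.int64]) == np.dtype(np.float32)
    # a complex scalar is of a higher kind, so it does influence the result
    assert np.find_common_type([np.float32], [complex]) == np.dtype(complex)
    # the case exercised in test_numerictypes.py below
    assert np.find_common_type(['u8', 'i8', 'i8'], ['f8']) == np.dtype('f8')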

Modified: trunk/numpy/core/tests/test_numerictypes.py
===================================================================
--- trunk/numpy/core/tests/test_numerictypes.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/tests/test_numerictypes.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -355,10 +355,10 @@
         res = numpy.find_common_type(['u8','i8','i8'],['f8'])
         assert(res == 'f8')
 
-        
 
-        
-        
 
+
+
+
 if __name__ == "__main__":
     NumpyTest().run()

Modified: trunk/numpy/core/tests/test_regression.py
===================================================================
--- trunk/numpy/core/tests/test_regression.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/tests/test_regression.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -818,10 +818,10 @@
         np.indices((0,3,4)).T.reshape(-1,3)
 
     def check_flat_byteorder(self, level=rlevel):
-       """Ticket #657"""
-       x = np.arange(10)
-       assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
-       assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
+        """Ticket #657"""
+        x = np.arange(10)
+        assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
+        assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
 
     def check_uint64_from_negative(self, level=rlevel) :
         assert_equal(np.uint64(-2), np.uint64(18446744073709551614))

Modified: trunk/numpy/core/tests/test_scalarmath.py
===================================================================
--- trunk/numpy/core/tests/test_scalarmath.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/core/tests/test_scalarmath.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -76,7 +76,7 @@
     def check_float_repr(self):
         from numpy import nan, inf
         for t in [np.float32, np.float64, np.longdouble]:
-            if t is np.longdouble: # skip it for now.  
+            if t is np.longdouble: # skip it for now.
                 continue
             finfo=np.finfo(t)
             last_fraction_bit_idx = finfo.nexp + finfo.nmant

Modified: trunk/numpy/distutils/cpuinfo.py
===================================================================
--- trunk/numpy/distutils/cpuinfo.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/distutils/cpuinfo.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -276,10 +276,10 @@
         return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None
 
     def _has_sse3(self):
-        return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None 
+        return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None
 
     def _has_ssse3(self):
-        return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None 
+        return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None
 
     def _has_3dnow(self):
         return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None

Modified: trunk/numpy/doc/newdtype_example/example.py
===================================================================
--- trunk/numpy/doc/newdtype_example/example.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/doc/newdtype_example/example.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -8,10 +8,9 @@
 # But we can get a view as an ndarray of the given type:
 g = np.array([1,2,3,4,5,6,7,8]).view(ff.floatint_type)
 
-# Now, the elements will be the scalar type associated 
+# Now, the elements will be the scalar type associated
 #  with the ndarray.
 print g[0]
 print type(g[1])
 
 # Now, you need to register ufuncs and more arrfuncs to do useful things...
-

Modified: trunk/numpy/doc/newdtype_example/setup.py
===================================================================
--- trunk/numpy/doc/newdtype_example/setup.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/doc/newdtype_example/setup.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -4,10 +4,9 @@
 def configuration(parent_package = '', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('floatint',parent_package,top_path)
-    
+
     config.add_extension('floatint',
                          sources = ['floatint.c']);
     return config
 
 setup(configuration=configuration)
-

Modified: trunk/numpy/lib/financial.py
===================================================================
--- trunk/numpy/lib/financial.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/financial.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -2,12 +2,12 @@
 #  patterned after spreadsheet computations.
 
 # There is some complexity in each function
-#  so that the functions behave like ufuncs with 
+#  so that the functions behave like ufuncs with
 #  broadcasting and being able to be called with scalars
-#  or arrays (or other sequences). 
+#  or arrays (or other sequences).
 import numpy as np
 
-__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate', 
+__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
            'irr', 'npv', 'mirr']
 
 _when_to_num = {'end':0, 'begin':1,
@@ -19,7 +19,7 @@
 
 eqstr = """
 
-                  nper       / (1 + rate*when) \   /        nper   \  
+                  nper       / (1 + rate*when) \   /        nper   \
   fv + pv*(1+rate)    + pmt*|-------------------|*| (1+rate)    - 1 | = 0
                              \     rate        /   \               /
 
@@ -28,23 +28,23 @@
 where (all can be scalars or sequences)
 
     Parameters
-    ---------- 
-    rate : 
+    ----------
+    rate :
         Rate of interest (per period)
-    nper : 
+    nper :
         Number of compounding periods
-    pmt : 
-        Payment 
+    pmt :
+        Payment
     pv :
         Present value
     fv :
-        Future value 
-    when : 
+        Future value
+    when :
         When payments are due ('begin' (1) or 'end' (0))
-                                                                   
+
 """
 
-def _convert_when(when):    
+def _convert_when(when):
     try:
         return _when_to_num[when]
     except KeyError:
@@ -85,19 +85,19 @@
     temp = (1+rate)**nper
     miter = np.broadcast(rate, nper, pv, fv, when)
     zer = np.zeros(miter.shape)
-    fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) 
+    fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
     return -(fv + pv*temp) / fact
 pmt.__doc__ += eqstr + """
 Example
 -------
 
-What would the monthly payment need to be to pay off a $200,000 loan in 15 
+What would the monthly payment need to be to pay off a $200,000 loan in 15
   years at an annual interest rate of 7.5%?
 
 >>> pmt(0.075/12, 12*15, 200000)
 -1854.0247200054619
 
-In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained 
+In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained
   today, a monthly payment of $1,854.02 would be required.
 """
 
@@ -160,19 +160,19 @@
 pv.__doc__ += eqstr
 
 # Computed with Sage
-#  (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r)    
+#  (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r)
 
 def _g_div_gp(r, n, p, x, y, w):
     t1 = (r+1)**n
     t2 = (r+1)**(n-1)
     return (y + t1*x + p*(t1 - 1)*(r*w + 1)/r)/(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r)
 
-# Use Newton's iteration until the change is less than 1e-6 
+# Use Newton's iteration until the change is less than 1e-6
 #  for all values or a maximum of 100 iterations is reached.
-#  Newton's rule is 
-#  r_{n+1} = r_{n} - g(r_n)/g'(r_n) 
+#  Newton's rule is
+#  r_{n+1} = r_{n} - g(r_n)/g'(r_n)
 #     where
-#  g(r) is the formula 
+#  g(r) is the formula
 #  g'(r) is the derivative with respect to r.
 def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
     """Number of periods found by solving the equation
@@ -194,7 +194,7 @@
     else:
         return rn
 rate.__doc__ += eqstr
-    
+
 def irr(values):
     """Internal Rate of Return
 
@@ -212,7 +212,7 @@
     if rate.size == 1:
         rate = rate.item()
     return rate
-    
+
 def npv(rate, values):
     """Net Present Value
 
@@ -223,15 +223,15 @@
 
 def mirr(values, finance_rate, reinvest_rate):
     """Modified internal rate of return
-    
+
     Parameters
     ----------
     values:
         Cash flows (must contain at least one positive and one negative value)
         or nan is returned.
-    finance_rate : 
+    finance_rate :
         Interest rate paid on the cash flows
-    reinvest_rate : 
+    reinvest_rate :
         Interest rate received on the cash flows upon reinvestment
     """
 
@@ -240,7 +240,7 @@
     neg = values < 0
     if not (pos.size > 0 and neg.size > 0):
         return np.nan
-    
+
     n = pos.size + neg.size
     numer = -npv(reinvest_rate, values[pos])*((1+reinvest_rate)**n)
     denom = npv(finance_rate, values[neg])*(1+finance_rate)
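
The Newton iteration referenced in the comments above (r_{n+1} = r_n - g(r_n)/g'(r_n), stopping when the change is below the tolerance or maxiter is reached) looks like this in generic form; the toy g below is purely illustrative, not the library's rate() equation:

    def newton(g, gprime, r0, tol=1e-6, maxiter=100):
        # repeat r <- r - g(r)/g'(r) until the step is smaller than tol
        r = r0
        for _ in range(maxiter):
            step = g(r) / gprime(r)
            r = r - step
            if abs(step) < tol:
                break
        return r

    root = newton(lambda r: r ** 2 - 2, lambda r: 2 * r, 1.0)
    assert abs(root - 2 ** 0.5) < 1e-6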

Modified: trunk/numpy/lib/function_base.py
===================================================================
--- trunk/numpy/lib/function_base.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/function_base.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -328,49 +328,49 @@
 
 def average(a, axis=None, weights=None, returned=False):
     """Return the weighted average of array a over the given axis.
-       
-    
+
+
     Parameters
     ----------
     a : array_like
         Data to be averaged.
     axis : {None, integer}, optional
-        Axis along which to average a. If None, averaging is done over the 
-        entire array irrespective of its shape. 
+        Axis along which to average a. If None, averaging is done over the
+        entire array irrespective of its shape.
     weights : {None, array_like}, optional
-        The importance each datum has in the computation of the 
-        average. The weights array can either be 1D, in which case  its length 
-        must be the size of a along the given axis, or of the same shape as a. 
-        If weights=None, all data are assumed to have weight equal to one. 
+        The importance each datum has in the computation of the
+        average. The weights array can either be 1D, in which case  its length
+        must be the size of a along the given axis, or of the same shape as a.
+        If weights=None, all data are assumed to have weight equal to one.
     returned :{False, boolean}, optional
         If True, the tuple (average, sum_of_weights) is returned,
-        otherwise only the average is returned. Note that if weights=None, then
+        otherwise only the average is returned. Note that if weights=None, then
         the sum of the weights is also the number of elements averaged over.
 
     Returns
     -------
     average, [sum_of_weights] : {array_type, double}
-        Return the average along the specified axis. When returned is True, 
-        return a tuple with the average as the first element and the sum 
-        of the weights as the second element. The return type is Float if a is 
+        Return the average along the specified axis. When returned is True,
+        return a tuple with the average as the first element and the sum
+        of the weights as the second element. The return type is Float if a is
         of integer type, otherwise it is of the same type as a.
         sum_of_weights has the same type as the average.
 
-    
+
     Example
     -------
       >>> average(range(1,11), weights=range(10,0,-1))
       4.0
-    
+
     Exceptions
     ----------
     ZeroDivisionError
-        Raised when all weights along axis are zero. See numpy.ma.average for a 
-        version robust to this type of error. 
+        Raised when all weights along axis are zero. See numpy.ma.average for a
+        version robust to this type of error.
     TypeError
-        Raised when the length of 1D weights is not the same as the shape of a 
-        along axis. 
-    
+        Raised when the length of 1D weights is not the same as the shape of a
+        along axis.
+
     """
     if not isinstance(a, np.matrix) :
         a = np.asarray(a)
@@ -390,7 +390,7 @@
                 raise TypeError, "1D weights expected when shapes of a and weights differ."
             if wgt.shape[0] != a.shape[axis] :
                 raise ValueError, "Length of weights not compatible with specified axis."
-    
+
             # setup wgt to broadcast along axis
             wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1,axis)
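
The weights handling documented above (a 1D weights array is broadcast along the chosen axis) in a short check, using the same numbers as the tests further down in this commit:

    import numpy as np

    y = np.array([[1., 2., 3.], [4., 5., 6.]])
    avg = np.average(y, axis=0, weights=[1, 2])       # len(weights) == y.shape[0]
    assert np.allclose(avg, [3., 4., 5.])
    avg, scl = np.average(y, axis=0, weights=[1, 2], returned=True)
    assert np.allclose(scl, [3., 3., 3.])             # returned=True adds the weight sums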
 
@@ -681,21 +681,21 @@
 def interp(x, xp, fp, left=None, right=None):
     """Return the value of a piecewise-linear function at each value in x.
 
-    The piecewise-linear function, f, is defined by the known data-points 
-    fp=f(xp). The xp points must be sorted in increasing order but this is 
+    The piecewise-linear function, f, is defined by the known data-points
+    fp=f(xp). The xp points must be sorted in increasing order but this is
     not checked.
-    
-    For values of x < xp[0] return the value given by left.  If left is None, 
+
+    For values of x < xp[0] return the value given by left.  If left is None,
     then return fp[0].
-    For values of x > xp[-1] return the value given by right. If right is 
+    For values of x > xp[-1] return the value given by right. If right is
     None, then return fp[-1].
     """
     if isinstance(x, (float, int, number)):
         return compiled_interp([x], xp, fp, left, right).item()
     else:
         return compiled_interp(x, xp, fp, left, right)
-    
 
+
 def angle(z, deg=0):
     """Return the angle of the complex argument z.
     """

Modified: trunk/numpy/lib/index_tricks.py
===================================================================
--- trunk/numpy/lib/index_tricks.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/index_tricks.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -294,7 +294,7 @@
             objs.append(newobj)
             if not scalar and isinstance(newobj, _nx.ndarray):
                 arraytypes.append(newobj.dtype)
-                
+
         #  Ensure that scalars won't up-cast unless warranted
         final_dtype = find_common_type(arraytypes, scalartypes)
         if final_dtype is not None:

Modified: trunk/numpy/lib/io.py
===================================================================
--- trunk/numpy/lib/io.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/io.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -232,37 +232,37 @@
 
     Parameters
     ----------
-    fname : filename or a file handle.  
+    fname : filename or a file handle.
       Support for gzipped files is automatic, if the filename ends in .gz
 
-    dtype : data-type 
-      Data type of the resulting array.  If this is a record data-type, the 
-      resulting array will be 1-d and each row will be interpreted as an 
-      element of the array. The number of columns used must match the number 
+    dtype : data-type
+      Data type of the resulting array.  If this is a record data-type, the
+      resulting array will be 1-d and each row will be interpreted as an
+      element of the array. The number of columns used must match the number
       of fields in the data-type in this case.
 
-    comments : str 
+    comments : str
       The character used to indicate the start of a comment in the file.
 
     delimiter : str
-      A string-like character used to separate values in the file. If delimiter 
+      A string-like character used to separate values in the file. If delimiter
       is unspecified or none, any whitespace string is a separator.
 
     converters : {}
-      A dictionary mapping column number to a function that will convert that 
-      column to a float.  Eg, if column 0 is a date string: 
-      converters={0:datestr2num}. Converters can also be used to provide 
-      a default value for missing data: converters={3:lambda s: float(s or 0)}. 
-    
+      A dictionary mapping column number to a function that will convert that
+      column to a float.  Eg, if column 0 is a date string:
+      converters={0:datestr2num}. Converters can also be used to provide
+      a default value for missing data: converters={3:lambda s: float(s or 0)}.
+
     skiprows : int
       The number of rows from the top to skip.
 
     usecols : sequence
-      A sequence of integer column indexes to extract where 0 is the first 
+      A sequence of integer column indexes to extract where 0 is the first
       column, eg. usecols=(1,4,5) will extract the 2nd, 5th and 6th columns.
 
     unpack : bool
-      If True, will transpose the matrix allowing you to unpack into named 
+      If True, will transpose the matrix allowing you to unpack into named
       arguments on the left hand side.
 
     Examples
@@ -271,8 +271,8 @@
       >>> x,y,z = load('somefile.dat', usecols=(3,5,7), unpack=True)
       >>> r = np.loadtxt('record.dat', dtype={'names':('gender','age','weight'),
                 'formats': ('S1','i4', 'f4')})
-    
-    SeeAlso: scipy.io.loadmat to read and write matfiles.            
+
+    SeeAlso: scipy.io.loadmat to read and write matfiles.
     """
 
     if _string_like(fname):
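
The converters idiom from the docstring (supplying a default for missing data), in the same Python 2 StringIO style as the tests later in this commit; a quick sketch, not part of the change:

    import StringIO
    import numpy as np

    c = StringIO.StringIO('1,2,3,,5\n')
    x = np.loadtxt(c, dtype=int, delimiter=',',
                   converters={3: lambda s: int(s or -999)})
    assert (x == np.array([1, 2, 3, -999, 5])).all()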
@@ -332,23 +332,23 @@
     Parameters
     ----------
     fname : filename or a file handle
-      If the filename ends in .gz, the file is automatically saved in 
-      compressed gzip format.  The load() command understands gzipped files 
+      If the filename ends in .gz, the file is automatically saved in
+      compressed gzip format.  The load() command understands gzipped files
       transparently.
     X : array or sequence
       Data to write to file.
-    fmt : string 
-      A format string %[flags][width][.precision]specifier. See notes below for 
+    fmt : string
+      A format string %[flags][width][.precision]specifier. See notes below for
       a description of some common flags and specifiers.
     delimiter : str
       Character separating columns.
-  
+
     Examples
     --------
       >>> savetxt('test.out', x, delimiter=',')         # X is an array
       >>> savetxt('test.out', (x,y,z))     # x,y,z equal sized 1D arrays
-      >>> savetxt('test.out', x, fmt='%1.4e')  # use exponential notation      
-    
+      >>> savetxt('test.out', x, fmt='%1.4e')  # use exponential notation
+
     Notes on fmt
     ------------
     flags:
@@ -362,19 +362,19 @@
       For e, E and f specifiers, the number of digits to print after the decimal
       point.
       For g and G, the maximum number of significant digits.
-      For s, the maximum number of characters. 
+      For s, the maximum number of characters.
     specifiers:
       c : character
       d or i : signed decimal integer
-      e or E : scientific notation with e or E. 
+      e or E : scientific notation with e or E.
       f : decimal floating point
       g,G : use the shorter of e,E or f
       o : signed octal
       s : string of characters
       u : unsigned decimal integer
       x,X : unsigned hexadecimal integer
-      
-    This is not an exhaustive specification. 
+
+    This is not an exhaustive specification.
     """
 
     if _string_like(fname):
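
Since each value is ultimately rendered with Python's %-operator, the flags/width/precision notes above can be previewed without touching a file (illustrative only):

    assert '%08.3f' % 3.14159 == '0003.142'      # '0' flag, width 8, precision 3
    assert '%+d' % 42 == '+42'                   # '+' flag forces the sign
    assert '%10.4e' % 12345.678 == '1.2346e+04'  # width 10, scientific notation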
@@ -403,7 +403,7 @@
 import re
 def fromregex(file, regexp, dtype):
     """Construct a record array from a text file, using regular-expressions parsing.
-    
+
     Array is constructed from all matches of the regular expression
     in the file. Groups in the regular expression are converted to fields.
 
@@ -423,7 +423,7 @@
     >>> f.write("1312 foo\n1534  bar\n 444   qux")
     >>> f.close()
     >>> np.fromregex('test.dat', r"(\d+)\s+(...)", [('num', np.int64), ('key', 'S3')])
-    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], 
+    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
           dtype=[('num', '<i8'), ('key', '|S3')])
 
     """
@@ -433,7 +433,7 @@
         regexp = re.compile(regexp)
     if not isinstance(dtype, np.dtype):
         dtype = np.dtype(dtype)
-    
+
     seq = regexp.findall(file.read())
     if seq and not isinstance(seq[0], tuple):
         # make sure np.array doesn't interpret strings as binary data
@@ -441,4 +441,3 @@
         seq = [(x,) for x in seq]
     output = np.array(seq, dtype=dtype)
     return output
-

Modified: trunk/numpy/lib/tests/test__datasource.py
===================================================================
--- trunk/numpy/lib/tests/test__datasource.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/tests/test__datasource.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -200,7 +200,7 @@
         tmpfilename = os.path.split(tmpfile)[-1]
 
         tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
-        
+
         assert tmp_path(valid_httpurl()).startswith(self.tmpdir)
         assert tmp_path(invalid_httpurl()).startswith(self.tmpdir)
         assert tmp_path(tmpfile).startswith(self.tmpdir)
@@ -208,7 +208,7 @@
         for fn in malicious_files:
             assert tmp_path(http_path+fn).startswith(self.tmpdir)
             assert tmp_path(fn).startswith(self.tmpdir)
-    
+
     def test_windows_os_sep(self):
         orig_os_sep = os.sep
         try:
@@ -244,7 +244,7 @@
         for fn in malicious_files:
             assert tmp_path(http_path+fn).startswith(self.tmpdir)
             assert tmp_path(fn).startswith(self.tmpdir)
-        
+
     def test_windows_os_sep(self):
         orig_os_sep = os.sep
         try:

Modified: trunk/numpy/lib/tests/test_function_base.py
===================================================================
--- trunk/numpy/lib/tests/test_function_base.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/tests/test_function_base.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -58,36 +58,36 @@
         assert_almost_equal(y5.mean(1), average(y5, 1))
 
         y6 = matrix(rand(5,5))
-        assert_array_equal(y6.mean(0), average(y6,0)) 
-               
+        assert_array_equal(y6.mean(0), average(y6,0))
+
     def check_weights(self):
         y = arange(10)
         w = arange(10)
         assert_almost_equal(average(y, weights=w), (arange(10)**2).sum()*1./arange(10).sum())
-    
+
         y1 = array([[1,2,3],[4,5,6]])
         w0 = [1,2]
         actual = average(y1,weights=w0,axis=0)
         desired = array([3.,4.,5.])
         assert_almost_equal(actual, desired)
-        
-        
+
+
         w1 = [0,0,1]
         desired = array([3., 6.])
         assert_almost_equal(average(y1, weights=w1, axis=1), desired)
 
         # This should raise an error. Can we test for that ?
         # assert_equal(average(y1, weights=w1), 9./2.)
-        
-    
+
+
         # 2D Case
         w2 = [[0,0,1],[0,0,2]]
         desired = array([3., 6.])
         assert_array_equal(average(y1, weights=w2, axis=1), desired)
-        
+
         assert_equal(average(y1, weights=w2), 5.)
-        
-        
+
+
     def check_returned(self):
         y = array([[1,2,3],[4,5,6]])
 
@@ -97,24 +97,24 @@
 
         avg, scl = average(y, 0, returned=True)
         assert_array_equal(scl, array([2.,2.,2.]))
-        
+
         avg, scl = average(y, 1, returned=True)
         assert_array_equal(scl, array([3.,3.]))
-        
+
         # With weights
         w0 = [1,2]
         avg, scl = average(y, weights=w0, axis=0, returned=True)
         assert_array_equal(scl, array([3., 3., 3.]))
-        
+
         w1 = [1,2,3]
         avg, scl = average(y, weights=w1, axis=1, returned=True)
         assert_array_equal(scl, array([6., 6.]))
-        
+
         w2 = [[0,0,1],[1,2,3]]
         avg, scl = average(y, weights=w2, axis=1, returned=True)
         assert_array_equal(scl, array([1.,6.]))
-    
 
+
 class TestSelect(NumpyTestCase):
     def _select(self,cond,values,default=0):
         output = []
@@ -433,7 +433,7 @@
         (a,b)=histogram(v)
         #check if the sum of the bins equals the number of samples
         assert(sum(a,axis=0)==n)
-        #check that the bin counts are evenly spaced when the data is from a 
+        #check that the bin counts are evenly spaced when the data is from a
         # linear function
         (a,b)=histogram(linspace(0,10,100))
         assert(all(a==10))
@@ -443,7 +443,7 @@
         x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \
         [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
         H, edges = histogramdd(x, (2,3,3), range = [[-1,1], [0,3], [0,3]])
-        answer = asarray([[[0,1,0], [0,0,1], [1,0,0]], [[0,1,0], [0,0,1], 
+        answer = asarray([[[0,1,0], [0,0,1], [1,0,0]], [[0,1,0], [0,0,1],
             [0,0,1]]])
         assert_array_equal(H,answer)
         # Check normalization
@@ -451,12 +451,12 @@
         H, edges = histogramdd(x, bins = ed, normed = True)
         assert(all(H == answer/12.))
         # Check that H has the correct shape.
-        H, edges = histogramdd(x, (2,3,4), range = [[-1,1], [0,3], [0,4]], 
+        H, edges = histogramdd(x, (2,3,4), range = [[-1,1], [0,3], [0,4]],
             normed=True)
-        answer = asarray([[[0,1,0,0], [0,0,1,0], [1,0,0,0]], [[0,1,0,0], 
+        answer = asarray([[[0,1,0,0], [0,0,1,0], [1,0,0,0]], [[0,1,0,0],
             [0,0,1,0], [0,0,1,0]]])
         assert_array_almost_equal(H, answer/6., 4)
-        # Check that a sequence of arrays is accepted and H has the correct 
+        # Check that a sequence of arrays is accepted and H has the correct
         # shape.
         z = [squeeze(y) for y in split(x,3,axis=1)]
         H, edges = histogramdd(z, bins=(4,3,2),range=[[-2,2], [0,3], [0,2]])
@@ -473,7 +473,7 @@
 
     def check_shape_3d(self):
         # All possible permutations for bins of different lengths in 3D.
-        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), 
+        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
             (4, 5, 6))
         r = rand(10,3)
         for b in bins:
@@ -482,11 +482,11 @@
 
     def check_shape_4d(self):
         # All possible permutations for bins of different lengths in 4D.
-        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), 
-            (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), 
-            (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), 
-            (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), 
-            (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), 
+        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
+            (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
+            (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
+            (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
+            (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
             (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
 
         r = rand(10,4)
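
The shape contract these tests exercise, in brief: the i-th entry of bins sets the number of bins along axis i of H, and each edge array has one more entry than bins; a short sketch with random data:

    import numpy as np

    r = np.random.rand(10, 3)
    H, edges = np.histogramdd(r, bins=(5, 4, 6))
    assert H.shape == (5, 4, 6)
    assert [len(e) for e in edges] == [6, 5, 7]   # bins + 1 edges per axis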

Modified: trunk/numpy/lib/tests/test_io.py
===================================================================
--- trunk/numpy/lib/tests/test_io.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/tests/test_io.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -38,8 +38,8 @@
         np.savetxt(c, a, delimiter=',', fmt='%d')
         c.seek(0)
         assert_equal(c.readlines(), ['1,2\n', '3,4\n'])
-        
-        
+
+
 ##    def test_format(self):
 ##        a = np.array([(1, 2), (3, 4)])
 ##        c = StringIO.StringIO()
@@ -47,22 +47,22 @@
 ##        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
 ##        c.seek(0)
 ##        assert_equal(c.readlines(), ['01 2.0\n', '03 4.0\n'])
-##        
+##
 ##        # A single multiformat string
 ##        c = StringIO.StringIO()
 ##        np.savetxt(c, a, fmt='%02d : %3.1f')
 ##        c.seek(0)
 ##        lines = c.readlines()
 ##        assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
-##        
+##
 ##        # Specify delimiter, should be overridden
 ##        c = StringIO.StringIO()
 ##        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
 ##        c.seek(0)
 ##        lines = c.readlines()
 ##        assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
-        
 
+
 class TestLoadTxt(NumpyTestCase):
     def test_record(self):
         c = StringIO.StringIO()
@@ -122,7 +122,7 @@
             converters={3:lambda s: int(s or -999)})
         a = np.array([1,2,3,-999,5], int)
         assert_array_equal(x, a)
-        
+
     def test_comments(self):
         c = StringIO.StringIO()
         c.write('# comment\n1,2,3,5\n')
@@ -131,7 +131,7 @@
             comments='#')
         a = np.array([1,2,3,5], int)
         assert_array_equal(x, a)
-        
+
     def test_skiprows(self):
         c = StringIO.StringIO()
         c.write('comment\n1,2,3,5\n')
@@ -140,7 +140,7 @@
             skiprows=1)
         a = np.array([1,2,3,5], int)
         assert_array_equal(x, a)
-        
+
         c = StringIO.StringIO()
         c.write('# comment\n1,2,3,5\n')
         c.seek(0)
@@ -148,7 +148,7 @@
             skiprows=1)
         a = np.array([1,2,3,5], int)
         assert_array_equal(x, a)
-        
+
     def test_usecols(self):
         a =np.array( [[1,2],[3,4]], float)
         c = StringIO.StringIO()
@@ -156,15 +156,15 @@
         c.seek(0)
         x = np.loadtxt(c, dtype=float, usecols=(1,))
         assert_array_equal(x, a[:,1])
-        
+
         a =np.array( [[1,2,3],[3,4,5]], float)
         c = StringIO.StringIO()
         np.savetxt(c, a)
         c.seek(0)
         x = np.loadtxt(c, dtype=float, usecols=(1,2))
         assert_array_equal(x, a[:,1:])
-        
 
+
 class Testfromregex(NumpyTestCase):
     def test_record(self):
         c = StringIO.StringIO()

Modified: trunk/numpy/lib/tests/test_regression.py
===================================================================
--- trunk/numpy/lib/tests/test_regression.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/tests/test_regression.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -28,6 +28,6 @@
         tested = np.polyfit(x, y, 4)
         assert_array_almost_equal(ref, tested)
 
-        
+
 if __name__ == "__main__":
     NumpyTest().run()

Modified: trunk/numpy/lib/twodim_base.py
===================================================================
--- trunk/numpy/lib/twodim_base.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/twodim_base.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -83,9 +83,9 @@
         raise ValueError, "Input must be 1- or 2-d."
 
 def diagflat(v,k=0):
-    """Return a 2D array whose k'th diagonal is a flattened v and all other 
-    elements are zero. 
-    
+    """Return a 2D array whose k'th diagonal is a flattened v and all other
+    elements are zero.
+
     Examples
     --------
       >>> diagflat([[1,2],[3,4]])
@@ -93,12 +93,12 @@
              [0, 2, 0, 0],
              [0, 0, 3, 0],
              [0, 0, 0, 4]])
-      
+
       >>> diagflat([1,2], 1)
       array([[0, 1, 0],
-             [0, 0, 2], 
+             [0, 0, 2],
              [0, 0, 0]])
-    """ 
+    """
     try:
         wrap = v.__array_wrap__
     except AttributeError:

Modified: trunk/numpy/lib/utils.py
===================================================================
--- trunk/numpy/lib/utils.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/lib/utils.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -512,7 +512,7 @@
     if not whats: return
 
     for name, (docstring, kind, index) in cache.iteritems():
-        if kind in ('module', 'object'): 
+        if kind in ('module', 'object'):
             # don't show modules or objects
             continue
         ok = True
@@ -528,7 +528,7 @@
     # XXX: this is full Harrison-Stetson heuristics now,
     # XXX: it probably could be improved
 
-    kind_relevance = {'func': 1000, 'class': 1000, 
+    kind_relevance = {'func': 1000, 'class': 1000,
                       'module': -1000, 'object': -1000}
 
     def relevance(name, docstr, kind, index):
@@ -597,7 +597,7 @@
     cache : dict {obj_full_name: (docstring, kind, index), ...}
         Docstring cache for the module, either cached one (regenerate=False)
         or newly generated.
-        
+
     """
     global _lookfor_caches
 
@@ -623,7 +623,7 @@
 
         index += 1
         kind = "object"
-       
+
         if inspect.ismodule(item):
             kind = "module"
             try:
@@ -649,13 +649,13 @@
                 stack.append(("%s.%s" % (name, n), v))
         elif callable(item):
             kind = "func"
-        
+
         doc = inspect.getdoc(item)
         if doc is not None:
             cache[name] = (doc, kind, index)
 
     return cache
- 
+
 #-----------------------------------------------------------------------------
 
 # The following SafeEval class and company are adapted from Michael Spencer's

Modified: trunk/numpy/linalg/linalg.py
===================================================================
--- trunk/numpy/linalg/linalg.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/linalg/linalg.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -972,8 +972,8 @@
 def cond(x,p=None):
     """Compute the condition number of a matrix.
 
-    The condition number of x is the norm of x times the norm 
-    of the inverse of x.  The norm can be the usual L2 
+    The condition number of x is the norm of x times the norm
+    of the inverse of x.  The norm can be the usual L2
     (root-of-sum-of-squares) norm or a number of other matrix norms.
 
     Parameters
@@ -983,16 +983,16 @@
     p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}
         Order of the norm:
 
-        p      norm for matrices           
+        p      norm for matrices
         =====  ============================
         None   2-norm, computed directly using the SVD
-        'fro'  Frobenius norm             
-        inf    max(sum(abs(x), axis=1))   
-        -inf   min(sum(abs(x), axis=1))    
-        1      max(sum(abs(x), axis=0))     
-        -1     min(sum(abs(x), axis=0))     
-        2      2-norm (largest sing. value) 
-        -2     smallest singular value      
+        'fro'  Frobenius norm
+        inf    max(sum(abs(x), axis=1))
+        -inf   min(sum(abs(x), axis=1))
+        1      max(sum(abs(x), axis=0))
+        -1     min(sum(abs(x), axis=0))
+        2      2-norm (largest sing. value)
+        -2     smallest singular value
         =====  ============================
 
     Returns
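
The identity stated at the top of this docstring (condition number = norm of x times norm of inv(x)) can be confirmed for the 2-norm case; a short check, not part of the commit:

    import numpy as np

    x = np.array([[1.0, 2.0], [3.0, 4.0]])
    c = np.linalg.cond(x)   # p=None: ratio of largest to smallest singular value
    explicit = np.linalg.norm(x, 2) * np.linalg.norm(np.linalg.inv(x), 2)
    assert np.allclose(c, explicit)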

Modified: trunk/numpy/linalg/tests/test_regression.py
===================================================================
--- trunk/numpy/linalg/tests/test_regression.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/linalg/tests/test_regression.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -12,9 +12,9 @@
 class TestRegression(NumpyTestCase):
     def test_eig_build(self, level = rlevel):
         """Ticket #652"""
-        rva = [1.03221168e+02 +0.j, 
+        rva = [1.03221168e+02 +0.j,
                -1.91843603e+01 +0.j,
-               -6.04004526e-01+15.84422474j, 
+               -6.04004526e-01+15.84422474j,
                -6.04004526e-01-15.84422474j,
                -1.13692929e+01 +0.j,
                -6.57612485e-01+10.41755503j,
@@ -24,7 +24,7 @@
                7.80732773e+00 +0.j ,
                -7.65390898e-01 +0.j,
                1.51971555e-15 +0.j ,
-               -1.51308713e-15 +0.j]        
+               -1.51308713e-15 +0.j]
         a = arange(13*13, dtype = float64)
         a.shape = (13,13)
         a = a%17
@@ -38,7 +38,7 @@
         cov = array([[ 77.70273908,   3.51489954,  15.64602427],
                      [3.51489954,  88.97013878,  -1.07431931],
                      [15.64602427,  -1.07431931,  98.18223512]])
-         
+
         vals, vecs = linalg.eigh(cov)
         assert_array_almost_equal(vals, rvals)
 

Modified: trunk/numpy/ma/core.py
===================================================================
--- trunk/numpy/ma/core.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/core.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -26,13 +26,13 @@
            'arctanh', 'argmax', 'argmin', 'argsort', 'around',
            'array', 'asarray','asanyarray',
            'bitwise_and', 'bitwise_or', 'bitwise_xor',
-           'ceil', 'choose', 'common_fill_value', 'compress', 'compressed', 
+           'ceil', 'choose', 'common_fill_value', 'compress', 'compressed',
            'concatenate', 'conjugate', 'cos', 'cosh', 'count',
            'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps',
            'empty', 'empty_like', 'equal', 'exp',
            'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid',
            'frombuffer', 'fromfunction',
-           'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', 
+           'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal',
            'hypot',
            'identity', 'ids', 'indices', 'inner', 'innerproduct',
            'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
@@ -41,16 +41,16 @@
            'make_mask', 'make_mask_none', 'mask_or', 'masked',
            'masked_array', 'masked_equal', 'masked_greater',
            'masked_greater_equal', 'masked_inside', 'masked_invalid',
-           'masked_less','masked_less_equal', 'masked_not_equal', 
-           'masked_object','masked_outside', 'masked_print_option', 
-           'masked_singleton','masked_values', 'masked_where', 'max', 'maximum', 
+           'masked_less','masked_less_equal', 'masked_not_equal',
+           'masked_object','masked_outside', 'masked_print_option',
+           'masked_singleton','masked_values', 'masked_where', 'max', 'maximum',
            'mean', 'min', 'minimum', 'multiply',
            'negative', 'nomask', 'nonzero', 'not_equal',
            'ones', 'outer', 'outerproduct',
            'power', 'product', 'ptp', 'put', 'putmask',
            'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
            'right_shift', 'round_',
-           'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort', 
+           'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort',
            'sqrt', 'std', 'subtract', 'sum', 'swapaxes',
            'take', 'tan', 'tanh', 'transpose', 'true_divide',
            'var', 'where',
@@ -192,7 +192,7 @@
         else:
             fill_value = default_fill_value(dtype)
     else:
-        fill_value = narray(fill_value).tolist() 
+        fill_value = narray(fill_value).tolist()
         fval = numpy.resize(fill_value, len(descr))
         if len(descr) > 1:
             fill_value = [numpy.asarray(f).astype(d[1]).item()
@@ -259,7 +259,7 @@
     """
     if hasattr(a, 'filled'):
         return a.filled(value)
-    elif isinstance(a, ndarray): 
+    elif isinstance(a, ndarray):
         # Should we check for contiguity ? and a.flags['CONTIGUOUS']:
         return a
     elif isinstance(a, dict):
@@ -1579,21 +1579,21 @@
         if self._mask is not nomask:
             data = data[numpy.logical_not(ndarray.ravel(self._mask))]
         return data
-    
-    
+
+
     def compress(self, condition, axis=None, out=None):
         """Return a where condition is True.
         If condition is a MaskedArray, missing values are considered as False.
-        
+
         Returns
         -------
         A MaskedArray object.
-        
+
         Notes
         -----
-        Please note the difference with compressed() ! 
+        Please note the difference with compressed() !
         The output of compress has a mask, the output of compressed does not.
-    
+
         """
         # Get the basic components
         (_data, _mask) = (self._data, self._mask)
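
The compress/compressed distinction called out in this docstring, shown on a tiny masked array (illustrative only):

    import numpy.ma as ma

    a = ma.array([1, 2, 3], mask=[False, True, False])
    assert list(a.compressed()) == [1, 3]      # plain data, mask dropped
    b = a.compress([True, True, False])        # result keeps a mask
    assert b.shape == (2,) and bool(b.mask[1])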
@@ -2169,16 +2169,16 @@
 
         Notes
         -----
-        The value returned is by default a biased estimate of the 
+        The value returned is by default a biased estimate of the
         true variance, since the mean is computed by dividing by N-ddof.
         For the (more standard) unbiased estimate, use ddof=1.
-        Note that for complex numbers the absolute value is taken before 
+        Note that for complex numbers the absolute value is taken before
         squaring, so that the result is always real and nonnegative.
 
         """
         if self._mask is nomask:
             # TODO: Do we keep super, or var _data and take a view ?
-            return super(MaskedArray, self).var(axis=axis, dtype=dtype, 
+            return super(MaskedArray, self).var(axis=axis, dtype=dtype,
                                                 ddof=ddof)
         else:
             cnt = self.count(axis=axis)-ddof
@@ -2213,17 +2213,17 @@
 
         Notes
         -----
-        The value returned is by default a biased estimate of the 
-        true standard deviation, since the mean is computed by dividing 
-        by N-ddof.  For the more standard unbiased estimate, use ddof=1. 
-        Note that for complex numbers the absolute value is taken before 
+        The value returned is by default a biased estimate of the
+        true standard deviation, since the mean is computed by dividing
+        by N-ddof.  For the more standard unbiased estimate, use ddof=1.
+        Note that for complex numbers the absolute value is taken before
         squaring, so that the result is always real and nonnegative.
         """
         dvar = self.var(axis,dtype,ddof=ddof)
         if axis is not None or dvar is not masked:
             dvar = sqrt(dvar)
         return dvar
-    
+
     #............................................
     def round(self, decimals=0, out=None):
         result = self._data.round(decimals).view(type(self))
@@ -2564,7 +2564,7 @@
     #........................
     def tofile(self, fid, sep="", format="%s"):
         raise NotImplementedError("Not implemented yet, sorry...")
-        
+
     #--------------------------------------------
     # Pickling
     def __getstate__(self):
@@ -2886,7 +2886,7 @@
     a = narray(a, copy=True, subok=True)
     if axis is None:
         a = a.flatten()
-        axis = 0        
+        axis = 0
     if fill_value is None:
         if endwith:
             filler = minimum_fill_value(a)

Modified: trunk/numpy/ma/extras.py
===================================================================
--- trunk/numpy/ma/extras.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/extras.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -11,18 +11,18 @@
 __revision__ = "$Revision: 3473 $"
 __date__     = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
 
-__all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d', 
-           'average', 
+__all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d',
+           'average',
            'column_stack','compress_cols','compress_rowcols', 'compress_rows',
            'count_masked',
            'dot','dstack',
            'expand_dims',
-           'flatnotmasked_contiguous','flatnotmasked_edges', 
-           'hsplit','hstack', 
+           'flatnotmasked_contiguous','flatnotmasked_edges',
+           'hsplit','hstack',
            'mask_cols','mask_rowcols','mask_rows','masked_all','masked_all_like',
-           'median','mediff1d','mr_', 
-           'notmasked_contiguous','notmasked_edges', 
-           'row_stack', 
+           'median','mediff1d','mr_',
+           'notmasked_contiguous','notmasked_edges',
+           'row_stack',
            'vstack',
            ]
 
@@ -262,7 +262,7 @@
             the size of a along the given axis.
             If no weights are given, weights are assumed to be 1.
         returned : bool
-            Flag indicating whether a tuple (result, sum of weights/counts) 
+            Flag indicating whether a tuple (result, sum of weights/counts)
             should be returned as output (True), or just the result (False).
 
     """
@@ -417,7 +417,7 @@
         else:
             choice = slice(idx-1,idx+1)
         return data[choice].mean(0)
-    # 
+    #
     if overwrite_input:
         if axis is None:
             sorted = a.ravel()
@@ -432,10 +432,10 @@
     else:
         result = apply_along_axis(_median1D, axis, sorted)
     return result
- 
 
 
 
+
 #..............................................................................
 def compress_rowcols(x, axis=None):
     """Suppress the rows and/or columns of a 2D array that contains
@@ -445,7 +445,7 @@
         - If axis is None, rows and columns are suppressed.
         - If axis is 0, only rows are suppressed.
         - If axis is 1 or -1, only columns are suppressed.
-        
+
     Parameters
     ----------
         axis : int, optional
@@ -504,7 +504,7 @@
         axis : int, optional
             Axis along which to perform the operation.
             If None, applies to a flattened version of the array.
-            
+
     Returns
     -------
          a *pure* ndarray.
@@ -795,7 +795,7 @@
         axis : int, optional
             Axis along which to perform the operation.
             If None, applies to a flattened version of the array.
-            
+
     Returns
     -------
         a sorted sequence of slices (start index, end index).

Modified: trunk/numpy/ma/morestats.py
===================================================================
--- trunk/numpy/ma/morestats.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/morestats.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -59,7 +59,7 @@
 Notes
 -----
     The function is restricted to 2D arrays.
-    
+
     """
     def _hd_1D(data,prob,var):
         "Computes the HD quantiles for a 1D array. Returns nan for invalid data."
@@ -114,7 +114,7 @@
         Axis along which to compute the quantiles. If None, use a flattened array.
     var : boolean
         Whether to return the variance of the estimate.
-        
+
     """
     result = hdquantiles(data,[0.5], axis=axis, var=var)
     return result.squeeze()
@@ -137,7 +137,7 @@
 Notes
 -----
     The function is restricted to 2D arrays.
-    
+
     """
     def _hdsd_1D(data,prob):
         "Computes the std error for 1D arrays."
@@ -192,7 +192,7 @@
         Confidence level of the intervals.
     axis : int
         Axis along which to cut. If None, uses a flattened version of the input.
-    
+
     """
     data = masked_array(data, copy=False)
     trimmed = trim_both(data, proportiontocut=proportiontocut, axis=axis)
@@ -215,7 +215,7 @@
         Sequence of quantiles to compute.
     axis : int
         Axis along which to compute the quantiles. If None, use a flattened array.
-    
+
     """
     def _mjci_1D(data, p):
         data = data.compressed()
@@ -345,7 +345,7 @@
     along the given axis.
 
     If some values are tied, their rank is averaged.
-    If some values are masked, their rank is set to 0 if use_missing is False, 
+    If some values are masked, their rank is set to 0 if use_missing is False,
     or set to the average rank of the unmasked values if use_missing is True.
 
     Parameters
@@ -353,8 +353,8 @@
         data : sequence
             Input data. The data is transformed to a masked array
         axis : integer
-            Axis along which to perform the ranking. 
-            If None, the array is first flattened. An exception is raised if 
+            Axis along which to perform the ranking.
+            If None, the array is first flattened. An exception is raised if
             the axis is specified for arrays with a dimension larger than 2
         use_missing : boolean
             Whether the masked values have a rank of 0 (False) or equal to the

Modified: trunk/numpy/ma/mrecords.py
===================================================================
--- trunk/numpy/ma/mrecords.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/mrecords.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -142,7 +142,7 @@
                     msg = "Mask and data not compatible: data size is %i, "+\
                           "mask size is %i."
                     raise MAError(msg % (nd, nm))
-                copy = True        
+                copy = True
             if not keep_mask:
                 self.__setmask__(mask)
                 self._sharedmask = True
@@ -214,7 +214,7 @@
     def _getmask(self):
         """Return the mask of the mrecord.
     A record is masked when all the fields are masked.
-        
+
         """
         if self.size > 1:
             return self._fieldmask.view((bool_, len(self.dtype))).all(1)
@@ -415,7 +415,7 @@
         return ndarray.view(self, obj)
     #......................................................
     def filled(self, fill_value=None):
-        """Returns an array of the same class as the _data part, where masked 
+        """Returns an array of the same class as the _data part, where masked
     values are filled with fill_value.
     If fill_value is None, self.fill_value is used instead.
 
@@ -487,11 +487,11 @@
                  self._fieldmask.tostring(),
                  self._fill_value,
                  )
-        return state  
+        return state
     #
     def __setstate__(self, state):
-        """Restore the internal state of the masked array, for pickling purposes.  
-    ``state`` is typically the output of the ``__getstate__`` output, and is a 
+        """Restore the internal state of the masked array, for pickling purposes.
+    ``state`` is typically the output of the ``__getstate__`` output, and is a
     5-tuple:
 
         - class name
@@ -570,8 +570,8 @@
     """
     datalist = [getdata(x) for x in arraylist]
     masklist = [getmaskarray(x) for x in arraylist]
-    _array = recfromarrays(datalist, 
-                           dtype=dtype, shape=shape, formats=formats, 
+    _array = recfromarrays(datalist,
+                           dtype=dtype, shape=shape, formats=formats,
                            names=names, titles=titles, aligned=aligned,
                            byteorder=byteorder).view(mrecarray)
     _array._fieldmask[:] = zip(*masklist)
@@ -629,8 +629,8 @@
         if dtype is None:
             dtype = reclist.dtype
         reclist = reclist.tolist()
-    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, 
-                          names=names, titles=titles, 
+    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+                          names=names, titles=titles,
                           aligned=aligned, byteorder=byteorder).view(mrecarray)
     # Set the fill_value if needed
     if fill_value is not None:
@@ -805,5 +805,3 @@
         import cPickle
         _ = cPickle.dumps(mbase)
         mrec_ = cPickle.loads(_)
-        
-        
\ No newline at end of file
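
The __getstate__/__setstate__ hunks, together with the cPickle self-test at the bottom of the file, amount to a pickling round-trip for mrecarray. A minimal usage sketch, assuming the import path of this trunk (numpy.ma.mrecords) and the Python 2 era cPickle used in the file itself:

    import cPickle
    import numpy.ma as ma
    from numpy.ma.mrecords import mrecarray   # import path assumed from this trunk

    ddtype = [('a', int), ('b', float)]
    base = ma.array(zip([1, 2, 3], [1.1, 2.2, 3.3]), mask=[0, 1, 0], dtype=ddtype)
    mbase = base.view(mrecarray)
    mbase_ = cPickle.loads(cPickle.dumps(mbase))  # exercises __getstate__/__setstate__
    print(mbase_.a)                               # fields remain accessible after the round-trip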

Modified: trunk/numpy/ma/mstats.py
===================================================================
--- trunk/numpy/ma/mstats.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/mstats.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -33,9 +33,9 @@
 
 def winsorize(data, alpha=0.2):
     """Returns a Winsorized version of the input array.
-    
-    The (alpha/2.) lowest values are set to the (alpha/2.)th percentile, 
-    and the (alpha/2.) highest values are set to the (1-alpha/2.)th 
+
+    The (alpha/2.) lowest values are set to the (alpha/2.)th percentile,
+    and the (alpha/2.) highest values are set to the (1-alpha/2.)th
     percentile.
     Masked values are skipped.
 
@@ -44,7 +44,7 @@
         data : ndarray
             Input data to Winsorize. The data is first flattened.
         alpha : float
-            Percentage of total Winsorization: alpha/2. on the left, 
+            Percentage of total Winsorization: alpha/2. on the left,
             alpha/2. on the right
 
     """
@@ -57,8 +57,8 @@
 
 #..............................................................................
 def trim_both(data, proportiontocut=0.2, axis=None):
-    """Trims the data by masking the int(trim*n) smallest and int(trim*n) 
-    largest values of data along the given axis, where n is the number 
+    """Trims the data by masking the int(trim*n) smallest and int(trim*n)
+    largest values of data along the given axis, where n is the number
     of unmasked values.
 
     Parameters
@@ -66,11 +66,11 @@
         data : ndarray
             Data to trim.
         proportiontocut : float
-            Percentage of trimming. If n is the number of unmasked values 
+            Percentage of trimming. If n is the number of unmasked values
             before trimming, the number of values after trimming is:
                 (1-2*trim)*n.
         axis : int
-            Axis along which to perform the trimming. 
+            Axis along which to perform the trimming.
             If None, the input array is first flattened.
 
     Notes
@@ -99,7 +99,7 @@
 
 #..............................................................................
 def trim_tail(data, proportiontocut=0.2, tail='left', axis=None):
-    """Trims the data by masking int(trim*n) values from ONE tail of the 
+    """Trims the data by masking int(trim*n) values from ONE tail of the
     data along the given axis, where n is the number of unmasked values.
 
     Parameters
@@ -107,16 +107,16 @@
         data : ndarray
             Data to trim.
         proportiontocut : float
-            Percentage of trimming. If n is the number of unmasked values 
-            before trimming, the number of values after trimming is 
+            Percentage of trimming. If n is the number of unmasked values
+            before trimming, the number of values after trimming is
             (1-trim)*n.
         tail : string
-            Trimming direction, in ('left', 'right'). 
-            If left, the ``proportiontocut`` lowest values are set to the 
-            corresponding percentile. If right, the ``proportiontocut`` 
+            Trimming direction, in ('left', 'right').
+            If left, the ``proportiontocut`` lowest values are set to the
+            corresponding percentile. If right, the ``proportiontocut``
             highest values are used instead.
         axis : int
-            Axis along which to perform the trimming. 
+            Axis along which to perform the trimming.
             If None, the input array is first flattened.
 
     Notes
@@ -158,7 +158,7 @@
 
 #..............................................................................
 def trimmed_mean(data, proportiontocut=0.2, axis=None):
-    """Returns the trimmed mean of the data along the given axis. 
+    """Returns the trimmed mean of the data along the given axis.
     Trimming is performed on both ends of the distribution.
 
     Parameters
@@ -169,7 +169,7 @@
             Proportion of the data to cut from each side of the data .
             As a result, (2*proportiontocut*n) values are actually trimmed.
         axis : int
-            Axis along which to perform the trimming. 
+            Axis along which to perform the trimming.
             If None, the input array is first flattened.
 
     """
@@ -188,7 +188,7 @@
             Proportion of the data to cut from each side of the data .
             As a result, (2*proportiontocut*n) values are actually trimmed.
         axis : int
-            Axis along which to perform the trimming. 
+            Axis along which to perform the trimming.
             If None, the input array is first flattened.
 
     Notes
@@ -222,7 +222,7 @@
         data : ndarray
             Data to trim.
         axis : int
-            Axis along which to perform the trimming. 
+            Axis along which to perform the trimming.
             If None, the input array is first flattened.
 
     """

Modified: trunk/numpy/ma/tests/test_core.py
===================================================================
--- trunk/numpy/ma/tests/test_core.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/tests/test_core.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -848,9 +848,9 @@
         assert_equal(xf.dtype, float_)
         assert_equal(xs.data, ['A', 'b', 'pi'])
         assert_equal(xs.dtype, '|S3')
-        
 
 
+
 #...............................................................................
 
 class TestUfuncs(NumpyTestCase):

Modified: trunk/numpy/ma/tests/test_extras.py
===================================================================
--- trunk/numpy/ma/tests/test_extras.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/tests/test_extras.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -324,7 +324,7 @@
             return b[1]
         xa = apply_along_axis(myfunc,2,a)
         assert_equal(xa,[[1,4],[7,10]])
-        
+
 class TestMedian(NumpyTestCase):
     def __init__(self, *args, **kwds):
         NumpyTestCase.__init__(self, *args, **kwds)
@@ -333,7 +333,7 @@
         "Tests median w/ 2D"
         (n,p) = (101,30)
         x = masked_array(numpy.linspace(-1.,1.,n),)
-        x[:10] = x[-10:] = masked       
+        x[:10] = x[-10:] = masked
         z = masked_array(numpy.empty((n,p), dtype=numpy.float_))
         z[:,0] = x[:]
         idx = numpy.arange(len(x))
@@ -352,9 +352,9 @@
         assert_equal(median(x,0),[[99,10],[11,99],[13,14]])
         x = numpy.ma.arange(24).reshape(4,3,2)
         x[x%5==0] = masked
-        assert_equal(median(x,0), [[12,10],[8,9],[16,17]])        
-        
+        assert_equal(median(x,0), [[12,10],[8,9],[16,17]])
 
+
 ###############################################################################
 #------------------------------------------------------------------------------
 if __name__ == "__main__":
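
The median tests above condense into a small standalone example; masked entries are simply ignored along the chosen axis (values mirror the test case):

    import numpy.ma as ma
    from numpy.ma.extras import median

    x = ma.arange(24).reshape(4, 3, 2)
    x[x % 5 == 0] = ma.masked
    print(median(x, 0))   # the test above expects [[12, 10], [8, 9], [16, 17]]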

Modified: trunk/numpy/ma/tests/test_mrecords.py
===================================================================
--- trunk/numpy/ma/tests/test_mrecords.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/tests/test_mrecords.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -41,7 +41,7 @@
         ddtype = [('a',int),('b',float),('c','|S8')]
         mask = [0,1,0,0,1]
         self.base = ma.array(zip(ilist,flist,slist), mask=mask, dtype=ddtype)
-        
+
     def test_byview(self):
         "Test creation by view"
         base = self.base
@@ -69,7 +69,7 @@
         assert_equal(mbase_first.mask, nomask)
         assert_equal(mbase_first._fieldmask.item(), (False, False, False))
         assert_equal(mbase_first['a'], mbase['a'][0])
-        mbase_last = mbase[-1]        
+        mbase_last = mbase[-1]
         assert isinstance(mbase_last, mrecarray)
         assert_equal(mbase_last.dtype, mbase.dtype)
         assert_equal(mbase_last.tolist(), (None,None,None))
@@ -87,7 +87,7 @@
             assert_equal(getattr(mbase_sl,field), base[:2][field])
 
     def test_set_fields(self):
-        "Tests setting fields."        
+        "Tests setting fields."
         base = self.base.copy()
         mbase = base.view(mrecarray)
         mbase = mbase.copy()
@@ -101,7 +101,7 @@
         assert_equal(mbase['a']._data, [1]*5)
         assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
         assert_equal(mbase._mask, [False]*5)
-        assert_equal(mbase._fieldmask.tolist(), 
+        assert_equal(mbase._fieldmask.tolist(),
                      np.array([(0,0,0),(0,1,1),(0,0,0),(0,0,0),(0,1,1)],
                               dtype=bool))
         # Set a field to mask ........................
@@ -109,7 +109,7 @@
         assert_equal(mbase.c.mask, [1]*5)
         assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
         assert_equal(ma.getdata(mbase['c']), ['N/A']*5)
-        assert_equal(mbase._fieldmask.tolist(), 
+        assert_equal(mbase._fieldmask.tolist(),
                      np.array([(0,0,1),(0,1,1),(0,0,1),(0,0,1),(0,1,1)],
                               dtype=bool))
         # Set fields by slices .......................
@@ -129,12 +129,12 @@
         assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
         assert_equal(mbase['a']._mask, mbase['b']._mask)
         assert_equal(mbase['a']._mask, mbase['c']._mask)
-        assert_equal(mbase._fieldmask.tolist(), 
+        assert_equal(mbase._fieldmask.tolist(),
                      np.array([(1,1,1)]*5, dtype=bool))
         # Delete the mask ............................
         mbase._mask = nomask
         assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
-        assert_equal(mbase._fieldmask.tolist(), 
+        assert_equal(mbase._fieldmask.tolist(),
                      np.array([(0,0,0)]*5, dtype=bool))
     #
     def test_set_mask_fromarray(self):
@@ -154,7 +154,7 @@
     def test_set_mask_fromfields(self):
         mbase = self.base.copy().view(mrecarray)
         #
-        nmask = np.array([(0,1,0),(0,1,0),(1,0,1),(1,0,1),(0,0,0)], 
+        nmask = np.array([(0,1,0),(0,1,0),(1,0,1),(1,0,1),(0,0,0)],
                          dtype=[('a',bool),('b',bool),('c',bool)])
         mbase.mask = nmask
         assert_equal(mbase.a.mask, [0,0,1,1,0])
@@ -240,8 +240,8 @@
         _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
         _c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8')
         ddtype = [('a',int),('b',float),('c','|S8')]
-        mrec = fromarrays([_a,_b,_c], dtype=ddtype, 
-                          fill_value=(99999,99999.,'N/A')) 
+        mrec = fromarrays([_a,_b,_c], dtype=ddtype,
+                          fill_value=(99999,99999.,'N/A'))
         mrecfilled = mrec.filled()
         assert_equal(mrecfilled['a'], np.array((1,2,99999), dtype=int))
         assert_equal(mrecfilled['b'], np.array((1.1,2.2,99999.), dtype=float))
@@ -253,8 +253,8 @@
         _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
         _c = ma.array(['one','two','three'],mask=[1,0,0],dtype='|S8')
         ddtype = [('a',int),('b',float),('c','|S8')]
-        mrec = fromarrays([_a,_b,_c], dtype=ddtype, 
-                          fill_value=(99999,99999.,'N/A')) 
+        mrec = fromarrays([_a,_b,_c], dtype=ddtype,
+                          fill_value=(99999,99999.,'N/A'))
         #
         assert_equal(mrec.tolist(),
                      [(1,1.1,None),(2,2.2,'two'),(None,None,'three')])
@@ -272,11 +272,11 @@
         _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
         _c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8')
         ddtype = [('a',int),('b',float),('c','|S8')]
-        mrec = fromarrays([_a,_b,_c], dtype=ddtype, 
-                          fill_value=(99999,99999.,'N/A')) 
+        mrec = fromarrays([_a,_b,_c], dtype=ddtype,
+                          fill_value=(99999,99999.,'N/A'))
         nrec = recfromarrays((_a.data,_b.data,_c.data), dtype=ddtype)
         self.data = (mrec, nrec, ddtype)
-        
+
     def test_fromarrays(self):
         _a = ma.array([1,2,3],mask=[0,0,1],dtype=int)
         _b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
@@ -284,8 +284,8 @@
         (mrec, nrec, _) = self.data
         for (f,l) in zip(('a','b','c'),(_a,_b,_c)):
             assert_equal(getattr(mrec,f)._mask, l._mask)
-        
-        
+
+
     def test_fromrecords(self):
         "Test construction from records."
         (mrec, nrec, ddtype) = self.data
@@ -300,7 +300,7 @@
         _mrec = fromrecords(nrec)
         assert_equal(_mrec.dtype, mrec.dtype)
         for field in _mrec.dtype.names:
-            assert_equal(getattr(_mrec, field), getattr(mrec._data, field)) 
+            assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
         #
         _mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
         assert_equal(_mrec.dtype, [('c1',int),('c2',float),('c3','|S5')])
@@ -311,7 +311,7 @@
         assert_equal(_mrec.dtype, mrec.dtype)
         assert_equal_records(_mrec._data, mrec.filled())
         assert_equal_records(_mrec._fieldmask, mrec._fieldmask)
-            
+
     def test_fromrecords_wmask(self):
         "Tests construction from records w/ mask."
         (mrec, nrec, ddtype) = self.data
@@ -328,7 +328,7 @@
         assert_equal_records(_mrec._data, mrec._data)
         assert_equal(_mrec._fieldmask.tolist(), mrec._fieldmask.tolist())
         #
-        _mrec = fromrecords(nrec.tolist(), dtype=ddtype, 
+        _mrec = fromrecords(nrec.tolist(), dtype=ddtype,
                             mask=mrec._fieldmask.tolist())
         assert_equal_records(_mrec._data, mrec._data)
         assert_equal(_mrec._fieldmask.tolist(), mrec._fieldmask.tolist())
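
The fromarrays/filled tests above likewise condense into a standalone snippet (import path assumed from this trunk):

    import numpy.ma as ma
    from numpy.ma.mrecords import fromarrays   # path assumed from this trunk

    _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
    _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
    mrec = fromarrays([_a, _b], dtype=[('a', int), ('b', float)],
                      fill_value=(99999, 99999.))
    print(mrec.a)              # third entry is masked
    print(mrec.filled()['a'])  # masked entry replaced by the field's fill_value, 99999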

Modified: trunk/numpy/ma/testutils.py
===================================================================
--- trunk/numpy/ma/testutils.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/ma/testutils.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -216,4 +216,4 @@
     assert_array_equal(m1, m2)
 
 if __name__ == '__main__':
-    pass
\ No newline at end of file
+    pass

Modified: trunk/numpy/oldnumeric/compat.py
===================================================================
--- trunk/numpy/oldnumeric/compat.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/oldnumeric/compat.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -88,7 +88,7 @@
     dstr = fp.read(sz)
     m = mu.fromstring(dstr, typeconv.convtypecode(typecode))
     m.shape = shape
-    
+
     if (LittleEndian and endian == 'B') or (not LittleEndian and endian == 'L'):
         return m.byteswap(True)
     else:
@@ -97,10 +97,10 @@
 import pickle, copy
 class Unpickler(pickle.Unpickler):
     def load_array(self):
-        self.stack.append(_LoadArray(self))    
+        self.stack.append(_LoadArray(self))
 
     dispatch = copy.copy(pickle.Unpickler.dispatch)
-    dispatch['A'] = load_array    
+    dispatch['A'] = load_array
 
 class Pickler(pickle.Pickler):
     def __init__(self, *args, **kwds):
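
The first hunk in this file reads raw bytes with fromstring and byteswaps when the stored byte order differs from the native one. A self-contained illustration of that endian check using plain numpy calls, not the oldnumeric code itself:

    import numpy as np

    raw = np.arange(4, dtype='>i4').tostring()   # pretend big-endian data read from a file
    m = np.fromstring(raw, dtype=np.int32)       # interpreted in the native byte order
    if np.little_endian:                         # native order disagrees with the stored data
        m = m.byteswap(True)                     # swap in place, as the compat code does
    print(m)                                     # [0 1 2 3]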

Modified: trunk/numpy/oldnumeric/ma.py
===================================================================
--- trunk/numpy/oldnumeric/ma.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/oldnumeric/ma.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -2265,5 +2265,3 @@
 
 def take(a, indices, axis=0):
     return new_take(a, indices, axis)
-
-

Modified: trunk/numpy/testing/utils.py
===================================================================
--- trunk/numpy/testing/utils.py	2008-04-20 11:24:30 UTC (rev 5054)
+++ trunk/numpy/testing/utils.py	2008-04-20 11:49:35 UTC (rev 5055)
@@ -311,7 +311,7 @@
                 # Anything else.
                 raise
             else:
-                raise AssertionError('%s() did not raise one of (%s)' % 
+                raise AssertionError('%s() did not raise one of (%s)' %
                     (function.__name__, ', '.join([e.__name__ for e in exceptions])))
         try:
             f2.__name__ = function.__name__
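
The hunk above sits inside a decorator that wraps a test function and fails unless it raises one of the expected exceptions. Its public name is not visible here, so the snippet below re-implements the same check for illustration rather than quoting the numpy.testing code:

    def expect_raises(*exceptions):
        def decorate(function):
            def f2(*args, **kwds):
                try:
                    function(*args, **kwds)
                except exceptions:
                    return                      # expected failure: the wrapped test passes
                raise AssertionError('%s() did not raise one of (%s)' %
                    (function.__name__, ', '.join([e.__name__ for e in exceptions])))
            f2.__name__ = function.__name__     # keep the original test name, as above
            return f2
        return decorate

    @expect_raises(ValueError, TypeError)
    def test_bad_literal():
        int('not a number')                     # raises ValueError, so the test passes

    test_bad_literal()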


