[Scipy-svn] r6928 - in trunk/scipy/sparse: . linalg/dsolve/umfpack linalg/dsolve/umfpack/tests linalg/isolve

Author: warren.weckesser
Date: 2010-11-20 10:00:23 -0600 (Sat, 20 Nov 2010)
New Revision: 6928

Modified:
   trunk/scipy/sparse/base.py
   trunk/scipy/sparse/bsr.py
   trunk/scipy/sparse/compressed.py
   trunk/scipy/sparse/csr.py
   trunk/scipy/sparse/dok.py
   trunk/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py
   trunk/scipy/sparse/linalg/dsolve/umfpack/umfpack.py
   trunk/scipy/sparse/linalg/isolve/utils.py
   trunk/scipy/sparse/spfuncs.py
   trunk/scipy/sparse/sputils.py
Log:
ENH: sparse: update 'raise' statements
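
The commit replaces the Python 2-only statement form

    raise ExcType, "msg"

with the call form

    raise ExcType("msg")

which parses under both Python 2 and Python 3 (the comma form is a
SyntaxError in 3.x).  A minimal illustrative sketch of the pattern, using a
hypothetical helper name chosen only for this example:

    def _check_shapes(a_shape, b_shape):
        # Portable spelling used throughout this commit; the old
        # "raise ValueError, msg" statement form would fail to parse
        # under Python 3.
        if a_shape != b_shape:
            raise ValueError("inconsistent shapes")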

Modified: trunk/scipy/sparse/base.py
===================================================================
--- trunk/scipy/sparse/base.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/base.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -52,8 +52,8 @@
         self.format = self.__class__.__name__[:3]
         self._shape = None
         if self.format == 'spm':
-            raise ValueError, "This class is not intended" \
-                  " to be instantiated directly."
+            raise ValueError("This class is not intended"
+                            " to be instantiated directly.")
         self.maxprint = maxprint
 
     def set_shape(self,shape):
@@ -101,8 +101,8 @@
                 if self.dtype <= np.dtype(fp_type):
                     return self.astype(fp_type)
 
-            raise TypeError,'cannot upcast [%s] to a floating \
-                             point format' % self.dtype.name
+            raise TypeError('cannot upcast [%s] to a floating '
+                             'point format' % self.dtype.name)
 
     def __iter__(self):
         for r in xrange(self.shape[0]):
@@ -126,7 +126,7 @@
         try:
             return self.nnz
         except AttributeError:
-            raise AttributeError, "nnz not defined"
+            raise AttributeError("nnz not defined")
 
     def getformat(self):
         try:
@@ -187,8 +187,8 @@
     # non-zeros is more important.  For now, raise an exception!
     def __len__(self):
         # return self.getnnz()
-        raise TypeError, "sparse matrix length is ambiguous; use getnnz()" \
-                         " or shape[0]"
+        raise TypeError("sparse matrix length is ambiguous; use getnnz()"
+                         " or shape[0]")
 
     def asformat(self, format):
         """Return this matrix in a given sparse format
@@ -426,7 +426,7 @@
         elif attr == 'size':
             return self.getnnz()
         else:
-            raise AttributeError, attr + " not found"
+            raise AttributeError(attr + " not found")
 
     def transpose(self):
         return self.tocsr().transpose()
@@ -541,7 +541,7 @@
             # sum over rows and columns
             return ( self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) ).sum()
         else:
-            raise ValueError, "axis out of bounds"
+            raise ValueError("axis out of bounds")
 
     def mean(self, axis=None):
         """Average the matrix over the given axis.  If the axis is None,
@@ -558,7 +558,7 @@
         elif axis is None:
             return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
         else:
-            raise ValueError, "axis out of bounds"
+            raise ValueError("axis out of bounds")
 
     def diagonal(self):
         """Returns the main diagonal of the matrix
@@ -577,7 +577,7 @@
         """
         M, N = self.shape
         if (k > 0 and k >= N) or (k < 0 and -k >= M):
-            raise ValueError, "k exceedes matrix dimensions"
+            raise ValueError("k exceedes matrix dimensions")
         if k < 0:
             max_index = min(M+k, N, len(values))
             for i,v in enumerate(values[:max_index]):

Modified: trunk/scipy/sparse/bsr.py
===================================================================
--- trunk/scipy/sparse/bsr.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/bsr.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -119,7 +119,7 @@
 
                 R,C = blocksize
                 if (M % R) != 0 or (N % C) != 0:
-                    raise ValueError, 'shape must be multiple of blocksize'
+                    raise ValueError('shape must be multiple of blocksize')
 
                 self.indptr  = np.zeros(M//R + 1, dtype=np.intc )
 
@@ -200,25 +200,23 @@
 
         # check array shapes
         if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
-            raise ValueError,"indices, and indptr should be rank 1"
+            raise ValueError("indices, and indptr should be rank 1")
         if np.rank(self.data) != 3:
-            raise ValueError,"data should be rank 3"
+            raise ValueError("data should be rank 3")
 
         # check index pointer
         if (len(self.indptr) != M//R + 1 ):
-            raise ValueError, \
-                "index pointer size (%d) should be (%d)" % \
-                 (len(self.indptr), M//R + 1)
+            raise ValueError("index pointer size (%d) should be (%d)" %
+                                (len(self.indptr), M//R + 1))
         if (self.indptr[0] != 0):
-            raise ValueError,"index pointer should start with 0"
+            raise ValueError("index pointer should start with 0")
 
         # check index and data arrays
         if (len(self.indices) != len(self.data)):
-            raise ValueError,"indices and data should have the same size"
+            raise ValueError("indices and data should have the same size")
         if (self.indptr[-1] > len(self.indices)):
-            raise ValueError, \
-                  "Last value of index pointer should be less than "\
-                  "the size of index and data arrays"
+            raise ValueError("Last value of index pointer should be less than "
+                                "the size of index and data arrays")
 
         self.prune()
 
@@ -227,12 +225,12 @@
             if self.nnz > 0:
                 if self.indices.max() >= N//C:
                     print "max index",self.indices.max()
-                    raise ValueError, "column index values must be < %d" % (N//C)
+                    raise ValueError("column index values must be < %d" % (N//C))
                 if self.indices.min() < 0:
-                    raise ValueError, "column index values must be >= 0"
+                    raise ValueError("column index values must be >= 0")
                 if diff(self.indptr).min() < 0:
-                    raise ValueError,'index pointer values must form a " \
-                                        "non-decreasing sequence'
+                    raise ValueError("index pointer values must form a "
+                                        "non-decreasing sequence")
 
         #if not self.has_sorted_indices():
         #    warn('Indices were not in sorted order. Sorting indices.')
@@ -475,14 +473,14 @@
         M,N = self.shape
 
         if len(self.indptr) != M//R + 1:
-            raise ValueError, "index pointer has invalid length"
+            raise ValueError("index pointer has invalid length")
 
         bnnz = self.indptr[-1]
 
         if len(self.indices) < bnnz:
-            raise ValueError, "indices array has too few elements"
+            raise ValueError("indices array has too few elements")
         if len(self.data) < bnnz:
-            raise ValueError, "data array has too few elements"
+            raise ValueError("data array has too few elements")
 
         self.data    = self.data[:bnnz]
         self.indices = self.indices[:bnnz]

Modified: trunk/scipy/sparse/compressed.py
===================================================================
--- trunk/scipy/sparse/compressed.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/compressed.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -57,16 +57,16 @@
                     self.indptr  = np.array(indptr, copy=copy)
                     self.data    = np.array(data, copy=copy, dtype=getdtype(dtype, data))
                 else:
-                    raise ValueError, "unrecognized %s_matrix constructor usage" %\
-                            self.format
+                    raise ValueError("unrecognized %s_matrix constructor usage" %
+                            self.format)
 
         else:
             #must be dense
             try:
                 arg1 = np.asarray(arg1)
             except:
-                raise ValueError, "unrecognized %s_matrix constructor usage" % \
-                        self.format
+                raise ValueError("unrecognized %s_matrix constructor usage" %
+                        self.format)
             from coo import coo_matrix
             self._set_self( self.__class__(coo_matrix(arg1, dtype=dtype)) )
 
@@ -80,7 +80,7 @@
                     major_dim = len(self.indptr) - 1
                     minor_dim = self.indices.max() + 1
                 except:
-                    raise ValueError,'unable to infer matrix dimensions'
+                    raise ValueError('unable to infer matrix dimensions')
                 else:
                     self.shape = self._swap((major_dim,minor_dim))
 
@@ -139,19 +139,17 @@
 
         # check index pointer
         if (len(self.indptr) != major_dim + 1 ):
-            raise ValueError, \
-                "index pointer size (%d) should be (%d)" % \
-                 (len(self.indptr), major_dim + 1)
+            raise ValueError("index pointer size (%d) should be (%d)" %
+                                (len(self.indptr), major_dim + 1))
         if (self.indptr[0] != 0):
-            raise ValueError,"index pointer should start with 0"
+            raise ValueError("index pointer should start with 0")
 
         # check index and data arrays
         if (len(self.indices) != len(self.data)):
-            raise ValueError,"indices and data should have the same size"
+            raise ValueError("indices and data should have the same size")
         if (self.indptr[-1] > len(self.indices)):
-            raise ValueError, \
-                  "Last value of index pointer should be less than "\
-                  "the size of index and data arrays"
+            raise ValueError("Last value of index pointer should be less than "
+                                "the size of index and data arrays")
 
         self.prune()
 
@@ -159,14 +157,14 @@
             #check format validity (more expensive)
             if self.nnz > 0:
                 if self.indices.max() >= minor_dim:
-                    raise ValueError, "%s index values must be < %d" % \
-                            (minor_name,minor_dim)
+                    raise ValueError("%s index values must be < %d" %
+                                        (minor_name,minor_dim))
                 if self.indices.min() < 0:
-                    raise ValueError, "%s index values must be >= 0" % \
-                            minor_name
+                    raise ValueError("%s index values must be >= 0" %
+                                        minor_name)
                 if np.diff(self.indptr).min() < 0:
-                    raise ValueError,'index pointer values must form a " \
-                                        "non-decreasing sequence'
+                    raise ValueError("index pointer values must form a "
+                                        "non-decreasing sequence")
 
         #if not self.has_sorted_indices():
         #    warn('Indices were not in sorted order.  Sorting indices.')
@@ -179,11 +177,11 @@
         # First check if argument is a scalar
         if isscalarlike(other):
             # Now we would add this scalar to every element.
-            raise NotImplementedError, 'adding a scalar to a CSC or CSR ' \
-                  'matrix is not supported'
+            raise NotImplementedError('adding a scalar to a CSC or CSR '
+                                        'matrix is not supported')
         elif isspmatrix(other):
             if (other.shape != self.shape):
-                raise ValueError, "inconsistent shapes"
+                raise ValueError("inconsistent shapes")
 
             return self._binopt(other,'_plus_')
         elif isdense(other):
@@ -199,11 +197,11 @@
         # First check if argument is a scalar
         if isscalarlike(other):
             # Now we would add this scalar to every element.
-            raise NotImplementedError, 'adding a scalar to a sparse ' \
-                  'matrix is not supported'
+            raise NotImplementedError('adding a scalar to a sparse '
+                                        'matrix is not supported')
         elif isspmatrix(other):
             if (other.shape != self.shape):
-                raise ValueError, "inconsistent shapes"
+                raise ValueError("inconsistent shapes")
 
             return self._binopt(other,'_minus_')
         elif isdense(other):
@@ -216,8 +214,8 @@
         #note: this can't be replaced by other + (-self) for unsigned types
         if isscalarlike(other):
             # Now we would add this scalar to every element.
-            raise NotImplementedError, 'adding a scalar to a sparse ' \
-                  'matrix is not supported'
+            raise NotImplementedError('adding a scalar to a sparse '
+                                        'matrix is not supported')
         elif isdense(other):
             # Convert this matrix to a dense matrix and subtract them
             return other - self.todense()
@@ -330,7 +328,7 @@
             return self.data.sum()
         else:
             return spmatrix.sum(self,axis)
-            raise ValueError, "axis out of bounds"
+            raise ValueError("axis out of bounds")
 
     #######################
     # Getting and Setting #
@@ -361,7 +359,7 @@
         elif isintlike(key):
             return self[key, :]
         else:
-            raise IndexError, "invalid index"
+            raise IndexError("invalid index")
 
 
     def _get_single_element(self,row,col):
@@ -395,9 +393,9 @@
             [start:stop:string, i] for column-oriented matrices
         """
         if stride != 1:
-            raise ValueError, "slicing with step != 1 not supported"
+            raise ValueError("slicing with step != 1 not supported")
         if stop <= start:
-            raise ValueError, "slice width must be >= 1"
+            raise ValueError("slice width must be >= 1")
 
         #TODO make [i,:] faster
         #TODO implement [i,x:y:z]
@@ -445,9 +443,8 @@
 
         def _in_bounds( i0, i1, num ):
             if not (0<=i0<num) or not (0<i1<=num) or not (i0<i1):
-                raise IndexError,\
-                      "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %\
-                      (i0, num, i1, num, i0, i1)
+                raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
+                                    (i0, num, i1, num, i0, i1))
 
         i0, i1 = _process_slice( slice0, shape0 )
         j0, j1 = _process_slice( slice1, shape1 )
@@ -477,7 +474,7 @@
             if (col < 0):
                 col += N
             if not (0<=row<M) or not (0<=col<N):
-                raise IndexError, "index out of bounds"
+                raise IndexError("index out of bounds")
 
             major_index, minor_index = self._swap((row,col))
 
@@ -519,12 +516,13 @@
                 self.data[start:end][indxs[0]] = val
             else:
                 #entry appears more than once
-                raise ValueError,'nonzero entry (%d,%d) occurs more than once' % (row,col)
+                raise ValueError('nonzero entry (%d,%d) occurs more than once'
+                                % (row,col))
 
             self.check_format(full_check=True)
         else:
             # We should allow slices here!
-            raise IndexError, "invalid index"
+            raise IndexError("invalid index")
 
     ######################
     # Conversion methods #

Modified: trunk/scipy/sparse/csr.py
===================================================================
--- trunk/scipy/sparse/csr.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/csr.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -315,9 +315,9 @@
         start, stop, stride = cslice.indices(self.shape[1])
 
         if stride != 1:
-            raise ValueError, "slicing with step != 1 not supported"
+            raise ValueError("slicing with step != 1 not supported")
         if stop <= start:
-            raise ValueError, "slice width must be >= 1"
+            raise ValueError("slice width must be >= 1")
 
         #TODO make [i,:] faster
         #TODO implement [i,x:y:z]

Modified: trunk/scipy/sparse/dok.py
===================================================================
--- trunk/scipy/sparse/dok.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/dok.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -174,8 +174,8 @@
                     ###################################
                     # We should reshape the new matrix here!
                     ###################################
-                    raise NotImplementedError, "fancy indexing supported over" \
-                            " one axis only"
+                    raise NotImplementedError("fancy indexing supported over"
+                            " one axis only")
                 return new
 
             # Below here, j is a sequence, but i is an integer
@@ -186,14 +186,14 @@
                 seq = j
             else:
                 # j is not an integer
-                raise TypeError, "index must be a pair of integers or slices"
+                raise TypeError("index must be a pair of integers or slices")
 
             # Create a new matrix of the correct dimensions
             first = seq[0]
             last = seq[-1]
             if first < 0 or first >= self.shape[1] or last < 0 \
                          or last >= self.shape[1]:
-                raise IndexError, "index out of bounds"
+                raise IndexError("index out of bounds")
             newshape = (1, last-first+1)
             new = dok_matrix(newshape)
             # ** This uses linear time in the size n of dimension 1:
@@ -212,7 +212,7 @@
         try:
             i, j = key
         except (ValueError, TypeError):
-            raise TypeError, "index must be a pair of integers or slices"
+            raise TypeError("index must be a pair of integers or slices")
 
         # First deal with the case where both i and j are integers
         if isintlike(i) and isintlike(j):
@@ -222,7 +222,7 @@
                 j += self.shape[1]
 
             if i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]:
-                raise IndexError, "index out of bounds"
+                raise IndexError("index out of bounds")
 
             if np.isscalar(value):
                 if value==0 and self.has_key((i,j)):
@@ -243,7 +243,7 @@
             else:
                 # Make sure i is an integer. (But allow it to be a subclass of int).
                 if not isintlike(i):
-                    raise TypeError, "index must be a pair of integers or slices"
+                    raise TypeError("index must be a pair of integers or slices")
                 seq = None
             if seq is not None:
                 # First see if 'value' is another dok_matrix of the appropriate
@@ -253,8 +253,8 @@
                         for element in seq:
                             self[element, j] = value[element, 0]
                     else:
-                        raise NotImplementedError, "setting a 2-d slice of" \
-                                " a dok_matrix is not yet supported"
+                        raise NotImplementedError("setting a 2-d slice of"
+                                " a dok_matrix is not yet supported")
                 elif np.isscalar(value):
                     for element in seq:
                         self[element, j] = value
@@ -262,12 +262,12 @@
                     # See if value is a sequence
                     try:
                         if len(seq) != len(value):
-                            raise ValueError, "index and value ranges must" \
-                                              " have the same length"
+                            raise ValueError("index and value ranges must"
+                                              " have the same length")
                     except TypeError:
                         # Not a sequence
-                        raise TypeError, "unsupported type for" \
-                                         " dok_matrix.__setitem__"
+                        raise TypeError("unsupported type for"
+                                         " dok_matrix.__setitem__")
 
                     # Value is a sequence
                     for element, val in izip(seq, value):
@@ -283,7 +283,7 @@
                     seq = j
                 else:
                     # j is not an integer
-                    raise TypeError, "index must be a pair of integers or slices"
+                    raise TypeError("index must be a pair of integers or slices")
 
                 # First see if 'value' is another dok_matrix of the appropriate
                 # dimensions
@@ -292,8 +292,8 @@
                         for element in seq:
                             self[i, element] = value[0, element]
                     else:
-                        raise NotImplementedError, "setting a 2-d slice of" \
-                                " a dok_matrix is not yet supported"
+                        raise NotImplementedError("setting a 2-d slice of"
+                                " a dok_matrix is not yet supported")
                 elif np.isscalar(value):
                     for element in seq:
                         self[i, element] = value
@@ -301,11 +301,11 @@
                     # See if value is a sequence
                     try:
                         if len(seq) != len(value):
-                            raise ValueError, "index and value ranges must have" \
-                                              " the same length"
+                            raise ValueError("index and value ranges must have"
+                                              " the same length")
                     except TypeError:
                         # Not a sequence
-                        raise TypeError, "unsupported type for dok_matrix.__setitem__"
+                        raise TypeError("unsupported type for dok_matrix.__setitem__")
                     else:
                         for element, val in izip(seq, value):
                             self[i, element] = val
@@ -325,7 +325,7 @@
             #new.dtype.char = self.dtype.char
         elif isinstance(other, dok_matrix):
             if other.shape != self.shape:
-                raise ValueError, "matrix dimensions are not equal"
+                raise ValueError("matrix dimensions are not equal")
             # We could alternatively set the dimensions to the the largest of
             # the two matrices to be summed.  Would this be a good idea?
             new = dok_matrix(self.shape, dtype=self.dtype)
@@ -338,7 +338,7 @@
         elif isdense(other):
             new = self.todense() + other
         else:
-            raise TypeError, "data type not understood"
+            raise TypeError("data type not understood")
         return new
 
     def __radd__(self, other):
@@ -354,7 +354,7 @@
                         new[i, j] = aij
         elif isinstance(other, dok_matrix):
             if other.shape != self.shape:
-                raise ValueError, "matrix dimensions are not equal"
+                raise ValueError("matrix dimensions are not equal")
             new = dok_matrix(self.shape, dtype=self.dtype)
             new.update(self)
             for key in other:
@@ -365,7 +365,7 @@
         elif isdense(other):
             new = other + self.todense()
         else:
-            raise TypeError, "data type not understood"
+            raise TypeError("data type not understood")
         return new
 
     def __neg__(self):
@@ -534,8 +534,8 @@
         non-zero elements that lie outside.
         """
         if not isshape(shape):
-            raise TypeError, "dimensions must be a 2-tuple of positive"\
-                             " integers"
+            raise TypeError("dimensions must be a 2-tuple of positive"
+                             " integers")
         newM, newN = shape
         M, N = self.shape
         if newM < M or newN < N:

Modified: trunk/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py
===================================================================
--- trunk/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -83,7 +83,7 @@
     try:
         readMatrix = formatMap[options.format]
     except:
-        raise ValueError, 'unsupported format: %s' % options.format
+        raise ValueError('unsupported format: %s' % options.format)
 
     print 'format:', options.format
 
@@ -180,7 +180,7 @@
         try:
             import pylab
         except ImportError:
-            raise ImportError, "could not import pylab"
+            raise ImportError("could not import pylab")
         times = np.array( times )
         print times
         pylab.plot( times[:,0], 'b-o' )

Modified: trunk/scipy/sparse/linalg/dsolve/umfpack/umfpack.py
===================================================================
--- trunk/scipy/sparse/linalg/dsolve/umfpack/umfpack.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/linalg/dsolve/umfpack/umfpack.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -277,7 +277,7 @@
         Struct.__init__( self, **kwargs )
 
         if family not in umfFamilyTypes.keys():
-            raise TypeError, 'wrong family: %s' % family
+            raise TypeError('wrong family: %s' % family)
 
         self.family = family
         self.control = np.zeros( (UMFPACK_CONTROL, ), dtype = np.double )
@@ -328,25 +328,25 @@
             indx = mtx.indices
             self.isCSR = 1
         else:
-            raise TypeError, 'must be a CSC/CSR matrix (is %s)' % mtx.__class__
+            raise TypeError('must be a CSC/CSR matrix (is %s)' % mtx.__class__)
 
         ##
         # Should check types of indices to correspond to familyTypes.
         if self.family[1] == 'i':
             if (indx.dtype != np.dtype('i')) \
                    or mtx.indptr.dtype != np.dtype('i'):
-                raise ValueError, 'matrix must have int indices'
+                raise ValueError('matrix must have int indices')
         else:
             if (indx.dtype != np.dtype('l')) \
                    or mtx.indptr.dtype != np.dtype('l'):
-                raise ValueError, 'matrix must have long indices'
+                raise ValueError('matrix must have long indices')
 
         if self.isReal:
             if mtx.data.dtype != np.dtype('<f8'):
-                raise ValueError, 'matrix must have float64 values'
+                raise ValueError('matrix must have float64 values')
         else:
             if mtx.data.dtype != np.dtype('<c16'):
-                raise ValueError, 'matrix must have complex128 values'
+                raise ValueError('matrix must have complex128 values')
 
         return indx
 
@@ -379,8 +379,8 @@
 ##         print status, self._symbolic
 
         if status != UMFPACK_OK:
-            raise RuntimeError, '%s failed with %s' % (self.funs.symbolic,
-                                                       umfStatus[status])
+            raise RuntimeError('%s failed with %s' % (self.funs.symbolic,
+                                                       umfStatus[status]))
 
         self.mtx = mtx
 
@@ -432,8 +432,8 @@
             else:
                 break
             if failCount >= 2:
-                raise RuntimeError, '%s failed with %s' % (self.funs.numeric,
-                                                           umfStatus[status])
+                raise RuntimeError('%s failed with %s' % (self.funs.numeric,
+                                                           umfStatus[status]))
 
     ##
     # 14.12.2005, c
@@ -507,7 +507,7 @@
                       assumes CSC internally
         """
         if sys not in umfSys:
-            raise ValueError, 'sys must be in' % umfSys
+            raise ValueError('sys must be in %s' % umfSys)
 
         if autoTranspose and self.isCSR:
             ##
@@ -517,13 +517,13 @@
             if sys in umfSys_transposeMap[ii]:
                 sys = umfSys_transposeMap[ii][sys]
             else:
-                raise RuntimeError, 'autoTranspose ambiguous, switch it off'
+                raise RuntimeError('autoTranspose ambiguous, switch it off')
 
         if self._numeric is not None:
             if self.mtx is not mtx:
-                raise ValueError, 'must be called with same matrix as numeric()'
+                raise ValueError('must be called with same matrix as numeric()')
         else:
-            raise RuntimeError, 'numeric() not called'
+            raise RuntimeError('numeric() not called')
 
         indx = self._getIndx( mtx )
 
@@ -551,8 +551,8 @@
                 print 'zeroing nan and inf entries...'
                 sol[~np.isfinite( sol )] = 0.0
             else:
-                raise RuntimeError, '%s failed with %s' % (self.funs.solve,
-                                                           umfStatus[status])
+                raise RuntimeError('%s failed with %s' % (self.funs.solve,
+                                                           umfStatus[status]))
         econd = 1.0 / self.info[UMFPACK_RCOND]
         if econd > self.maxCond:
             print 'warning: (almost) singular matrix! '\
@@ -581,7 +581,7 @@
 
 #        print self.family
         if sys not in umfSys:
-            raise ValueError, 'sys must be in' % umfSys
+            raise ValueError('sys must be in %s' % umfSys)
 
         if self._numeric is None:
             self.numeric( mtx )
@@ -646,8 +646,8 @@
                  = self.funs.get_lunz( self._numeric )
 
         if status != UMFPACK_OK:
-            raise RuntimeError, '%s failed with %s' % (self.funs.get_lunz,
-                                                       umfStatus[status])
+            raise RuntimeError('%s failed with %s' % (self.funs.get_lunz,
+                                                       umfStatus[status]))
 
         #allocate storage for decomposition data
         i_type = mtx.indptr.dtype
@@ -673,8 +673,8 @@
                                                        self._numeric )
 
             if status != UMFPACK_OK:
-                raise RuntimeError, '%s failed with %s'\
-                      % (self.funs.get_numeric, umfStatus[status])
+                raise RuntimeError('%s failed with %s'
+                        % (self.funs.get_numeric, umfStatus[status]))
 
             L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col)))
             U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col))
@@ -693,8 +693,8 @@
                                                       self._numeric)
 
             if status != UMFPACK_OK:
-                raise RuntimeError, '%s failed with %s'\
-                      % (self.funs.get_numeric, umfStatus[status])
+                raise RuntimeError('%s failed with %s'
+                        % (self.funs.get_numeric, umfStatus[status]))
 
 
             Lxz = np.zeros( (lnz,), dtype = np.complex128 )

Modified: trunk/scipy/sparse/linalg/isolve/utils.py
===================================================================
--- trunk/scipy/sparse/linalg/isolve/utils.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/linalg/isolve/utils.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -93,7 +93,7 @@
             xtype = b.dtype.char
         else:
             if xtype not in 'fdFD':
-                raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'"
+                raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")
 
     b = asarray(b,dtype=xtype) #make b the same type as x
     b = b.ravel()

Modified: trunk/scipy/sparse/spfuncs.py
===================================================================
--- trunk/scipy/sparse/spfuncs.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/spfuncs.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -41,7 +41,7 @@
         return (1,1)
 
     if not 0 < efficiency < 1.0:
-        raise ValueError,'efficiency must satisfy 0.0 < efficiency < 1.0'
+        raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
 
     high_efficiency = (1.0 + efficiency) / 2.0
     nnz = float(A.nnz)
@@ -85,7 +85,7 @@
     """
     r,c = blocksize
     if r < 1 or c < 1:
-        raise ValueError,'r and c must be positive'
+        raise ValueError('r and c must be positive')
 
     if isspmatrix_csr(A):
         M,N = A.shape

Modified: trunk/scipy/sparse/sputils.py
===================================================================
--- trunk/scipy/sparse/sputils.py	2010-11-20 14:54:23 UTC (rev 6927)
+++ trunk/scipy/sparse/sputils.py	2010-11-20 16:00:23 UTC (rev 6928)
@@ -44,7 +44,7 @@
         if np.can_cast(sample.dtype,t):
             return t
 
-    raise TypeError,'no supported conversion for types: %s' % args
+    raise TypeError('no supported conversion for types: %s' % args)
 
 
 def to_native(A):
@@ -68,7 +68,7 @@
                 newdtype = np.dtype(default)
                 canCast = False
             else:
-                raise TypeError, "could not interpret data type"
+                raise TypeError("could not interpret data type")
     else:
         newdtype = np.dtype(dtype)
 


