[Scipysvn] r6959  in trunk/scipy: signal sparse/linalg/isolve stats
scipysvn@scip...
scipysvn@scip...
Sun Nov 28 07:35:38 CST 2010
Author: rgommers
Date: 2010-11-28 07:35:38 -0600 (Sun, 28 Nov 2010)
New Revision: 6959
Modified:
trunk/scipy/signal/filter_design.py
trunk/scipy/signal/wavelets.py
trunk/scipy/sparse/linalg/isolve/iterative.py
trunk/scipy/stats/stats.py
Log:
DOC: merge some more doc wiki edits.
Modified: trunk/scipy/signal/filter_design.py
===================================================================
 trunk/scipy/signal/filter_design.py 20101128 13:34:42 UTC (rev 6958)
+++ trunk/scipy/signal/filter_design.py 20101128 13:35:38 UTC (rev 6959)
@@ 34,14 +34,15 @@
return w
def freqs(b, a, worN=None, plot=None):
 """Compute frequency response of analog filter.
+ """
+ Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
 frequency response.
+ frequency response::
         b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
 H(w) = ------------------------------------------------------
         a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
+         b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
+ H(w) = ------------------------------------------------------
+         a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters

@@ 54,6 +55,10 @@
of the response curve (determined by pole-zero locations). If a single
integer, the compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
+ plot : callable
+ A callable that takes two arguments. If given, the return parameters
+ `w` and `h` are passed to plot. Useful for plotting the frequency
+ response inside `freqz`.
Returns

@@ 61,6 +66,17 @@
The frequencies at which h was computed.
h : ndarray
The frequency response.
+
+ See Also
+ 
+ freqz : Compute the frequency response of a digital filter.
+
+ Notes
+ 
+ Using Matplotlib's "plot" function as the callable for `plot` produces
+ unexpected results, this plots the real part of the complex transfer
+ function, not the magnitude.
+
"""
if worN is None:
w = findfreqs(b,a,200)
Modified: trunk/scipy/signal/wavelets.py
===================================================================
 trunk/scipy/signal/wavelets.py 20101128 13:34:42 UTC (rev 6958)
+++ trunk/scipy/signal/wavelets.py 20101128 13:35:38 UTC (rev 6959)
@@ 78,7 +78,8 @@
return NotImplemented
def cascade(hk, J=7):
 """Return (x, phi, psi) at dyadic points K/2**J from filter coefficients.
+ """
+ Return (x, phi, psi) at dyadic points K/2**J from filter coefficients.
Parameters

@@ 98,6 +99,7 @@
N
phi(x) = sum hk * phi(2xk)
k=0
+
psi :
The wavelet function ``psi(x)`` at `x`:
Modified: trunk/scipy/sparse/linalg/isolve/iterative.py
===================================================================
 trunk/scipy/sparse/linalg/isolve/iterative.py 20101128 13:34:42 UTC (rev 6958)
+++ trunk/scipy/sparse/linalg/isolve/iterative.py 20101128 13:35:38 UTC (rev 6959)
@@ 305,7 +305,8 @@
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, xtype=None, M=None, callback=None, restrt=None):
 """Use Generalized Minimal RESidual iteration to solve A x = b
+ """
+ Use Generalized Minimal RESidual iteration to solve A x = b.
Parameters

@@ 318,34 +319,54 @@

x : {array, matrix}
The converged solution.
 info : integer
+ info : int
Provides convergence information:
 0 : successful exit
 >0 : convergence to tolerance not achieved, number of iterations
 <0 : illegal input or breakdown
+ * 0 : successful exit
+ * >0 : convergence to tolerance not achieved, number of iterations
+ * <0 : illegal input or breakdown
 Other Parameters
+ Other parameters

 x0 : {array, matrix}
 Starting guess for the solution.
+ x0 : {array, matrix}
+ Starting guess for the solution (a vector of zeros by default).
tol : float
Tolerance to achieve. The algorithm terminates when either the relative
or the absolute residual is below `tol`.
 restart : integer
+ restart : int, optional
Number of iterations between restarts. Larger values increase
iteration cost, but may be necessary for convergence.
 (Default: 20)
 maxiter : integer, optional
+ Default is 20.
+ maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
 Preconditioner for A. The preconditioner should approximate the
 inverse of A. Effective preconditioning dramatically improves the
 rate of convergence, which implies that fewer iterations are needed
 to reach a given error tolerance.
+ Inverse of the preconditioner of A. M should approximate the
+ inverse of A and be easy to solve for (see Notes). Effective
+ preconditioning dramatically improves the rate of convergence,
+ which implies that fewer iterations are needed to reach a given
+ error tolerance. By default, no preconditioner is used.
callback : function
User-supplied function to call after each iteration. It is called
as callback(rk), where rk is the current residual vector.
+
+ See Also
+ 
+ LinearOperator
+
+ Notes
+ 
+ A preconditioner, P, is chosen such that P is close to A but easy to solve for.
+ The preconditioner parameter required by this routine is ``M = P^-1``.
+ The inverse should preferably not be calculated explicitly. Rather, use the
+ following template to produce M::
+
+ # Construct a linear operator that computes P^-1 * x.
+ import scipy.sparse.linalg as spla
+ M_x = lambda x: spla.spsolve(P, x)
+ M = spla.LinearOperator((n, n), M_x)
+
+ Deprecated Parameters
+ 
xtype : {'f','d','F','D'}
This parameter is DEPRECATED  avoid using it.
Modified: trunk/scipy/stats/stats.py
===================================================================
 trunk/scipy/stats/stats.py 20101128 13:34:42 UTC (rev 6958)
+++ trunk/scipy/stats/stats.py 20101128 13:35:38 UTC (rev 6959)
@@ 532,27 +532,27 @@
def cmedian(a, numbins=1000):
 # fixme: numpy.median() always seems to be a better choice.
 # A better version of this function would take already-histogrammed data
 # and compute the median from that.
 # fixme: the wording of the docstring is a bit wonky.
 """Returns the computed median value of an array.
+ """
+ Returns the computed median value of an array.
All of the values in the input array are used. The input array is first
 histogrammed using numbins bins. The bin containing the median is
+ histogrammed using `numbins` bins. The bin containing the median is
selected by searching for the halfway point in the cumulative histogram.
 The median value is then computed by linearly interpolating across that bin.
+ The median value is then computed by linearly interpolating across that
+ bin.
Parameters

 a : array
+ a : array_like
+ Input array.
numbins : int
The number of bins used to histogram the data. More bins give greater
accuracy to the approximation of the median.
Returns

 A floating point value approximating the median.
+ cmedian : float
+ An approximation of the median.
References

@@ 563,6 +563,9 @@
York. 2000.
"""
+ # TODO: numpy.median() always seems to be a better choice.
+ # A better version of this function would take already-histogrammed data
+ # and compute the median from that.
a = np.ravel(a)
n = float(len(a))
@@ 965,7 +968,7 @@
For normally distributed data, the skewness should be about 0. A skewness
value > 0 means that there is more weight in the left tail of the
 distribution. The function skewtest() can be used to determine if the
+ distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
@@ 1020,7 +1023,7 @@
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
 Use kurtosistest() to see if result is close enough to normal.
+ Use `kurtosistest` to see if result is close enough to normal.
Parameters

@@ 1137,6 +1140,8 @@
Returns

+ zscore : float
+ The computed zscore for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
@@ 1170,7 +1175,7 @@
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
 of the normal distribution: kurtosis=3(n1)/(n+1).
+ of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters

@@ 1182,6 +1187,8 @@
Returns

+ zscore : float
+ The computed zscore for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
@@ 1260,22 +1267,51 @@
#####################################
def itemfreq(a):
 # fixme: I'm not sure I understand what this does. The docstring is
 # internally inconsistent.
 # comment: fortunately, this function doesn't appear to be used elsewhere
 """Returns a 2D array of item frequencies.
+ """
+ Returns a 2D array of item frequencies.
 Column 1 contains item values, column 2 contains their respective counts.
 Assumes a 1D array is passed.

Parameters

 a : array
+ a : array_like of rank 1
+ Input array.
Returns

 A 2D frequency table (col [0:n-1]=scores, col n=frequencies)
+ itemfreq : ndarray of rank 2
+ A 2D frequency table (col [0:n-1]=scores, col n=frequencies).
+ Column 1 contains item values, column 2 contains their respective
+ counts.
+
+ Notes
+ 
+ This uses a loop that is only reasonably fast if the number of unique
+ elements is not large. For integers, numpy.bincount is much faster.
+ This function currently does not support strings or multidimensional
+ scores.
+
+ Examples
+ 
+ >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
+ >>> stats.itemfreq(a)
+ array([[ 0., 2.],
+ [ 1., 4.],
+ [ 2., 2.],
+ [ 4., 1.],
+ [ 5., 1.]])
+ >>> np.bincount(a)
+ array([2, 4, 2, 0, 1, 1])
+
+ >>> stats.itemfreq(a/10.)
+ array([[ 0. , 2. ],
+ [ 0.1, 4. ],
+ [ 0.2, 2. ],
+ [ 0.4, 1. ],
+ [ 0.5, 1. ]])
+
"""
+ # TODO: I'm not sure I understand what this does. The docstring is
+ # internally inconsistent.
+ # comment: fortunately, this function doesn't appear to be used elsewhere
scores = _support.unique(a)
scores = np.sort(scores)
freq = zeros(len(scores))
@@ 1432,29 +1468,31 @@
def histogram2(a, bins):
 # comment: probably obsoleted by numpy.histogram()
 """ histogram2(a,bins)  Compute histogram of a using divisions in bins
+ """
+ Compute histogram using divisions in bins.
 Description:
 Count the number of times values from array a fall into
 numerical ranges defined by bins. Range x is given by
 bins[x] <= range_x < bins[x+1] where x =0,N and N is the
 length of the bins array. The last range is given by
 bins[N] <= range_N < infinity. Values less than bins[0] are
 not included in the histogram.
 Arguments:
 a  1D array. The array of values to be divied into bins
 bins  1D array. Defines the ranges of values to use during
 histogramming.
 Returns:
 1D array. Each value represents the occurences for a given
 bin (range) of values.
+ Count the number of times values from array `a` fall into
+ numerical ranges defined by `bins`. Range x is given by
+ bins[x] <= range_x < bins[x+1] where x =0,N and N is the
+ length of the `bins` array. The last range is given by
+ bins[N] <= range_N < infinity. Values less than bins[0] are
+ not included in the histogram.
 Caveat:
 This should probably have an axis argument that would histogram
 along a specific axis (kinda like matlab)
+ Parameters
+ 
+ a : array_like of rank 1
+ The array of values to be assigned into bins
+ bins : array_like of rank 1
+ Defines the ranges of values to use during histogramming.
+ Returns
+ 
+ histogram2 : ndarray of rank 1
+ Each value represents the occurrences for a given bin (range) of
+ values.
+
"""
+ # comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([ n, [len(a)]])
return n[1:]-n[:-1]
@@ 1558,12 +1596,30 @@
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
+ Returns a relative frequency histogram, using the histogram function.
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
+ Parameters
+ 
+ a : ndarray
+ Input array.
+ numbins : int, optional
+ Number of bins.
+ defaultreallimits : 2-sequence or None, optional
+ None (use all data), or a 2sequence containing lower and upper limits
+ on values to include.
+
+ Returns
+ 
+ relfreq : ndarray
+ Binned values of relative frequency.
+ lowerreallimit : float
+ Lower real limit
+ binsize : float
+ Width of each bin.
+ extrapoints : int
+ Extra points.
+
+ """
h,l,b,e = histogram(a,numbins,defaultreallimits, weights=weights)
h = array(h/float(a.shape[0]))
return h,l,b,e
@@ 3718,13 +3774,28 @@
def f_value_multivariate(ER, EF, dfnum, dfden):
 """Returns an Fstatistic given the following:
 ER = error associated with the null hypothesis (the Restricted model)
 EF = error associated with the alternate hypothesis (the Full model)
 dfR = degrees of freedom the Restricted model
 dfF = degrees of freedom associated with the Restricted model
 where ER and EF are matrices from a multivariate F calculation.
"""
+ Returns a multivariate Fstatistic.
+
+ Parameters
+ 
+ ER : ndarray
+ Error associated with the null hypothesis (the Restricted model).
+ From a multivariate F calculation.
+ EF : ndarray
+ Error associated with the alternate hypothesis (the Full model)
+ From a multivariate F calculation.
+ dfnum : int
+ Degrees of freedom the Restricted model.
+ dfden : int
+ Degrees of freedom associated with the Restricted model.
+
+ Returns
+ 
+ fstat : float
+ The computed Fstatistic.
+
+ """
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
@@ 3817,19 +3888,21 @@
def fastsort(a):
 # fixme: the wording in the docstring is nonsense.
 """Sort an array and provide the argsort.
+ """
+ Sort an array and provide the argsort.
Parameters

 a : array
+ a : array_like
+ Input array.
Returns

 (sorted array,
 indices into the original array,
 )
+ fastsort : ndarray of type int
+ sorted indices into the original array
+
"""
+ # TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
More information about the Scipysvn
mailing list