[Numpy-svn] r6369 - in branches/dynamic_cpu_configuration: . doc/source/reference doc/sphinxext numpy numpy/core numpy/core/code_generators numpy/core/src numpy/core/tests numpy/distutils numpy/distutils/command numpy/distutils/fcompiler numpy/doc numpy/f2py numpy/lib numpy/lib/src numpy/lib/tests numpy/linalg numpy/linalg/tests numpy/ma numpy/ma/tests numpy/numarray numpy/oldnumeric numpy/testing numpy/testing/tests

numpy-svn@scip... numpy-svn@scip...
Sun Feb 15 06:06:40 CST 2009


Author: cdavid
Date: 2009-02-15 06:03:15 -0600 (Sun, 15 Feb 2009)
New Revision: 6369

Added:
   branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.baseclass.rst
   branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.generic.rst
   branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.rst
   branches/dynamic_cpu_configuration/numpy/core/src/numpyos.c
   branches/dynamic_cpu_configuration/numpy/doc/constants.py
   branches/dynamic_cpu_configuration/numpy/lib/_iotools.py
   branches/dynamic_cpu_configuration/numpy/lib/recfunctions.py
   branches/dynamic_cpu_configuration/numpy/lib/tests/test__iotools.py
   branches/dynamic_cpu_configuration/numpy/lib/tests/test_recfunctions.py
   branches/dynamic_cpu_configuration/numpy/testing/tests/test_decorators.py
Removed:
   branches/dynamic_cpu_configuration/numpy/testing/parametric.py
Modified:
   branches/dynamic_cpu_configuration/
   branches/dynamic_cpu_configuration/LICENSE.txt
   branches/dynamic_cpu_configuration/MANIFEST.in
   branches/dynamic_cpu_configuration/THANKS.txt
   branches/dynamic_cpu_configuration/doc/source/reference/arrays.classes.rst
   branches/dynamic_cpu_configuration/doc/source/reference/arrays.ndarray.rst
   branches/dynamic_cpu_configuration/doc/source/reference/arrays.rst
   branches/dynamic_cpu_configuration/doc/sphinxext/docscrape.py
   branches/dynamic_cpu_configuration/doc/sphinxext/docscrape_sphinx.py
   branches/dynamic_cpu_configuration/doc/sphinxext/numpydoc.py
   branches/dynamic_cpu_configuration/numpy/add_newdocs.py
   branches/dynamic_cpu_configuration/numpy/core/SConscript
   branches/dynamic_cpu_configuration/numpy/core/_internal.py
   branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py
   branches/dynamic_cpu_configuration/numpy/core/setup.py
   branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c
   branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src
   branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c
   branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src
   branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src
   branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py
   branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py
   branches/dynamic_cpu_configuration/numpy/core/tests/test_numerictypes.py
   branches/dynamic_cpu_configuration/numpy/core/tests/test_print.py
   branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py
   branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py
   branches/dynamic_cpu_configuration/numpy/ctypeslib.py
   branches/dynamic_cpu_configuration/numpy/distutils/command/config.py
   branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py
   branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py
   branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py
   branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py
   branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py
   branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py
   branches/dynamic_cpu_configuration/numpy/distutils/system_info.py
   branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py
   branches/dynamic_cpu_configuration/numpy/f2py/f2py.1
   branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py
   branches/dynamic_cpu_configuration/numpy/f2py/rules.py
   branches/dynamic_cpu_configuration/numpy/lib/__init__.py
   branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py
   branches/dynamic_cpu_configuration/numpy/lib/function_base.py
   branches/dynamic_cpu_configuration/numpy/lib/getlimits.py
   branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py
   branches/dynamic_cpu_configuration/numpy/lib/info.py
   branches/dynamic_cpu_configuration/numpy/lib/io.py
   branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c
   branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py
   branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py
   branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py
   branches/dynamic_cpu_configuration/numpy/lib/utils.py
   branches/dynamic_cpu_configuration/numpy/linalg/linalg.py
   branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py
   branches/dynamic_cpu_configuration/numpy/ma/core.py
   branches/dynamic_cpu_configuration/numpy/ma/extras.py
   branches/dynamic_cpu_configuration/numpy/ma/mrecords.py
   branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py
   branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py
   branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py
   branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py
   branches/dynamic_cpu_configuration/numpy/ma/testutils.py
   branches/dynamic_cpu_configuration/numpy/numarray/util.py
   branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py
   branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py
   branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py
   branches/dynamic_cpu_configuration/numpy/testing/__init__.py
   branches/dynamic_cpu_configuration/numpy/testing/decorators.py
   branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py
   branches/dynamic_cpu_configuration/numpy/testing/nosetester.py
   branches/dynamic_cpu_configuration/numpy/testing/numpytest.py
   branches/dynamic_cpu_configuration/setup.py
Log:
Merged revisions 6191-6221,6235-6238,6240-6241,6244,6250-6251,6253,6256,6258,6260-6261,6263,6265-6266,6268,6271,6283-6286,6291-6316,6320-6352,6354,6356,6358-6368 via svnmerge from 
http://svn.scipy.org/svn/numpy/trunk

................
  r6191 | cdavid | 2008-12-23 13:10:59 +0900 (Tue, 23 Dec 2008) | 1 line
  
  Fix typos in the comments for manifest.
................
  r6192 | cdavid | 2008-12-23 13:11:12 +0900 (Tue, 23 Dec 2008) | 1 line
  
  Use msvcrt values if available for manifest generation: only there starting from python 2.6.1.
................
  r6193 | pearu | 2008-12-23 18:02:15 +0900 (Tue, 23 Dec 2008) | 1 line
  
  Fix issue 964: f2py python 2.6, 2.6.1 support.
................
  r6194 | pierregm | 2008-12-24 08:43:43 +0900 (Wed, 24 Dec 2008) | 12 lines
  
  testutils:
  * assert_equal : use assert_equal_array on records
  * assert_array_compare : prevent the common mask to be back-propagated to the initial input arrays.
  * assert_equal_array : use operator.__eq__ instead of ma.equal
  * assert_equal_less: use operator.__less__ instead of ma.less
  
  core:
  * Fixed _check_fill_value for nested flexible types
  * Add a ndtype option to _make_mask_descr
  * Fixed mask_or for nested flexible types
  * Fixed the printing of masked arrays w/ flexible types.
................
  r6195 | cdavid | 2008-12-26 21:16:45 +0900 (Fri, 26 Dec 2008) | 1 line
  
  Update to handle numscons 0.10.0 and above.
................
  r6196 | cdavid | 2008-12-26 21:36:19 +0900 (Fri, 26 Dec 2008) | 1 line
  
  Do not import msvcrt globally in mingw32compiler module, since the module is imported on all platforms.
................
  r6197 | cdavid | 2008-12-26 23:39:55 +0900 (Fri, 26 Dec 2008) | 1 line
  
  Do not test for functions already tested by python configure script.
................
  r6198 | cdavid | 2008-12-27 14:56:58 +0900 (Sat, 27 Dec 2008) | 1 line
  
  BUG: Add a runtime check about endianness, to detect bug 4728 in python on Mac OS X.
................
  r6199 | cdavid | 2008-12-27 19:06:25 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Fix some typo/syntax errors when converting dict access to a function in manifest generation.
................
  r6200 | cdavid | 2008-12-27 19:15:30 +0900 (Sat, 27 Dec 2008) | 1 line
  
  BUG (#970): fix a python 2.6 bug in distutils which caused an unhelpful Error:None message when trying to build with no VS installed and without the -c mingw32 option.
................
  r6201 | cdavid | 2008-12-27 19:30:49 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Improve the error message when initializing compiler failed.
................
  r6202 | cdavid | 2008-12-27 19:32:05 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Try to initialize the msvc compiler before the general code to detect the error early.
................
  r6203 | cdavid | 2008-12-27 19:43:41 +0900 (Sat, 27 Dec 2008) | 1 line
  
  BUG (#970): this commit should fix the actual bug, which albeit linked to commit r6200, was caused in another code path.
................
  r6204 | cdavid | 2008-12-27 19:57:05 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Fix manifest generation.
................
  r6205 | cdavid | 2008-12-27 20:46:08 +0900 (Sat, 27 Dec 2008) | 1 line
  
  BUG (#827): close temp files before reopening them on windows, and make sure they are not automatically deleted on close either (2.6 and higher specific).
................
  r6206 | cdavid | 2008-12-27 21:18:47 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Do not define the union for runtime endianness detection if we don't check endianness.
................
  r6207 | cdavid | 2008-12-27 22:48:52 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Start working on formatting failure on 2.6: copy how python does complex formatting.
................
  r6208 | cdavid | 2008-12-27 23:44:11 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Fix formatting for purely imaginary complex numbers.
................
  r6209 | cdavid | 2008-12-27 23:53:15 +0900 (Sat, 27 Dec 2008) | 1 line
  
  More work on formatting float.
................
  r6210 | cdavid | 2008-12-27 23:59:41 +0900 (Sat, 27 Dec 2008) | 1 line
  
  Finish formatting fixes for float scalar arrays.
................
  r6211 | cdavid | 2008-12-28 00:12:20 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Include umath_funcs_c99 in multiarray so that we can use isinf and co macros.
................
  r6212 | cdavid | 2008-12-28 01:15:04 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Include config.h before our C99 math compat layer.
................
  r6213 | cdavid | 2008-12-28 01:15:41 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Fix formatting.
................
  r6214 | cdavid | 2008-12-28 01:16:18 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Do not define FMTR and FMTI macros, as those are already defined on some platforms.
................
  r6215 | cdavid | 2008-12-28 01:16:52 +0900 (Sun, 28 Dec 2008) | 1 line
  
  More formatting fixes.
................
  r6216 | cdavid | 2008-12-28 01:17:27 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Remove undef of removed macro.
................
  r6217 | cdavid | 2008-12-28 01:33:40 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Do not use PyOS_ascii_formatd, as it does not handle long double correctly.
................
  r6218 | cdavid | 2008-12-28 02:19:40 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Try ugly hack to circumvent long double brokenness with mingw.
................
  r6219 | cdavid | 2008-12-28 02:25:50 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Use ugly hack for mingw long double pb with complex format function as well.
................
  r6220 | cdavid | 2008-12-28 12:18:20 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Revert formatting changes: ascii_formatd only works for double, so we can't use it as it is for our formatting needs.
................
  r6221 | cdavid | 2008-12-28 15:44:06 +0900 (Sun, 28 Dec 2008) | 1 line
  
  Do not add doc sources through add_data_dir: it will put the docs alongside numpy, as a separate package, which is not what we want. Use the manifest instead, since that's the only way I know of to include something in sdist-generated tarballs.
................
  r6235 | cdavid | 2008-12-29 16:57:52 +0900 (Mon, 29 Dec 2008) | 13 lines
  
  Merged revisions 6233-6234 via svnmerge from 
  http://svn.scipy.org/svn/numpy/branches/fix_float_format
  
  ........
    r6233 | cdavid | 2008-12-29 12:49:09 +0900 (Mon, 29 Dec 2008) | 1 line
    
    Use parametric tests for format tests so that it is clearer which type is failing.
  ........
    r6234 | cdavid | 2008-12-29 12:49:27 +0900 (Mon, 29 Dec 2008) | 1 line
    
    Fix formatting tests: cfloat and cdouble as well as np.float and np.double are the same; make sure we test 4 bytes float.
  ........
................
  r6236 | cdavid | 2008-12-29 17:02:15 +0900 (Mon, 29 Dec 2008) | 1 line
  
  Add nan/inf tests for formatting.
................
  r6237 | cdavid | 2008-12-29 17:26:04 +0900 (Mon, 29 Dec 2008) | 1 line
  
  Add test for real float types locale independence.
................
  r6238 | cdavid | 2008-12-29 17:35:06 +0900 (Mon, 29 Dec 2008) | 1 line
  
  Clearer error messages for formatting failures.
................
  r6240 | cdavid | 2008-12-30 12:48:11 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Add tests for print of float types.
................
  r6241 | cdavid | 2008-12-30 12:56:54 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Add print tests for complex types.
................
  r6244 | cdavid | 2008-12-30 13:20:48 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Fix test for print: forgot to make sure the value is a float before comparing it.
................
  r6250 | cdavid | 2008-12-30 14:02:28 +0900 (Tue, 30 Dec 2008) | 17 lines
  
  Merged revisions 6247-6249 via svnmerge from 
  http://svn.scipy.org/svn/numpy/branches/fix_float_format
  
  ........
    r6247 | cdavid | 2008-12-30 13:41:37 +0900 (Tue, 30 Dec 2008) | 1 line
    
    Handle 1e10 specially, as it is the limit where exp notation is shorter than decimal for single precision, but not for double (python native one).
  ........
    r6248 | cdavid | 2008-12-30 13:47:38 +0900 (Tue, 30 Dec 2008) | 1 line
    
    Refactor a bit redirected output print test.
  ........
    r6249 | cdavid | 2008-12-30 13:49:31 +0900 (Tue, 30 Dec 2008) | 1 line
    
    Fix test for single precision print.
  ........
................
  r6251 | cdavid | 2008-12-30 14:12:50 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Use np.inf instead of float('inf'), as the latter does not work on windows for python < 2.6.
................
  r6253 | cdavid | 2008-12-30 14:15:09 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Fix typo in test.
................
  r6256 | cdavid | 2008-12-30 14:34:22 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Special case float tests on windows: python 2.5 and below have >=3 digits in the exp.
................
  r6258 | cdavid | 2008-12-30 14:42:03 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Hardcode reference for inf/nan-involved values.
................
  r6260 | cdavid | 2008-12-30 14:50:18 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Fix more formatting tests on win32.
................
  r6261 | cdavid | 2008-12-30 14:52:16 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Fix some more redirected output print tests.
................
  r6263 | cdavid | 2008-12-30 15:01:31 +0900 (Tue, 30 Dec 2008) | 1 line
  
  More fixes for print tests.
................
  r6265 | cdavid | 2008-12-30 15:03:56 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Fix typo.
................
  r6266 | cdavid | 2008-12-30 15:08:06 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Fix typo.
................
  r6268 | cdavid | 2008-12-30 15:12:26 +0900 (Tue, 30 Dec 2008) | 1 line
  
  complex scalar arrays cannot be created from real/imag args: wrap init values in a complex.
................
  r6271 | cdavid | 2008-12-30 15:32:03 +0900 (Tue, 30 Dec 2008) | 1 line
  
  Do not use dict for reference: hashing on scalar arrays does not work as I expected.
................
  r6283 | ptvirtan | 2008-12-31 10:14:47 +0900 (Wed, 31 Dec 2008) | 1 line
  
  Fix #951: make tests to clean temp files properly
................
  r6284 | jarrod.millman | 2009-01-01 08:25:03 +0900 (Thu, 01 Jan 2009) | 2 lines
  
  ran reindent
................
  r6285 | alan.mcintyre | 2009-01-01 08:46:34 +0900 (Thu, 01 Jan 2009) | 15 lines
  
  Remove the following deprecated items from numpy.testing:
  
  - ParametricTestCase 
  - The following arguments from numpy.testing.Tester.test(): level, 
  verbosity, 
    all, sys_argv, testcase_pattern
  - Path manipulation functions: set_package_path, set_local_path, 
  restore_path
  - NumpyTestCase, NumpyTest
  
  Also separated testing parameter setup from NoseTester.test into 
  NoseTester.prepare_test_args for use in a utility script for valgrind 
  testing (see NumPy ticket #784).
................
  r6286 | jarrod.millman | 2009-01-01 16:56:53 +0900 (Thu, 01 Jan 2009) | 2 lines
  
  add default include dir for Fedora/Red Hat (see SciPy ticket 817)
................
  r6291 | cdavid | 2009-01-04 19:57:39 +0900 (Sun, 04 Jan 2009) | 1 line
  
  Do not import md5 on python >= 2.6; use hashlib instead.
................
  r6292 | cdavid | 2009-01-04 20:08:16 +0900 (Sun, 04 Jan 2009) | 1 line
  
  Do not use popen* but subprocess.Popen instead.
................
  r6293 | cdavid | 2009-01-04 21:03:29 +0900 (Sun, 04 Jan 2009) | 1 line
  
  Revert md5 change: hashlib.md5 is not a drop-in replacement for md5.
................
  r6294 | pierregm | 2009-01-05 05:16:00 +0900 (Mon, 05 Jan 2009) | 2 lines
  
  * adapted default_fill_value for flexible datatype
  * fixed max/minimum_fill_value for flexible datatype
................
  r6295 | stefan | 2009-01-06 06:51:18 +0900 (Tue, 06 Jan 2009) | 1 line
  
  Credit more developers.
................
  r6296 | pierregm | 2009-01-06 07:52:21 +0900 (Tue, 06 Jan 2009) | 1 line
  
  *moved the printing templates out of MaskedArray.__repr__
................
  r6297 | stefan | 2009-01-06 19:09:00 +0900 (Tue, 06 Jan 2009) | 1 line
  
  Use new-style classes with multiple-inheritance to address bug in IronPython.
................
  r6298 | pierregm | 2009-01-07 05:35:37 +0900 (Wed, 07 Jan 2009) | 1 line
  
  * Bugfix #961
................
  r6299 | pierregm | 2009-01-08 03:14:12 +0900 (Thu, 08 Jan 2009) | 1 line
  
  * Fixed iadd/isub/imul when the base array has no mask but the other array does
................
  r6300 | pierregm | 2009-01-08 07:34:51 +0900 (Thu, 08 Jan 2009) | 3 lines
  
  * Renamed `torecords` to `toflex`, keeping `torecords` as an alias
  * Introduced `fromflex`, to reconstruct a masked_array from the output of `toflex` (can't use `fromrecords` as it would clash with `numpy.ma.mrecords.fromrecords`)
  * Fixed a bug in MaskedBinaryOperation (#979) (wrong array broadcasting)
................
  r6301 | cdavid | 2009-01-08 18:19:00 +0900 (Thu, 08 Jan 2009) | 1 line
  
  Avoid putting things into stderr when errors occurs in f2py wrappers; put all the info in the python error string instead.
................
  r6302 | cdavid | 2009-01-09 00:11:32 +0900 (Fri, 09 Jan 2009) | 1 line
  
  Fix python 2.4 issue.
................
  r6303 | chanley | 2009-01-09 01:30:01 +0900 (Fri, 09 Jan 2009) | 1 line
  
  Fix test_print.py function _test_locale_independance() since str(1.2) does not use the LC_NUMERIC locale to convert numbers.  Fix from Mark Sienkiewicz.
................
  r6304 | cdavid | 2009-01-09 04:22:21 +0900 (Fri, 09 Jan 2009) | 1 line
  
  Revert buggy test fix for locale independence.
................
  r6305 | pierregm | 2009-01-09 05:02:29 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  * Add __eq__ and __ne__ for support of flexible arrays.
  * Fixed .filled for nested structures
................
  r6306 | pierregm | 2009-01-09 06:51:04 +0900 (Fri, 09 Jan 2009) | 1 line
  
  * Remove a debugging print statement.
................
  r6307 | jarrod.millman | 2009-01-09 11:14:35 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  Updated license file
................
  r6308 | cdavid | 2009-01-09 14:26:58 +0900 (Fri, 09 Jan 2009) | 1 line
  
  Tag formatting unit tests as known failures.
................
  r6309 | jarrod.millman | 2009-01-09 17:59:29 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  should be more reliable way to determine what bit platform
................
  r6310 | jarrod.millman | 2009-01-09 18:14:17 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  better default library paths for 64bit arch
................
  r6311 | jarrod.millman | 2009-01-09 18:57:15 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  simplification suggested by stefan
................
  r6312 | jarrod.millman | 2009-01-09 19:02:09 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  switch the order [lib,lib64] --> [lib64,lib] 
................
  r6313 | jarrod.millman | 2009-01-09 19:18:29 +0900 (Fri, 09 Jan 2009) | 2 lines
  
  removed unneeded import
................
  r6314 | jarrod.millman | 2009-01-10 04:37:16 +0900 (Sat, 10 Jan 2009) | 2 lines
  
  can't append an int to a string
................
  r6315 | pierregm | 2009-01-10 05:18:12 +0900 (Sat, 10 Jan 2009) | 2 lines
  
  * Added flatten_structured_arrays
  * Fixed _get_recordarray for nested structures
................
  r6316 | pierregm | 2009-01-10 10:53:05 +0900 (Sat, 10 Jan 2009) | 1 line
  
  * Add flatten_structured_array to the namespace
................
  r6320 | pierregm | 2009-01-14 06:01:58 +0900 (Wed, 14 Jan 2009) | 9 lines
  
  numpy.ma.core:
  * introduced baseclass, sharedmask and hardmask as readonly properties of MaskedArray
  * docstrings update
  
  numpy.ma.extras:
  * docstring updates
  
  docs/reference
  * introduced maskedarray, maskedarray.baseclass, maskedarray.generic
................
  r6321 | stefan | 2009-01-14 16:14:27 +0900 (Wed, 14 Jan 2009) | 2 lines
  
  Docstring: remove old floating point arithmetic, parallel
  execution and postponed import references.
................
  r6322 | stefan | 2009-01-14 16:55:16 +0900 (Wed, 14 Jan 2009) | 1 line
  
  Fix printing of limits.
................
  r6323 | stefan | 2009-01-14 16:56:10 +0900 (Wed, 14 Jan 2009) | 1 line
  
  Fix finfo to work on all instances, not just NumPy scalars.
................
  r6324 | pierregm | 2009-01-17 09:15:15 +0900 (Sat, 17 Jan 2009) | 1 line
  
  * fixed _arraymethod.__call__ for structured arrays
................
  r6325 | ptvirtan | 2009-01-18 06:24:13 +0900 (Sun, 18 Jan 2009) | 3 lines
  
  Make `trapz` accept 1-D `x` parameter for n-d `y`, even if axis != -1.
  
  Additional tests included.
................
  r6326 | pierregm | 2009-01-19 17:53:53 +0900 (Mon, 19 Jan 2009) | 3 lines
  
  * renamed FlatIter to MaskedIterator
  * added __getitem__ to MaskedIterator
................
  r6327 | pierregm | 2009-01-19 18:01:24 +0900 (Mon, 19 Jan 2009) | 2 lines
  
  * replace np.asarray by np.asanyarray in unique1d
................
  r6328 | pierregm | 2009-01-19 18:04:20 +0900 (Mon, 19 Jan 2009) | 2 lines
  
  * add intersect1d, intersect1d_nu, setdiff1d, setmember1d, setxor1d, unique1d, union1d
  * use np.atleast1d instead of ma.atleast1d
................
  r6329 | pierregm | 2009-01-20 06:22:52 +0900 (Tue, 20 Jan 2009) | 3 lines
  
  * lib     : introduced _iotools
  * lib.io : introduced genfromtxt, ndfromtxt, mafromtxt, recfromtxt, recfromcsv.
................
  r6330 | pierregm | 2009-01-22 14:37:36 +0900 (Thu, 22 Jan 2009) | 1 line
  
  * genfromtxt : if names is True, accept a line starting with a comment character as header.
................
  r6331 | pierregm | 2009-01-22 14:40:25 +0900 (Thu, 22 Jan 2009) | 1 line
  
  * added recfunctions, a collection of utilities to manipulate structured arrays.
................
  r6332 | pierregm | 2009-01-23 03:21:32 +0900 (Fri, 23 Jan 2009) | 2 lines
  
  * fixed a machine-dependent issue on default int ('<i4' on OS X, '<i8' on linux) ?
  * fixed an machine-dependent issue on argsort ?
................
  r6333 | cdavid | 2009-01-24 17:02:14 +0900 (Sat, 24 Jan 2009) | 1 line
  
  Fix compilation error on 2.4.
................
  r6334 | pierregm | 2009-01-27 06:04:26 +0900 (Tue, 27 Jan 2009) | 7 lines
  
  * _iotools.StringConverter :
      - add a _checked attribute to indicate whether the converter has been upgraded or not.
      - switched the default value for bool to False
  * io.genfromtxt:
      - fixed for the case where a whole column is masked: switch to bool or the common dtype (if needed)
................
  r6335 | pierregm | 2009-01-27 11:46:26 +0900 (Tue, 27 Jan 2009) | 1 line
  
  * prevent MaskedBinaryOperation and DomainedBinaryOperation to shrink the mask of the output when at least one of the inputs has a mask full of False
................
  r6336 | matthew.brett@gmail.com | 2009-01-30 09:26:44 +0900 (Fri, 30 Jan 2009) | 1 line
  
  New docstrings for byteorder and newbyteorder()
................
  r6337 | pierregm | 2009-02-02 14:20:17 +0900 (Mon, 02 Feb 2009) | 2 lines
  
  * Added a 'autoconvert' option to stack_arrays.
  * Fixed 'stack_arrays' to work with fields with titles.
................
  r6338 | pierregm | 2009-02-04 02:11:44 +0900 (Wed, 04 Feb 2009) | 1 line
  
  * Make sure that StringConverter.update sets the type to object if it can't define it.
................
  r6339 | pierregm | 2009-02-05 05:52:36 +0900 (Thu, 05 Feb 2009) | 2 lines
  
  * test__iotools : prevent test_upgrademapper if dateutil is not installed
  * MaskedArray.__rmul__ : switch to multiply(self, other)
................
  r6340 | pierregm | 2009-02-05 06:53:05 +0900 (Thu, 05 Feb 2009) | 1 line
  
  test_upgrademapper : got rid of the dateutil import
................
  r6341 | pierregm | 2009-02-05 13:31:51 +0900 (Thu, 05 Feb 2009) | 2 lines
  
  *  genfromtxt : Fixed when a dtype involving objects is explicitly given. Raise a NotImplementedError if the dtype is nested.
  * _iotools : make sure StringConverter gets properly initiated when a function returning a np.object is used as input parameter.
................
  r6342 | alan.mcintyre | 2009-02-06 05:11:40 +0900 (Fri, 06 Feb 2009) | 6 lines
  
  Issue #957:
  - Fix problems with test decorators when used on test generators.  
  - The skip/fail arguments for skipif and knownfailureif can now be 
    either a bool or a callable that returns a bool.
  - Added tests for the test decorators. 
................
  r6343 | ptvirtan | 2009-02-06 09:27:08 +0900 (Fri, 06 Feb 2009) | 1 line
  
  doc/numpydoc: work better together with Sphinx's  config option
................
  r6344 | ptvirtan | 2009-02-06 09:51:41 +0900 (Fri, 06 Feb 2009) | 1 line
  
  doc: Move maskedarray docs upward in TOC
................
  r6345 | oliphant | 2009-02-06 15:25:50 +0900 (Fri, 06 Feb 2009) | 1 line
  
  Avoid re-creating the sequence when there is only one field in the regular expression.
................
  r6346 | oliphant | 2009-02-06 15:31:11 +0900 (Fri, 06 Feb 2009) | 1 line
  
  Removed an unneccessary return statement in a unit test.
................
  r6347 | pearu | 2009-02-06 23:36:58 +0900 (Fri, 06 Feb 2009) | 1 line
  
  Fix a bug: python system_info.py failed because _pkg_config_info defined section to be None.
................
  r6348 | pearu | 2009-02-06 23:38:57 +0900 (Fri, 06 Feb 2009) | 1 line
  
  Fix another bug, see last commit.
................
  r6349 | pierregm | 2009-02-07 18:19:12 +0900 (Sat, 07 Feb 2009) | 2 lines
  
  MaskedArray.resize : systematically raise a TypeError exception, as a masked array never owns its data
  MaskedIterator : fixed to allow .flat on masked matrices
................
  r6350 | pierregm | 2009-02-08 03:51:31 +0900 (Sun, 08 Feb 2009) | 1 line
................
  r6351 | ptvirtan | 2009-02-10 05:18:08 +0900 (Tue, 10 Feb 2009) | 1 line
  
  Fix #955: fix errobj leak in scalarmath floating point error handling
................
  r6352 | pierregm | 2009-02-10 09:42:40 +0900 (Tue, 10 Feb 2009) | 1 line
  
  * prevent modifications to the mask to be back-propagated w/ __array_wrap__
................
  r6354 | cdavid | 2009-02-10 19:44:01 +0900 (Tue, 10 Feb 2009) | 1 line
  
  Fix hyphen (patch from debian package).
................
  r6356 | pierregm | 2009-02-11 10:51:28 +0900 (Wed, 11 Feb 2009) | 1 line
  
  * MaskedArray.__array_wrap__ : forces the domain (if any) to a ndarray (fill with True)
................
  r6358 | oliphant | 2009-02-12 13:22:03 +0900 (Thu, 12 Feb 2009) | 1 line
  
  Add multiple-field access by making a copy of the array and filling with the selected fields.
................
  r6359 | stefan | 2009-02-12 14:44:07 +0900 (Thu, 12 Feb 2009) | 2 lines
  
  Trust user's specification of MACOSX_DEPLOYMENT_TARGET [patch by Brian
  Granger].
................
  r6360 | cdavid | 2009-02-14 23:54:26 +0900 (Sat, 14 Feb 2009) | 1 line
  
  Merge fix_float_format branch into the trunk.
................
  r6361 | cdavid | 2009-02-15 00:02:39 +0900 (Sun, 15 Feb 2009) | 1 line
  
  Fix typo in multiarray tests.
................
  r6362 | cdavid | 2009-02-15 00:03:22 +0900 (Sun, 15 Feb 2009) | 1 line
  
  Remove leftover in TestIO.
................
  r6363 | cdavid | 2009-02-15 02:03:51 +0900 (Sun, 15 Feb 2009) | 1 line
  
  Include C99 math compatibility layer in multiarray - isnan and co needed by numpyos.c
................
  r6364 | ptvirtan | 2009-02-15 07:09:26 +0900 (Sun, 15 Feb 2009) | 1 line
  
  More add_newdocs entries, and make add_newdoc capable of adding docs also to normal Python objects.
................
  r6365 | ptvirtan | 2009-02-15 07:10:24 +0900 (Sun, 15 Feb 2009) | 1 line
  
  Move (un)packbits docstrings to add_newdocs.py. Fix typos.
................
  r6366 | ptvirtan | 2009-02-15 07:11:19 +0900 (Sun, 15 Feb 2009) | 1 line
  
  Document constants in numpy.doc.constants
................
  r6367 | ptvirtan | 2009-02-15 07:38:32 +0900 (Sun, 15 Feb 2009) | 1 line
  
  Move numpy.lib __doc__ back to info.py; was moved to __init__.py by mistake.
................
  r6368 | pierregm | 2009-02-15 07:42:29 +0900 (Sun, 15 Feb 2009) | 1 line
  
  * genfromtxt : fixed case when using explicit converters and explicit dtype.
................



Property changes on: branches/dynamic_cpu_configuration
___________________________________________________________________
Name: svnmerge-integrated
   - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-6187
   + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-6368

Modified: branches/dynamic_cpu_configuration/LICENSE.txt
===================================================================
--- branches/dynamic_cpu_configuration/LICENSE.txt	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/LICENSE.txt	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,4 +1,4 @@
-Copyright (c) 2005, NumPy Developers
+Copyright (c) 2005-2009, NumPy Developers.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

Modified: branches/dynamic_cpu_configuration/MANIFEST.in
===================================================================
--- branches/dynamic_cpu_configuration/MANIFEST.in	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/MANIFEST.in	2009-02-15 12:03:15 UTC (rev 6369)
@@ -11,3 +11,9 @@
 recursive-include numpy/core/code_generators *.py
 include numpy/core/include/numpy/numpyconfig.h.in
 recursive-include numpy SConstruct
+# Add documentation: we don't use add_data_dir since we do not want to include
+# this at installation, only for sdist-generated tarballs
+include doc/Makefile doc/postprocess.py
+recursive-include doc/release *
+recursive-include doc/source *
+recursive-include doc/sphinxext *

Modified: branches/dynamic_cpu_configuration/THANKS.txt
===================================================================
--- branches/dynamic_cpu_configuration/THANKS.txt	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/THANKS.txt	2009-02-15 12:03:15 UTC (rev 6369)
@@ -45,9 +45,18 @@
 Pierre Gerard-Marchant for rewriting masked array functionality.
 Roberto de Almeida for the buffered array iterator.
 Alan McIntyre for updating the NumPy test framework to use nose, improve
-    the test coverage, and enhancing the test system documentation 
+    the test coverage, and enhancing the test system documentation.
+Joe Harrington for administering the 2008 Documentation Sprint.
 
 NumPy is based on the Numeric (Jim Hugunin, Paul Dubois, Konrad
 Hinsen, and David Ascher) and NumArray (Perry Greenfield, J Todd
 Miller, Rick White and Paul Barrett) projects.  We thank them for
 paving the way ahead.
+
+Institutions
+------------
+
+Enthought for providing resources and finances for development of NumPy.
+UC Berkeley for providing travel money and hosting numerous sprints.
+The University of Central Florida for funding the 2008 Documentation Marathon.
+The University of Stellenbosch for hosting the buildbot.

Modified: branches/dynamic_cpu_configuration/doc/source/reference/arrays.classes.rst
===================================================================
--- branches/dynamic_cpu_configuration/doc/source/reference/arrays.classes.rst	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/doc/source/reference/arrays.classes.rst	2009-02-15 12:03:15 UTC (rev 6369)
@@ -261,23 +261,8 @@
 Masked arrays (:mod:`numpy.ma`)
 ===============================
 
-.. seealso:: :ref:`routines.ma`
+.. seealso:: :ref:`maskedarray`
 
-.. XXX: masked array documentation should be improved
-
-.. currentmodule:: numpy
-
-.. index::
-   single: masked arrays
-
-.. autosummary::
-   :toctree: generated/
-
-   ma.masked_array
-
-.. automodule:: numpy.ma
-
-
 Standard container class
 ========================
 

Modified: branches/dynamic_cpu_configuration/doc/source/reference/arrays.ndarray.rst
===================================================================
--- branches/dynamic_cpu_configuration/doc/source/reference/arrays.ndarray.rst	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/doc/source/reference/arrays.ndarray.rst	2009-02-15 12:03:15 UTC (rev 6369)
@@ -135,6 +135,8 @@
    is automatically made.
 
 
+.. _arrays.ndarray.attributes:
+
 Array attributes
 ================
 
@@ -217,6 +219,9 @@
 
 .. note:: XXX: update and check these docstrings.
 
+
+.. _array.ndarray.methods:
+
 Array methods
 =============
 

Modified: branches/dynamic_cpu_configuration/doc/source/reference/arrays.rst
===================================================================
--- branches/dynamic_cpu_configuration/doc/source/reference/arrays.rst	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/doc/source/reference/arrays.rst	2009-02-15 12:03:15 UTC (rev 6369)
@@ -43,4 +43,5 @@
    arrays.dtypes
    arrays.indexing
    arrays.classes
+   maskedarray
    arrays.interface

Copied: branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.baseclass.rst (from rev 6368, trunk/doc/source/reference/maskedarray.baseclass.rst)

Copied: branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.generic.rst (from rev 6368, trunk/doc/source/reference/maskedarray.generic.rst)

Copied: branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.rst (from rev 6368, trunk/doc/source/reference/maskedarray.rst)

Modified: branches/dynamic_cpu_configuration/doc/sphinxext/docscrape.py
===================================================================
--- branches/dynamic_cpu_configuration/doc/sphinxext/docscrape.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/doc/sphinxext/docscrape.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -406,11 +406,13 @@
 
 
 class FunctionDoc(NumpyDocString):
-    def __init__(self, func, role='func'):
+    def __init__(self, func, role='func', doc=None):
         self._f = func
         self._role = role # e.g. "func" or "meth"
+        if doc is None:
+            doc = inspect.getdoc(func) or ''
         try:
-            NumpyDocString.__init__(self,inspect.getdoc(func) or '')
+            NumpyDocString.__init__(self, doc)
         except ValueError, e:
             print '*'*78
             print "ERROR: '%s' while parsing `%s`" % (e, self._f)
@@ -459,7 +461,7 @@
 
 
 class ClassDoc(NumpyDocString):
-    def __init__(self,cls,modulename='',func_doc=FunctionDoc):
+    def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None):
         if not inspect.isclass(cls):
             raise ValueError("Initialise using a class. Got %r" % cls)
         self._cls = cls
@@ -470,8 +472,11 @@
         self._name = cls.__name__
         self._func_doc = func_doc
 
-        NumpyDocString.__init__(self, pydoc.getdoc(cls))
+        if doc is None:
+            doc = pydoc.getdoc(cls)
 
+        NumpyDocString.__init__(self, doc)
+
     @property
     def methods(self):
         return [name for name,func in inspect.getmembers(self._cls)

Modified: branches/dynamic_cpu_configuration/doc/sphinxext/docscrape_sphinx.py
===================================================================
--- branches/dynamic_cpu_configuration/doc/sphinxext/docscrape_sphinx.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/doc/sphinxext/docscrape_sphinx.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -115,7 +115,7 @@
 class SphinxClassDoc(SphinxDocString, ClassDoc):
     pass
 
-def get_doc_object(obj, what=None):
+def get_doc_object(obj, what=None, doc=None):
     if what is None:
         if inspect.isclass(obj):
             what = 'class'
@@ -126,8 +126,11 @@
         else:
             what = 'object'
     if what == 'class':
-        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc)
+        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
     elif what in ('function', 'method'):
-        return SphinxFunctionDoc(obj, '')
+        return SphinxFunctionDoc(obj, '', doc=doc)
     else:
-        return SphinxDocString(pydoc.getdoc(obj))
+        if doc is None:
+            doc = pydoc.getdoc(obj)
+        return SphinxDocString(doc)
+

Modified: branches/dynamic_cpu_configuration/doc/sphinxext/numpydoc.py
===================================================================
--- branches/dynamic_cpu_configuration/doc/sphinxext/numpydoc.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/doc/sphinxext/numpydoc.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -28,7 +28,7 @@
                               re.I|re.S)
         lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
     else:
-        doc = get_doc_object(obj, what)
+        doc = get_doc_object(obj, what, "\n".join(lines))
         lines[:] = str(doc).split("\n")
 
     if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \

Modified: branches/dynamic_cpu_configuration/numpy/add_newdocs.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/add_newdocs.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/add_newdocs.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -8,140 +8,6 @@
 
 from lib import add_newdoc
 
-add_newdoc('numpy.core', 'dtype',
-"""Create a data type.
-
-A numpy array is homogeneous, and contains elements described by a
-dtype.  A dtype can be constructed from different combinations of
-fundamental numeric types, as illustrated below.
-
-Examples
---------
-
-Using array-scalar type:
->>> np.dtype(np.int16)
-dtype('int16')
-
-Record, one field name 'f1', containing int16:
->>> np.dtype([('f1', np.int16)])
-dtype([('f1', '<i2')])
-
-Record, one field named 'f1', in itself containing a record with one field:
->>> np.dtype([('f1', [('f1', np.int16)])])
-dtype([('f1', [('f1', '<i2')])])
-
-Record, two fields: the first field contains an unsigned int, the
-second an int32:
->>> np.dtype([('f1', np.uint), ('f2', np.int32)])
-dtype([('f1', '<u4'), ('f2', '<i4')])
-
-Using array-protocol type strings:
->>> np.dtype([('a','f8'),('b','S10')])
-dtype([('a', '<f8'), ('b', '|S10')])
-
-Using comma-separated field formats.  The shape is (2,3):
->>> np.dtype("i4, (2,3)f8")
-dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
-
-Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
-is a flexible type, here of size 10:
->>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
-dtype([('hello', '<i4', 3), ('world', '|V10')])
-
-Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
-the offsets in bytes:
->>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
-dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
-
-Using dictionaries.  Two fields named 'gender' and 'age':
->>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
-dtype([('gender', '|S1'), ('age', '|u1')])
-
-Offsets in bytes, here 0 and 25:
->>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
-dtype([('surname', '|S25'), ('age', '|u1')])
-
-""")
-
-add_newdoc('numpy.core', 'dtype',
-    """
-    dtype(obj, align=False, copy=False)
-
-    Create a data type object.
-
-    A numpy array is homogeneous, and contains elements described by a
-    dtype object. A dtype object can be constructed from different
-    combinations of fundamental numeric types.
-
-    Parameters
-    ----------
-    obj
-        Object to be converted to a data type object.
-    align : bool, optional
-        Add padding to the fields to match what a C compiler would output
-        for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
-        or a comma-separated string.
-    copy : bool, optional
-        Make a new copy of the data-type object. If ``False``, the result
-        may just be a reference to a built-in data-type object.
-
-    Examples
-    --------
-    Using array-scalar type:
-
-    >>> np.dtype(np.int16)
-    dtype('int16')
-
-    Record, one field name 'f1', containing int16:
-
-    >>> np.dtype([('f1', np.int16)])
-    dtype([('f1', '<i2')])
-
-    Record, one field named 'f1', in itself containing a record with one field:
-
-    >>> np.dtype([('f1', [('f1', np.int16)])])
-    dtype([('f1', [('f1', '<i2')])])
-
-    Record, two fields: the first field contains an unsigned int, the
-    second an int32:
-
-    >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
-    dtype([('f1', '<u4'), ('f2', '<i4')])
-
-    Using array-protocol type strings:
-
-    >>> np.dtype([('a','f8'),('b','S10')])
-    dtype([('a', '<f8'), ('b', '|S10')])
-
-    Using comma-separated field formats.  The shape is (2,3):
-
-    >>> np.dtype("i4, (2,3)f8")
-    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
-
-    Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
-    is a flexible type, here of size 10:
-
-    >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
-    dtype([('hello', '<i4', 3), ('world', '|V10')])
-
-    Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
-    the offsets in bytes:
-
-    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
-    dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
-
-    Using dictionaries.  Two fields named 'gender' and 'age':
-
-    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
-    dtype([('gender', '|S1'), ('age', '|u1')])
-
-    Offsets in bytes, here 0 and 25:
-
-    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
-    dtype([('surname', '|S25'), ('age', '|u1')])
-
-    """)
-
 ###############################################################################
 #
 # flatiter
@@ -150,7 +16,12 @@
 #
 ###############################################################################
 
-# attributes
+add_newdoc('numpy.core', 'flatiter',
+    """
+    """)
+
+# flatiter attributes
+
 add_newdoc('numpy.core', 'flatiter', ('base',
     """documentation needed
 
@@ -170,9 +41,8 @@
 
     """))
 
+# flatiter functions
 
-
-# functions
 add_newdoc('numpy.core', 'flatiter', ('__array__',
     """__array__(type=None) Get array from iterator
 
@@ -191,37 +61,37 @@
 #
 ###############################################################################
 
+add_newdoc('numpy.core', 'broadcast',
+    """
+    """)
+
 # attributes
+
 add_newdoc('numpy.core', 'broadcast', ('index',
     """current index in broadcasted result
 
     """))
 
-
 add_newdoc('numpy.core', 'broadcast', ('iters',
     """tuple of individual iterators
 
     """))
 
-
 add_newdoc('numpy.core', 'broadcast', ('nd',
     """number of dimensions of broadcasted result
 
     """))
 
-
 add_newdoc('numpy.core', 'broadcast', ('numiter',
     """number of iterators
 
     """))
 
-
 add_newdoc('numpy.core', 'broadcast', ('shape',
     """shape of broadcasted result
 
     """))
 
-
 add_newdoc('numpy.core', 'broadcast', ('size',
     """total size of broadcasted result
 
@@ -1997,6 +1867,32 @@
 
     Equivalent to a.view(a.dtype.newbytorder(byteorder))
 
+    Return array with dtype changed to interpret array data as
+    specified byte order.
+
+    Changes are also made in all fields and sub-arrays of the array
+    data type.
+
+    Parameters
+    ----------
+    new_order : string, optional
+        Byte order to force; a value from the byte order
+        specifications below.  The default value ('S') results in
+        swapping the current byte order.
+        `new_order` codes can be any of:
+         * {'<', 'L'} - little endian
+         * {'>', 'B'} - big endian
+         * {'=', 'N'} - native order
+         * 'S' - swap dtype from current to opposite endian
+         * {'|', 'I'} - ignore (no change to byte order)
+        The code does a case-insensitive check on the first letter of
+        `new_order` for these alternatives.  For example, any of '>'
+        or 'B' or 'b' or 'brian' are valid to specify big-endian.
+
+    Returns
+    -------
+    new_arr : array
+        array with the given change to the dtype byte order.
     """))
 
 
@@ -2555,6 +2451,25 @@
 
     """))
 
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.umath', 'frexp',
+    """
+    """)
+
+add_newdoc('numpy.core.umath', 'frompyfunc',
+    """
+    """)
+
+add_newdoc('numpy.core.umath', 'ldexp',
+    """
+    """)
+
 add_newdoc('numpy.core.umath','geterrobj',
     """geterrobj()
 
@@ -2584,6 +2499,102 @@
 
     """)
 
+
+##############################################################################
+#
+# lib._compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.lib._compiled_base', 'digitize',
+    """
+    digitize(x,bins)
+
+    Return the index of the bin to which each value of x belongs.
+
+    Each index i returned is such that bins[i-1] <= x < bins[i] if
+    bins is monotonically increasing, or bins[i-1] > x >= bins[i] if
+    bins is monotonically decreasing.
+
+    Beyond the bounds of the bins 0 or len(bins) is returned as appropriate.
+    """)
+
+add_newdoc('numpy.lib._compiled_base', 'bincount',
+    """
+    bincount(x,weights=None)
+
+    Return the number of occurrences of each value in x.
+
+    x must be a list of non-negative integers.  The output, b[i],
+    represents the number of times that i is found in x.  If weights
+    is specified, every occurrence of i at a position p contributes
+    weights[p] instead of 1.
+
+    See also: histogram, digitize, unique.
+    """)
+
+add_newdoc('numpy.lib._compiled_base', 'add_docstring',
+    """
+    add_docstring(obj, docstring)
+
+    Add a docstring to a built-in obj if possible.
+    If the obj already has a docstring raise a RuntimeError
+    If this routine does not know how to add a docstring to the object
+    raise a TypeError
+    """)
+
+add_newdoc('numpy.lib._compiled_base', 'packbits',
+    """
+    out = numpy.packbits(myarray, axis=None)
+
+    myarray : an integer type array whose elements should be packed to bits
+
+    This routine packs the elements of a binary-valued dataset into a
+    NumPy array of type uint8 ('B') whose bits correspond to
+    the logical (0 or nonzero) value of the input elements.
+    The dimension over which bit-packing is done is given by axis.
+    The shape of the output has the same number of dimensions as the input
+    (unless axis is None, in which case the output is 1-d).
+
+    Example:
+    >>> a = array([[[1,0,1],
+    ...             [0,1,0]],
+    ...            [[1,1,0],
+    ...             [0,0,1]]])
+    >>> b = numpy.packbits(a,axis=-1)
+    >>> b
+    array([[[160],[64]],[[192],[32]]], dtype=uint8)
+
+    Note that 160 = 128 + 32
+              192 = 128 + 64
+    """)
+
+add_newdoc('numpy.lib._compiled_base', 'unpackbits',
+    """
+    out = numpy.unpackbits(myarray, axis=None)
+
+    myarray - array of uint8 type where each element represents a bit-field
+        that should be unpacked into a boolean output array
+
+        The shape of the output array is either 1-d (if axis is None) or
+        the same shape as the input array with unpacking done along the
+        axis specified.
+    """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
 add_newdoc('numpy.core', 'ufunc',
     """
     Functions that operate element by element on whole arrays.
@@ -2636,6 +2647,12 @@
     """)
 
 
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
 add_newdoc('numpy.core', 'ufunc', ('reduce',
     """
     reduce(array, axis=0, dtype=None, out=None)
@@ -2815,3 +2832,680 @@
            [12, 15, 18]])
 
     """))
+
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype',
+    """
+    dtype(obj, align=False, copy=False)
+
+    Create a data type object.
+
+    A numpy array is homogeneous, and contains elements described by a
+    dtype object. A dtype object can be constructed from different
+    combinations of fundamental numeric types.
+
+    Parameters
+    ----------
+    obj
+        Object to be converted to a data type object.
+    align : bool, optional
+        Add padding to the fields to match what a C compiler would output
+        for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
+        or a comma-separated string.
+    copy : bool, optional
+        Make a new copy of the data-type object. If ``False``, the result
+        may just be a reference to a built-in data-type object.
+
+    Examples
+    --------
+    Using array-scalar type:
+
+    >>> np.dtype(np.int16)
+    dtype('int16')
+
+    Record, one field name 'f1', containing int16:
+
+    >>> np.dtype([('f1', np.int16)])
+    dtype([('f1', '<i2')])
+
+    Record, one field named 'f1', in itself containing a record with one field:
+
+    >>> np.dtype([('f1', [('f1', np.int16)])])
+    dtype([('f1', [('f1', '<i2')])])
+
+    Record, two fields: the first field contains an unsigned int, the
+    second an int32:
+
+    >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
+    dtype([('f1', '<u4'), ('f2', '<i4')])
+
+    Using array-protocol type strings:
+
+    >>> np.dtype([('a','f8'),('b','S10')])
+    dtype([('a', '<f8'), ('b', '|S10')])
+
+    Using comma-separated field formats.  The shape is (2,3):
+
+    >>> np.dtype("i4, (2,3)f8")
+    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+    Using tuples.  ``int`` is a fixed type, 3 the field's shape.  ``void``
+    is a flexible type, here of size 10:
+
+    >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
+    dtype([('hello', '<i4', 3), ('world', '|V10')])
+
+    Subdivide ``int16`` into 2 ``int8``'s, called x and y.  0 and 1 are
+    the offsets in bytes:
+
+    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+    dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
+
+    Using dictionaries.  Two fields named 'gender' and 'age':
+
+    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+    dtype([('gender', '|S1'), ('age', '|u1')])
+
+    Offsets in bytes, here 0 and 25:
+
+    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+    dtype([('surname', '|S25'), ('age', '|u1')])
+
+    """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+    '''
+    dt.byteorder
+
+    String giving byteorder of dtype
+
+    One of:
+    * '=' - native byteorder
+    * '<' - little endian
+    * '>' - big endian
+    * '|' - endian not relevant
+
+    Examples
+    --------
+    >>> dt = np.dtype('i2')
+    >>> dt.byteorder
+    '='
+    >>> # endian is not relevant for 8 bit numbers
+    >>> np.dtype('i1').byteorder
+    '|'
+    >>> # or ASCII strings
+    >>> np.dtype('S2').byteorder
+    '|'
+    >>> # Even if specific code is given, and it is native
+    >>> # '=' is the byteorder
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = sys_is_le and '<' or '>'
+    >>> swapped_code = sys_is_le and '>' or '<'
+    >>> dt = np.dtype(native_code + 'i2')
+    >>> dt.byteorder
+    '='
+    >>> # Swapped code shows up as itself
+    >>> dt = np.dtype(swapped_code + 'i2')
+    >>> dt.byteorder == swapped_code
+    True
+    '''))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('char',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('name',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('names',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('num',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('str',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
+    """
+    """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('type',
+    """
+    """))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
+    '''
+    newbyteorder(new_order='S')
+
+    Return a new dtype with a different byte order.
+
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Parameters
+    ----------
+    new_order : string, optional
+        Byte order to force; a value from the byte order
+        specifications below.  The default value ('S') results in
+        swapping the current byte order.
+        `new_order` codes can be any of:
+         * {'<', 'L'} - little endian
+         * {'>', 'B'} - big endian
+         * {'=', 'N'} - native order
+         * 'S' - swap dtype from current to opposite endian
+         * {'|', 'I'} - ignore (no change to byte order)
+        The code does a case-insensitive check on the first letter of
+        `new_order` for these alternatives.  For example, any of '>'
+        or 'B' or 'b' or 'brian' are valid to specify big-endian.
+
+    Returns
+    -------
+    new_dtype : dtype
+        New dtype object with the given change to the byte order.
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = sys_is_le and '<' or '>'
+    >>> swapped_code = sys_is_le and '>' or '<'
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+    '''))
+
+
+##############################################################################
+#
+# nd_grid instances
+#
+##############################################################################
+
+add_newdoc('numpy.lib.index_tricks', 'mgrid',
+    """
+    Construct a multi-dimensional filled "meshgrid".
+
+    Returns a mesh-grid when indexed.  The dimension and number of the
+    output arrays are equal to the number of indexing dimensions.  If
+    the step length is not a complex number, then the stop is not
+    inclusive.
+
+    However, if the step length is a **complex number** (e.g. 5j),
+    then the integer part of its magnitude is interpreted as
+    specifying the number of points to create between the start and
+    stop values, where the stop value **is inclusive**.
+
+    See also
+    --------
+    ogrid
+
+    Examples
+    --------
+    >>> np.mgrid[0:5,0:5]
+    array([[[0, 0, 0, 0, 0],
+            [1, 1, 1, 1, 1],
+            [2, 2, 2, 2, 2],
+            [3, 3, 3, 3, 3],
+            [4, 4, 4, 4, 4]],
+    <BLANKLINE>
+           [[0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4]]])
+    >>> np.mgrid[-1:1:5j]
+    array([-1. , -0.5,  0. ,  0.5,  1. ])
+    """)
+
+add_newdoc('numpy.lib.index_tricks', 'ogrid',
+    """
+    Construct a multi-dimensional open "meshgrid".
+
+    Returns an 'open' mesh-grid when indexed.  The dimension and
+    number of the output arrays are equal to the number of indexing
+    dimensions.  If the step length is not a complex number, then the
+    stop is not inclusive.
+
+    The returned mesh-grid is open (or not fleshed out), so that only
+    one dimension of each returned argument is greater than 1.
+
+    If the step length is a **complex number** (e.g. 5j), then the
+    integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values,
+    where the stop value **is inclusive**.
+
+    See also
+    --------
+    mgrid
+
+    Examples
+    --------
+    >>> np.ogrid[0:5,0:5]
+    [array([[0],
+            [1],
+            [2],
+            [3],
+            [4]]), array([[0, 1, 2, 3, 4]])]
+    """)
+
+   
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+    """
+    """)
+
+# Attributes
+    
+add_newdoc('numpy.core.numerictypes', 'generic', ('T',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('base',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('data',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('real',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('size',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
+    """
+    """))
+
+# Methods
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('all',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('any',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('item',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('max',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('min',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('put',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('round',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('std',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('take',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('var',
+    """
+    """))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('view',
+    """
+    """))
+   
+
+##############################################################################
+#
+# Documentation for other scalar classes
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'bool_',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'complex64',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'complex128',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'complex256',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float32',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float64',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float96',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'float128',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'int8',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'int16',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'int32',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'int64',
+    """
+    """)
+
+add_newdoc('numpy.core.numerictypes', 'object_',
+    """
+    """)

Modified: branches/dynamic_cpu_configuration/numpy/core/SConscript
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/SConscript	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/SConscript	2009-02-15 12:03:15 UTC (rev 6369)
@@ -211,6 +211,10 @@
         config.Define('DISTUTILS_USE_SDK', distutils_use_sdk,
                       "define to 1 to disable SMP support ")
 
+    if a == "Intel":
+        config.Define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1,
+                      "define to 1 to force long double format string to the" \
+                      " same as double (Lg -> g)")
 #--------------
 # Checking Blas
 #--------------

Modified: branches/dynamic_cpu_configuration/numpy/core/_internal.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/_internal.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/_internal.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -292,3 +292,22 @@
                 raise ValueError, "unknown field name: %s" % (name,)
         return tuple(list(order) + nameslist)
     raise ValueError, "unsupported order value: %s" % (order,)
+
+# Given an array with fields and a sequence of field names
+# construct a new array with just those fields copied over
+def _index_fields(ary, fields):
+    from multiarray import empty, dtype
+    dt = ary.dtype
+    new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
+    if ary.flags.f_contiguous:
+        order = 'F'
+    else:
+        order = 'C'
+
+    newarray = empty(ary.shape, dtype=new_dtype, order=order) 
+   
+    for name in fields:
+        newarray[name] = ary[name]
+
+    return newarray
+    

Modified: branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -65,6 +65,13 @@
 static int
 _import_array(void)
 {
+#ifdef WORDS_BIGENDIAN
+  union {
+    long i;
+    char c[sizeof(long)];
+  } bint = {1};
+#endif
+
   PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
   PyObject *c_api = NULL;
   if (numpy == NULL) return -1;
@@ -83,6 +90,17 @@
         (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
     return -1;
   }
+
+#ifdef WORDS_BIGENDIAN
+  if (bint.c[0] == 1) {
+    PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+        "python headers configured as big endian, but little endian arch "\
+        "detected: this is a python 2.6.* bug (see bug 4728 in python bug "\
+        "tracker )");
+    return -1;
+  }
+#endif
+
   return 0;
 }
 

Modified: branches/dynamic_cpu_configuration/numpy/core/setup.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/setup.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/setup.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -67,8 +67,8 @@
 
     # Mandatory functions: if not found, fail the build
     mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
-		"floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
-		"acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
+                "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
+                "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
 
     if not check_funcs_once(mandatory_funcs):
         raise SystemError("One of the required function to build numpy is not"
@@ -81,6 +81,14 @@
     optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh",
                          "rint", "trunc", "exp2", "log2"]
 
+    # XXX: hack to circumvent cpp pollution from python: python puts its
+    # config.h in the public namespace, so we have a clash for the common
+    # functions we test. We remove every function tested by python's autoconf,
+    # hoping their own tests are correct
+    if sys.version_info[0] == 2 and sys.version_info[1] >= 6:
+        for f in ["expm1", "log1p", "acosh", "atanh", "asinh"]:
+            optional_stdfuncs.remove(f)
+
     check_funcs(optional_stdfuncs)
 
     # C99 functions: float and long double versions
@@ -179,6 +187,14 @@
                                          headers=['stdlib.h']):
                     moredefs.append(('PyOS_ascii_strtod', 'strtod'))
 
+            if sys.platform == "win32":
+                from numpy.distutils.misc_util import get_build_architecture
+                # On win32, force long double format string to be 'g', not
+                # 'Lg', since the MS runtime does not support long double whose
+                # size is > sizeof(double)
+                if get_build_architecture()=="Intel":
+                    moredefs.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
+
             target_f = open(target,'a')
             for d in moredefs:
                 if isinstance(d,str):
@@ -322,6 +338,7 @@
     deps = [join('src','arrayobject.c'),
             join('src','arraymethods.c'),
             join('src','scalartypes.inc.src'),
+            join('src','numpyos.c'),
             join('src','arraytypes.inc.src'),
             join('src','_signbit.c'),
             join('src','ucsnarrow.c'),

Modified: branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c	2009-02-15 12:03:15 UTC (rev 6369)
@@ -2827,10 +2827,10 @@
     int nd, fancy;
     PyArrayObject *other;
     PyArrayMapIterObject *mit;
+    PyObject *obj;
 
     if (PyString_Check(op) || PyUnicode_Check(op)) {
         if (self->descr->names) {
-            PyObject *obj;
             obj = PyDict_GetItem(self->descr->fields, op);
             if (obj != NULL) {
                 PyArray_Descr *descr;
@@ -2852,6 +2852,34 @@
         return NULL;
     }
 
+    /* Check for multiple field access 
+     */
+    if (self->descr->names && PySequence_Check(op) && !PyTuple_Check(op)) {
+	int seqlen, i;
+	seqlen = PySequence_Size(op);
+	for (i=0; i<seqlen; i++) {
+	    obj = PySequence_GetItem(op, i);
+	    if (!PyString_Check(obj) && !PyUnicode_Check(obj)) {
+		Py_DECREF(obj);
+		break;
+	    }
+	    Py_DECREF(obj);
+	}
+	/* extract multiple fields if all elements in sequence
+	   are either string or unicode (i.e. no break occurred). 
+	*/
+	fancy = ((seqlen > 0) && (i == seqlen));
+	if (fancy) { 
+	    PyObject *_numpy_internal;
+	    _numpy_internal = PyImport_ImportModule("numpy.core._internal");
+	    if (_numpy_internal == NULL) return NULL;
+	    obj = PyObject_CallMethod(_numpy_internal, "_index_fields",
+				      "OO", self, op);
+	    Py_DECREF(_numpy_internal);
+	    return obj;
+	}
+    }
+
     if (op == Py_Ellipsis) {
 	Py_INCREF(self);
 	return (PyObject *)self;

Modified: branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src	2009-02-15 12:03:15 UTC (rev 6369)
@@ -2,41 +2,17 @@
 #include "config.h"
 
 static double
-_getNAN(void) {
-#ifdef NAN
-    return NAN;
-#else
-    static double nan=0;
-
-    if (nan == 0) {
-        double mul = 1e100;
-        double tmp = 0.0;
-        double pinf=0;
-        pinf = mul;
-        for (;;) {
-            pinf *= mul;
-            if (pinf == tmp) break;
-            tmp = pinf;
-        }
-        nan = pinf / pinf;
-    }
-    return nan;
-#endif
-}
-
-
-static double
 MyPyFloat_AsDouble(PyObject *obj)
 {
     double ret = 0;
     PyObject *num;
 
     if (obj == Py_None) {
-        return _getNAN();
+        return NumPyOS_NAN;
     }
     num = PyNumber_Float(obj);
     if (num == NULL) {
-        return _getNAN();
+        return NumPyOS_NAN;
     }
     ret = PyFloat_AsDouble(num);
     Py_DECREF(num);
@@ -192,7 +168,7 @@
             op2 = op; Py_INCREF(op);
         }
         if (op2 == Py_None) {
-            oop.real = oop.imag = _getNAN();
+            oop.real = oop.imag = NumPyOS_NAN;
         }
         else {
             oop = PyComplex_AsCComplex (op2);
@@ -897,17 +873,30 @@
  */
 
 /**begin repeat
-
-#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE#
-#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble#
-#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT,"f","lf","Lf"#
+#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG#
+#type=short,ushort,int,uint,long,ulong,longlong,ulonglong#
+#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT#
 */
 static int
 @fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored))
 {
     return fscanf(fp, "%"@format@, ip);
 }
+/**end repeat**/
 
+/**begin repeat
+#fname=FLOAT,DOUBLE,LONGDOUBLE#
+#type=float,double,longdouble#
+*/
+static int
+@fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored))
+{
+    double result;
+    int ret;
+    ret = NumPyOS_ascii_ftolf(fp, &result);
+    *ip = (@type@) result;
+    return ret;
+}
 /**end repeat**/
 
 /**begin repeat
@@ -966,19 +955,15 @@
 #fname=FLOAT,DOUBLE,LONGDOUBLE#
 #type=float,double,longdouble#
 */
-#if (PY_VERSION_HEX >= 0x02040000) || defined(PyOS_ascii_strtod)
 static int
 @fname@_fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore))
 {
     double result;
 
-    result = PyOS_ascii_strtod(str, endptr);
+    result = NumPyOS_ascii_strtod(str, endptr);
     *ip = (@type@) result;
     return 0;
 }
-#else
-#define @fname@_fromstr NULL
-#endif
 /**end repeat**/
 
 

Modified: branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c	2009-02-15 12:03:15 UTC (rev 6369)
@@ -81,6 +81,10 @@
     return NULL;
 }
 
+/* XXX: We include c99 compat math module here because it is needed for
+ * numpyos.c (included by arrayobject). This is bad - we should separate
+ * declaration/implementation and share this in a lib. */
+#include "umath_funcs_c99.inc"
 
 /* Including this file is the only way I know how to declare functions
    static in each file, and store the pointers from functions in both
@@ -7705,6 +7709,9 @@
     PyObject *m, *d, *s;
     PyObject *c_api;
 
+    /* Initialize constants etc. */
+    NumPyOS_init();
+
     /* Create the module and add the functions */
     m = Py_InitModule("multiarray", array_module_methods);
     if (!m) goto err;

Copied: branches/dynamic_cpu_configuration/numpy/core/src/numpyos.c (from rev 6368, trunk/numpy/core/src/numpyos.c)

Modified: branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src	2009-02-15 12:03:15 UTC (rev 6369)
@@ -636,8 +636,11 @@
                                 &errobj) < 0)
             return NULL;
         first = 1;
-        if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first))
+        if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) {
+            Py_XDECREF(errobj);
             return NULL;
+        }
+        Py_XDECREF(errobj);
     }
 #endif
 
@@ -736,8 +739,11 @@
                                 &errobj) < 0)
             return NULL;
         first = 1;
-        if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first))
+        if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) {
+            Py_XDECREF(errobj);
             return NULL;
+        }
+        Py_XDECREF(errobj);
     }
 
 #if @isint@

Modified: branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src	2009-02-15 12:03:15 UTC (rev 6369)
@@ -5,6 +5,9 @@
 #endif
 #include "numpy/arrayscalars.h"
 
+#include "config.h"
+#include "numpyos.c"
+
 static PyBoolScalarObject _PyArrayScalar_BoolValues[2] = {
     {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0},
     {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1},
@@ -604,23 +607,36 @@
     return ret;
 }
 
+#ifdef FORCE_NO_LONG_DOUBLE_FORMATTING
+#undef NPY_LONGDOUBLE_FMT
+#define NPY_LONGDOUBLE_FMT NPY_DOUBLE_FMT
+#endif
+
 /**begin repeat
  * #name=float, double, longdouble#
  * #NAME=FLOAT, DOUBLE, LONGDOUBLE#
+ * #type=f, d, l#
  */
 
-#define FMT "%.*" NPY_@NAME@_FMT
-#define CFMT1 "%.*" NPY_@NAME@_FMT "j"
-#define CFMT2 "(%.*" NPY_@NAME@_FMT "%+.*" NPY_@NAME@_FMT "j)"
+#define _FMT1 "%%.%i" NPY_@NAME@_FMT
+#define _FMT2 "%%+.%i" NPY_@NAME@_FMT
 
 static void
 format_@name@(char *buf, size_t buflen, @name@ val, unsigned int prec)
 {
-    int cnt, i;
+    /* XXX: Find a correct size here for format string */
+    char format[64], *res;
+    int i, cnt;
 
-    cnt = PyOS_snprintf(buf, buflen, FMT, prec, val);
+    PyOS_snprintf(format, sizeof(format), _FMT1, prec);
+    res = NumPyOS_ascii_format@type@(buf, buflen, format, val, 0);
+    if (res == NULL) {
+        fprintf(stderr, "Error while formatting\n");
+	return;
+    }
 
     /* If nothing but digits after sign, append ".0" */
+    cnt = strlen(buf);
     for (i = (val < 0) ? 1 : 0; i < cnt; ++i) {
         if (!isdigit(Py_CHARMASK(buf[i]))) {
             break;
@@ -634,17 +650,39 @@
 static void
 format_c@name@(char *buf, size_t buflen, c@name@ val, unsigned int prec)
 {
+    /* XXX: Find a correct size here for format string */
+    char format[64];
+    char *res;
     if (val.real == 0.0) {
-        PyOS_snprintf(buf, buflen, CFMT1, prec, val.imag);
+        PyOS_snprintf(format, sizeof(format), _FMT1, prec);
+        res = NumPyOS_ascii_format@type@(buf, buflen-1, format, val.imag, 0);
+	if (res == NULL) {
+            fprintf(stderr, "Error while formatting\n");
+	    return;
+	}
+	strncat(buf, "j", 1);
     }
     else {
-        PyOS_snprintf(buf, buflen, CFMT2, prec, val.real, prec, val.imag);
+	char re[64], im[64];
+	PyOS_snprintf(format, sizeof(format), _FMT1, prec);
+        res = NumPyOS_ascii_format@type@(re, sizeof(re), format, val.real, 0);
+	if (res == NULL) {
+            fprintf(stderr, "Error while formatting\n");
+	    return;
+	}
+
+	PyOS_snprintf(format, sizeof(format), _FMT2, prec);
+        res = NumPyOS_ascii_format@type@(im, sizeof(im), format, val.imag, 0);
+	if (res == NULL) {
+            fprintf(stderr, "Error while formatting\n");
+	    return;
+	}
+	PyOS_snprintf(buf, buflen, "(%s%sj)", re, im);
     }
 }
 
-#undef FMT
-#undef CFMT1
-#undef CFMT2
+#undef _FMT1
+#undef _FMT2
 
 /**end repeat**/
 
@@ -736,7 +774,47 @@
 /**end repeat1**/
 /**end repeat**/
 
+/*
+ * float type print (control print a, where a is a float type instance)
+ */
+/**begin repeat
+ * #name=float, double, longdouble#
+ * #Name=Float, Double, LongDouble#
+ * #NAME=FLOAT, DOUBLE, LONGDOUBLE#
+ */
 
+static int
+@name@type_print(PyObject *v, FILE *fp, int flags)
+{
+	char buf[100];
+        @name@ val = ((Py@Name@ScalarObject *)v)->obval;
+
+	format_@name@(buf, sizeof(buf), val,
+		      (flags & Py_PRINT_RAW) ? @NAME@PREC_STR : @NAME@PREC_REPR);
+	Py_BEGIN_ALLOW_THREADS
+	fputs(buf, fp);
+	Py_END_ALLOW_THREADS
+	return 0;
+}
+
+static int
+c@name@type_print(PyObject *v, FILE *fp, int flags)
+{
+        /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */
+	char buf[202];
+        c@name@ val = ((PyC@Name@ScalarObject *)v)->obval;
+
+	format_c@name@(buf, sizeof(buf), val,
+		       (flags & Py_PRINT_RAW) ? @NAME@PREC_STR : @NAME@PREC_REPR);
+	Py_BEGIN_ALLOW_THREADS
+	fputs(buf, fp);
+	Py_END_ALLOW_THREADS
+	return 0;
+}
+
+/**end repeat**/
+
+
 /*
  * Could improve this with a PyLong_FromLongDouble(longdouble ldval)
  * but this would need some more work...
@@ -2254,7 +2332,9 @@
     0, 	/* nb_inplace_floor_divide */
     0, 	/* nb_inplace_true_divide */
     /* Added in release 2.5 */
+#if PY_VERSION_HEX >= 0x02050000
     0, 	/* nb_index */
+#endif
 };
 
 static PyObject *
@@ -3075,6 +3155,14 @@
     PyCDoubleArrType_Type.tp_@name@  = cdoubletype_@name@;
     /**end repeat**/
 
+    PyFloatArrType_Type.tp_print = floattype_print;
+    PyDoubleArrType_Type.tp_print = doubletype_print;
+    PyLongDoubleArrType_Type.tp_print = longdoubletype_print;
+
+    PyCFloatArrType_Type.tp_print = cfloattype_print;
+    PyCDoubleArrType_Type.tp_print = cdoubletype_print;
+    PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print;
+
     /* These need to be coded specially because getitem does not
        return a normal Python type
      */

Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -14,6 +14,9 @@
         self.data = arange(12, dtype=self.dtype)
         self.data.resize(self.shape)
 
+    def tearDown(self):
+        self.tmpfp.close()
+
     def test_roundtrip(self):
         # Write data to file
         fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',

Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,9 +1,12 @@
 import tempfile
 import sys
+import os
 import numpy as np
 from numpy.testing import *
 from numpy.core import *
 
+from test_print import in_foreign_locale
+
 class TestFlags(TestCase):
     def setUp(self):
         self.a = arange(10)
@@ -113,41 +116,6 @@
         d2 = dtype('f8')
         assert_equal(d2, dtype(float64))
 
-
-class TestFromstring(TestCase):
-    def test_binary(self):
-        a = fromstring('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',dtype='<f4')
-        assert_array_equal(a, array([1,2,3,4]))
-
-    def test_string(self):
-        a = fromstring('1,2,3,4', sep=',')
-        assert_array_equal(a, [1., 2., 3., 4.])
-
-    def test_counted_string(self):
-        a = fromstring('1,2,3,4', count=4, sep=',')
-        assert_array_equal(a, [1., 2., 3., 4.])
-        a = fromstring('1,2,3,4', count=3, sep=',')
-        assert_array_equal(a, [1., 2., 3.])
-
-    def test_string_with_ws(self):
-        a = fromstring('1 2  3     4   ', dtype=int, sep=' ')
-        assert_array_equal(a, [1, 2, 3, 4])
-
-    def test_counted_string_with_ws(self):
-        a = fromstring('1 2  3     4   ', count=3, dtype=int, sep=' ')
-        assert_array_equal(a, [1, 2, 3])
-
-    def test_ascii(self):
-        a = fromstring('1 , 2 , 3 , 4', sep=',')
-        b = fromstring('1,2,3,4', dtype=float, sep=',')
-        assert_array_equal(a, [1.,2.,3.,4.])
-        assert_array_equal(a,b)
-
-    def test_malformed(self):
-        a = fromstring('1.234 1,234', sep=' ')
-        assert_array_equal(a, [1.234, 1.])
-
-
 class TestZeroRank(TestCase):
     def setUp(self):
         self.d = array(0), array('x', object)
@@ -812,43 +780,155 @@
         assert_array_equal(x[1][idx],np.sort(x[1]))
 
 
-class TestFromToFile(TestCase):
+class TestIO(object):
+    """Test tofile, fromfile, tostring, and fromstring"""
+    
     def setUp(self):
-        shape = (4,7)
+        shape = (2,4,3)
         rand = np.random.random
-
         self.x = rand(shape) + rand(shape).astype(np.complex)*1j
+        self.x[0,:,1] = [nan, inf, -inf, nan]
         self.dtype = self.x.dtype
+        self.filename = tempfile.mktemp()
 
-    def test_file(self):
-        # Test disabled on Windows, since the tempfile does not flush
-        # properly.  The test ensures that both filenames and file
-        # objects are accepted in tofile and fromfile, so as long as
-        # it runs on at least one platform, we should be ok.
-        if not sys.platform.startswith('win'):
-            tmp_file = tempfile.NamedTemporaryFile('wb',
-                                                   prefix='numpy_tofromfile')
-            self.x.tofile(tmp_file.file)
-            tmp_file.flush()
-            y = np.fromfile(tmp_file.name,dtype=self.dtype)
-            assert_array_equal(y,self.x.flat)
+    def tearDown(self):
+        if os.path.isfile(self.filename):
+            os.unlink(self.filename)
+            #tmp_file.close()
 
-    def test_filename(self):
-        filename = tempfile.mktemp()
-        f = open(filename,'wb')
+    def test_roundtrip_file(self):
+        f = open(self.filename, 'wb')
         self.x.tofile(f)
         f.close()
-        y = np.fromfile(filename,dtype=self.dtype)
-        assert_array_equal(y,self.x.flat)
+        # NB. doesn't work with flush+seek, due to use of C stdio
+        f = open(self.filename, 'rb')
+        y = np.fromfile(f, dtype=self.dtype)
+        f.close()
+        assert_array_equal(y, self.x.flat)
+        os.unlink(self.filename)
 
+    def test_roundtrip_filename(self):
+        self.x.tofile(self.filename)
+        y = np.fromfile(self.filename, dtype=self.dtype)
+        assert_array_equal(y, self.x.flat)
+
+    def test_roundtrip_binary_str(self):
+        s = self.x.tostring()
+        y = np.fromstring(s, dtype=self.dtype)
+        assert_array_equal(y, self.x.flat)
+
+        s = self.x.tostring('F')
+        y = np.fromstring(s, dtype=self.dtype)
+        assert_array_equal(y, self.x.flatten('F'))
+
+    def test_roundtrip_str(self):
+        x = self.x.real.ravel()
+        s = "@".join(map(str, x))
+        y = np.fromstring(s, sep="@")
+        # NB. str imbues less precision
+        nan_mask = ~np.isfinite(x)
+        assert_array_equal(x[nan_mask], y[nan_mask])
+        assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
+
+    def test_roundtrip_repr(self):
+        x = self.x.real.ravel()
+        s = "@".join(map(repr, x))
+        y = np.fromstring(s, sep="@")
+        assert_array_equal(x, y)
+
+    def _check_from(self, s, value, **kw):
+        y = np.fromstring(s, **kw)
+        assert_array_equal(y, value)
+
+        f = open(self.filename, 'wb')
+        f.write(s)
+        f.close()
+        y = np.fromfile(self.filename, **kw)
+        assert_array_equal(y, value)
+
+    def test_nan(self):
+        self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
+                         [nan, nan, nan, nan, nan, nan, nan],
+                         sep=' ')
+
+    def test_inf(self):
+        self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF",
+                         [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ')
+
+    def test_numbers(self):
+        self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
+                         [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
+
+    def test_binary(self):
+        self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+                         array([1,2,3,4]),
+                         dtype='<f4')
+
+    def test_string(self):
+        self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
+
+    def test_counted_string(self):
+        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
+        self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
+        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
+
+    def test_string_with_ws(self):
+        self._check_from('1 2  3     4   ', [1, 2, 3, 4], dtype=int, sep=' ')
+
+    def test_counted_string_with_ws(self):
+        self._check_from('1 2  3     4   ', [1,2,3], count=3, dtype=int,
+                         sep=' ')
+
+    def test_ascii(self):
+        self._check_from('1 , 2 , 3 , 4', [1.,2.,3.,4.], sep=',')
+        self._check_from('1,2,3,4', [1.,2.,3.,4.], dtype=float, sep=',')
+
     def test_malformed(self):
-        filename = tempfile.mktemp()
-        f = open(filename,'w')
-        f.write("1.234 1,234")
+        self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
+
+    def test_long_sep(self):
+        self._check_from('1_x_3_x_4_x_5', [1,3,4,5], sep='_x_')
+
+    def test_dtype(self):
+        v = np.array([1,2,3,4], dtype=np.int_)
+        self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
+
+    def test_tofile_sep(self):
+        x = np.array([1.51, 2, 3.51, 4], dtype=float)
+        f = open(self.filename, 'w')
+        x.tofile(f, sep=',')
         f.close()
-        y = np.fromfile(filename, sep=' ')
-        assert_array_equal(y, [1.234, 1.])
+        f = open(self.filename, 'r')
+        s = f.read()
+        f.close()
+        assert_equal(s, '1.51,2.0,3.51,4.0')
+        os.unlink(self.filename)
 
+    def test_tofile_format(self):
+        x = np.array([1.51, 2, 3.51, 4], dtype=float)
+        f = open(self.filename, 'w')
+        x.tofile(f, sep=',', format='%.2f')
+        f.close()
+        f = open(self.filename, 'r')
+        s = f.read()
+        f.close()
+        assert_equal(s, '1.51,2.00,3.51,4.00')
+
+    @in_foreign_locale
+    def _run_in_foreign_locale(self, func, fail=False):
+        np.testing.dec.knownfailureif(fail)(func)(self)
+
+    def test_locale(self):
+        yield self._run_in_foreign_locale, TestIO.test_numbers
+        yield self._run_in_foreign_locale, TestIO.test_nan
+        yield self._run_in_foreign_locale, TestIO.test_inf
+        yield self._run_in_foreign_locale, TestIO.test_counted_string
+        yield self._run_in_foreign_locale, TestIO.test_ascii
+        yield self._run_in_foreign_locale, TestIO.test_malformed
+        yield self._run_in_foreign_locale, TestIO.test_tofile_sep
+        yield self._run_in_foreign_locale, TestIO.test_tofile_format
+
+
 class TestFromBuffer(TestCase):
     def tst_basic(self,buffer,expected,kwargs):
         assert_array_equal(np.frombuffer(buffer,**kwargs),expected)
@@ -951,7 +1031,7 @@
         self.x = 2*ones((3,),dtype=int)
         self.y = 3*ones((3,),dtype=int)
         self.x2 = 2*ones((2,3), dtype=int)
-        self.y2 = 3*ones((2,3), dtype=int)        
+        self.y2 = 3*ones((2,3), dtype=int)
         self.ind = [0,0,1]
 
     def test_basic(self):
@@ -961,11 +1041,11 @@
     def test_broadcast1(self):
         A = np.choose(self.ind, (self.x2, self.y2))
         assert_equal(A, [[2,2,3],[2,2,3]])
-    
+
     def test_broadcast2(self):
         A = np.choose(self.ind, (self.x, self.y2))
         assert_equal(A, [[2,2,3],[2,2,3]])
-        
 
+
 if __name__ == "__main__":
     run_module_suite()

Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_numerictypes.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/tests/test_numerictypes.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/tests/test_numerictypes.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -97,7 +97,7 @@
 #    Creation tests
 ############################################################
 
-class create_zeros:
+class create_zeros(object):
     """Check the creation of heterogeneous arrays zero-valued"""
 
     def test_zeros0D(self):
@@ -140,7 +140,7 @@
     _descr = Ndescr
 
 
-class create_values:
+class create_values(object):
     """Check the creation of heterogeneous arrays with values"""
 
     def test_tuple(self):
@@ -200,7 +200,7 @@
 #    Reading tests
 ############################################################
 
-class read_values_plain:
+class read_values_plain(object):
     """Check the reading of values in heterogeneous arrays (plain)"""
 
     def test_access_fields(self):
@@ -232,7 +232,7 @@
     multiple_rows = 1
     _buffer = PbufferT
 
-class read_values_nested:
+class read_values_nested(object):
     """Check the reading of values in heterogeneous arrays (nested)"""
 
 
@@ -353,6 +353,16 @@
         res = np.find_common_type(['u8','i8','i8'],['f8'])
         assert(res == 'f8')
 
+class TestMultipleFields(TestCase):
+    def setUp(self):
+        self.ary = np.array([(1,2,3,4),(5,6,7,8)], dtype='i4,f4,i2,c8')
+    def _bad_call(self):
+        return self.ary['f0','f1']
+    def test_no_tuple(self):
+        self.failUnlessRaises(ValueError, self._bad_call)
+    def test_return(self):
+        res = self.ary[['f0','f2']].tolist()
+        assert(res == [(1,3), (5,7)])        
 
 if __name__ == "__main__":
     run_module_suite()

Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_print.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/tests/test_print.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/tests/test_print.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,34 +1,197 @@
 import numpy as np
 from numpy.testing import *
+import nose
 
-class TestPrint(TestCase):
+import locale
+import sys
+from StringIO import StringIO
 
-    def test_float_types(self) :
-        """ Check formatting.
+_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
 
-            This is only for the str function, and only for simple types.
-            The precision of np.float and np.longdouble aren't the same as the
-            python float precision.
 
-        """
-        for t in [np.float, np.double, np.longdouble] :
-            for x in [0, 1,-1, 1e10, 1e20] :
-                assert_equal(str(t(x)), str(float(x)))
+def check_float_type(tp):
+    for x in [0, 1,-1, 1e20] :
+        assert_equal(str(tp(x)), str(float(x)),
+                     err_msg='Failed str formatting for type %s' % tp)
 
-    def test_complex_types(self) :
-        """Check formatting.
+    if tp(1e10).itemsize > 4:
+        assert_equal(str(tp(1e10)), str(float('1e10')),
+                     err_msg='Failed str formatting for type %s' % tp)
+    else:
+        if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
+           sys.version_info[1] <= 5:
+            ref = '1e+010'
+        else:
+            ref = '1e+10'
+        assert_equal(str(tp(1e10)), ref,
+                     err_msg='Failed str formatting for type %s' % tp)
 
-            This is only for the str function, and only for simple types.
-            The precision of np.float and np.longdouble aren't the same as the
-            python float precision.
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+def test_float_types():
+    """ Check formatting.
 
-        """
-        for t in [np.cfloat, np.cdouble, np.clongdouble] :
-            for x in [0, 1,-1, 1e10, 1e20] :
-                assert_equal(str(t(x)), str(complex(x)))
-                assert_equal(str(t(x*1j)), str(complex(x*1j)))
-                assert_equal(str(t(x + x*1j)), str(complex(x + x*1j)))
+        This is only for the str function, and only for simple types.
+        The precision of np.float and np.longdouble aren't the same as the
+        python float precision.
 
+    """
+    for t in [np.float32, np.double, np.longdouble] :
+        yield check_float_type, t
 
+def check_nan_inf_float(tp):
+    for x in [np.inf, -np.inf, np.nan]:
+        assert_equal(str(tp(x)), _REF[x],
+                     err_msg='Failed str formatting for type %s' % tp)
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+def test_nan_inf_float():
+    """ Check formatting of nan & inf.
+
+        This is only for the str function, and only for simple types.
+        The precision of np.float and np.longdouble aren't the same as the
+        python float precision.
+
+    """
+    for t in [np.float32, np.double, np.longdouble] :
+        yield check_nan_inf_float, t
+
+def check_complex_type(tp):
+    for x in [0, 1,-1, 1e20] :
+        assert_equal(str(tp(x)), str(complex(x)),
+                     err_msg='Failed str formatting for type %s' % tp)
+        assert_equal(str(tp(x*1j)), str(complex(x*1j)),
+                     err_msg='Failed str formatting for type %s' % tp)
+        assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
+                     err_msg='Failed str formatting for type %s' % tp)
+
+    if tp(1e10).itemsize > 8:
+        assert_equal(str(tp(1e10)), str(complex(1e10)),
+                     err_msg='Failed str formatting for type %s' % tp)
+    else:
+        if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
+           sys.version_info[1] <= 5:
+            ref = '(1e+010+0j)'
+        else:
+            ref = '(1e+10+0j)'
+        assert_equal(str(tp(1e10)), ref,
+                     err_msg='Failed str formatting for type %s' % tp)
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+def test_complex_types():
+    """Check formatting of complex types.
+
+        This is only for the str function, and only for simple types.
+        The precision of np.float and np.longdouble aren't the same as the
+        python float precision.
+
+    """
+    for t in [np.complex64, np.cdouble, np.clongdouble] :
+        yield check_complex_type, t
+
+# print tests
+def _test_redirected_print(x, tp, ref=None):
+    file = StringIO()
+    file_tp = StringIO()
+    stdout = sys.stdout
+    try:
+        sys.stdout = file_tp
+        print tp(x)
+        sys.stdout = file
+        if ref:
+            print ref
+        else:
+            print x
+    finally:
+        sys.stdout = stdout
+
+    assert_equal(file.getvalue(), file_tp.getvalue(),
+                 err_msg='print failed for type%s' % tp)
+
+def check_float_type_print(tp):
+    for x in [0, 1,-1, 1e20]:
+        _test_redirected_print(float(x), tp)
+
+    for x in [np.inf, -np.inf, np.nan]:
+        _test_redirected_print(float(x), tp, _REF[x])
+
+    if tp(1e10).itemsize > 4:
+        _test_redirected_print(float(1e10), tp)
+    else:
+        if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
+           sys.version_info[1] <= 5:
+            ref = '1e+010'
+        else:
+            ref = '1e+10'
+        _test_redirected_print(float(1e10), tp, ref)
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+def check_complex_type_print(tp):
+    # We do not create complex with inf/nan directly because the feature is
+    # missing in python < 2.6
+    for x in [0, 1, -1, 1e20]:
+        _test_redirected_print(complex(x), tp)
+
+    if tp(1e10).itemsize > 8:
+        _test_redirected_print(complex(1e10), tp)
+    else:
+        if sys.platform == 'win32' and sys.version_info[0] <= 2 and \
+           sys.version_info[1] <= 5:
+            ref = '(1e+010+0j)'
+        else:
+            ref = '(1e+10+0j)'
+        _test_redirected_print(complex(1e10), tp, ref)
+
+    _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
+    _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
+    _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
+
+def test_float_type_print():
+    """Check formatting when using print """
+    for t in [np.float32, np.double, np.longdouble] :
+        yield check_float_type_print, t
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+def test_complex_type_print():
+    """Check formatting when using print """
+    for t in [np.complex64, np.cdouble, np.clongdouble] :
+        yield check_complex_type_print, t
+
+# Locale tests: scalar types formatting should be independent of the locale
+def in_foreign_locale(func):
+    # XXX: How to query locale on a given system?
+
+    # French is one language where the decimal is ',' not '.', and should be
+    # relatively common on many systems
+    def wrapper(*args, **kwargs):
+        curloc = locale.getlocale(locale.LC_NUMERIC)
+        try:
+            try:
+                if not sys.platform == 'win32':
+                    locale.setlocale(locale.LC_NUMERIC, 'fr_FR')
+                else:
+                    locale.setlocale(locale.LC_NUMERIC, 'FRENCH')
+            except locale.Error:
+                raise nose.SkipTest("Skipping locale test, because "
+                                    "French locale not found")
+            return func(*args, **kwargs)
+        finally:
+            locale.setlocale(locale.LC_NUMERIC, locale=curloc)
+    return nose.tools.make_decorator(func)(wrapper)
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+@in_foreign_locale
+def test_locale_single():
+    assert_equal(str(np.float32(1.2)), str(float(1.2)))
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+@in_foreign_locale
+def test_locale_double():
+    assert_equal(str(np.double(1.2)), str(float(1.2)))
+
+#@dec.knownfailureif(True, "formatting tests are known to fail")
+@in_foreign_locale
+def test_locale_longdouble():
+    assert_equal(str(np.longdouble(1.2)), str(float(1.2)))
+
 if __name__ == "__main__":
     run_module_suite()

Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,7 +1,7 @@
-
 from StringIO import StringIO
 import pickle
 import sys
+import gc
 from os import path
 from numpy.testing import *
 import numpy as np
@@ -1208,5 +1208,17 @@
         a = np.array(1)
         self.failUnlessRaises(ValueError, lambda x: x.choose([]), a)
 
+    def test_errobj_reference_leak(self, level=rlevel):
+        """Ticket #955"""
+        z = int(0)
+        p = np.int32(-1)
+
+        gc.collect()
+        n_before = len(gc.get_objects())
+        z**p  # this shouldn't leak a reference to errobj
+        gc.collect()
+        n_after = len(gc.get_objects())
+        assert n_before >= n_after, (n_before, n_after)
+
 if __name__ == "__main__":
     run_module_suite()

Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -17,7 +17,7 @@
 #    Creation tests
 ############################################################
 
-class create_zeros:
+class create_zeros(object):
     """Check the creation of zero-valued arrays"""
 
     def content_check(self, ua, ua_scalar, nbytes):
@@ -69,7 +69,7 @@
     ulen = 1009
 
 
-class create_values:
+class create_values(object):
     """Check the creation of unicode arrays with values"""
 
     def content_check(self, ua, ua_scalar, nbytes):
@@ -154,7 +154,7 @@
 #    Assignment tests
 ############################################################
 
-class assign_values:
+class assign_values(object):
     """Check the assignment of unicode arrays with values"""
 
     def content_check(self, ua, ua_scalar, nbytes):

Modified: branches/dynamic_cpu_configuration/numpy/ctypeslib.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ctypeslib.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ctypeslib.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -353,8 +353,3 @@
         result = tp.from_address(addr)
         result.__keep = ai
         return result
-
-
-def test(level=1, verbosity=1):
-    from numpy.testing import NumpyTest
-    return NumpyTest().test(level, verbosity)

Modified: branches/dynamic_cpu_configuration/numpy/distutils/command/config.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/command/config.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/command/config.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -5,11 +5,13 @@
 
 import os, signal
 import warnings
+import sys
 
 from distutils.command.config import config as old_config
 from distutils.command.config import LANG_EXT
 from distutils import log
 from distutils.file_util import copy_file
+import distutils
 from numpy.distutils.exec_command import exec_command
 from numpy.distutils.mingw32ccompiler import generate_manifest
 
@@ -39,6 +41,30 @@
     def _check_compiler (self):
         old_config._check_compiler(self)
         from numpy.distutils.fcompiler import FCompiler, new_fcompiler
+
+        if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc':
+            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
+            # initialize call query_vcvarsall, which throws an IOError, and
+            # causes an error along the way without much information. We try to
+            # catch it here, hoping it is early enough, and print a helpful
+            # message instead of Error: None.
+            if not self.compiler.initialized:
+                try:
+                    self.compiler.initialize()
+                except IOError, e:
+                    msg = """\
+Could not initialize compiler instance: do you have Visual Studio
+installed ? If you are trying to build with mingw, please use python setup.py
+build -c mingw32 instead ). If you have Visual Studio installed, check it is
+correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for
+2.5, etc...). Original exception was: %s, and the Compiler
+class was %s
+============================================================================""" \
+                        % (e, self.compiler.__class__.__name__)
+                    print """\
+============================================================================"""
+                    raise distutils.errors.DistutilsPlatformError(msg)
+
         if not isinstance(self.fcompiler, FCompiler):
             self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                            dry_run=self.dry_run, force=1,

Modified: branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -361,9 +361,13 @@
 
             try:
                 minver = "0.9.3"
-                from numscons import get_version
-                if get_version() < minver:
-                    raise ValueError()
+                try:
+                    # version_info was added in 0.10.0
+                    from numscons import version_info
+                except ImportError:
+                    from numscons import get_version
+                    if get_version() < minver:
+                        raise ValueError()
             except ImportError:
                 raise RuntimeError("You need numscons >= %s to build numpy "\
                                    "with numscons (imported numscons path " \

Modified: branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -85,6 +85,10 @@
                 print 'Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg)
             else:
                 raise
+        except IOError, e:
+            if not "vcvarsall.bat" in str(e):
+                print "Unexpected IOError in", __file__
+                raise e
 
     executables = {
         'version_cmd'  : ['<F90>', "/what"],

Modified: branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -87,21 +87,29 @@
     def get_flags_linker_so(self):
         opt = self.linker_so[1:]
         if sys.platform=='darwin':
-            # MACOSX_DEPLOYMENT_TARGET must be at least 10.3. This is
-            # a reasonable default value even when building on 10.4 when using
-            # the official Python distribution and those derived from it (when
-            # not broken).
             target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
-            if target is None or target == '':
-                target = '10.3'
-            major, minor = target.split('.')
-            if int(minor) < 3:
-                minor = '3'
-                warnings.warn('Environment variable '
-                    'MACOSX_DEPLOYMENT_TARGET reset to %s.%s' % (major, minor))
-            os.environ['MACOSX_DEPLOYMENT_TARGET'] = '%s.%s' % (major,
-                minor)
-
+            # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
+            # and leave it alone.  But, distutils will complain if the 
+            # environment's value is different from the one in the Python 
+            # Makefile used to build Python.  We let distutils handle this 
+            # error checking.
+            if not target:
+                # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, 
+                # we try to get it first from the Python Makefile and then we 
+                # fall back to setting it to 10.3 to maximize the set of 
+                # versions we can work with.  This is a reasonable default
+                # even when using the official Python dist and those derived
+                # from it.
+                import distutils.sysconfig as sc
+                g = {}
+                filename = sc.get_makefile_filename()
+                sc.parse_makefile(filename, g)
+                target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
+                os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
+                if target == '10.3':
+                    s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
+                    warnings.warn(s)
+            
             opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
         else:
             opt.append("-shared")
@@ -272,30 +280,30 @@
 
     def get_library_dirs(self):
         opt = GnuFCompiler.get_library_dirs(self)
-	if sys.platform == 'win32':
-	    c_compiler = self.c_compiler
-	    if c_compiler and c_compiler.compiler_type == "msvc":
-		target = self.get_target()
-		if target:
+        if sys.platform == 'win32':
+            c_compiler = self.c_compiler
+            if c_compiler and c_compiler.compiler_type == "msvc":
+                target = self.get_target()
+                if target:
                     d = os.path.normpath(self.get_libgcc_dir())
-		    root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir)
-		    mingwdir = os.path.normpath(os.path.join(root, target, "lib"))
-		    full = os.path.join(mingwdir, "libmingwex.a")
-		    if os.path.exists(full):
-			opt.append(mingwdir)
-	return opt
+                    root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir)
+                    mingwdir = os.path.normpath(os.path.join(root, target, "lib"))
+                    full = os.path.join(mingwdir, "libmingwex.a")
+                    if os.path.exists(full):
+                        opt.append(mingwdir)
+        return opt
 
     def get_libraries(self):
         opt = GnuFCompiler.get_libraries(self)
         if sys.platform == 'darwin':
             opt.remove('cc_dynamic')
-	if sys.platform == 'win32':
-	    c_compiler = self.c_compiler
-	    if c_compiler and c_compiler.compiler_type == "msvc":
-		if "gcc" in opt:
-		    i = opt.index("gcc")
-		    opt.insert(i+1, "mingwex")
-		    opt.insert(i+1, "mingw32")
+        if sys.platform == 'win32':
+            c_compiler = self.c_compiler
+            if c_compiler and c_compiler.compiler_type == "msvc":
+                if "gcc" in opt:
+                    i = opt.index("gcc")
+                    opt.insert(i+1, "mingwex")
+                    opt.insert(i+1, "mingw32")
         return opt
 
     def get_target(self):
@@ -303,9 +311,9 @@
                                       ['-v'],
                                       use_tee=0)
         if not status:
-	    m = TARGET_R.search(output)
-	    if m:
-	        return m.group(1)	
+            m = TARGET_R.search(output)
+            if m:
+                return m.group(1)
         return ""
 
 if __name__ == '__main__':

Modified: branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,6 +1,7 @@
 import re
 import sys
 import os
+import subprocess
 
 __doc__ = """This module generates a DEF file from the symbols in
 an MSVC-compiled DLL import library.  It correctly discriminates between
@@ -59,13 +60,13 @@
         deffile = None
     return libfile, deffile
 
-def getnm(nm_cmd = 'nm -Cs python%s.lib' % py_ver):
+def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
     """Returns the output of nm_cmd via a pipe.
 
 nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
-    f = os.popen(nm_cmd)
-    nm_output = f.read()
-    f.close()
+    f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE)
+    nm_output = f.stdout.read()
+    f.stdout.close()
     return nm_output
 
 def parse_nm(nm_output):
@@ -107,7 +108,7 @@
         deffile = sys.stdout
     else:
         deffile = open(deffile, 'w')
-    nm_cmd = '%s %s' % (DEFAULT_NM, libfile)
+    nm_cmd = [str(DEFAULT_NM), str(libfile)]
     nm_output = getnm(nm_cmd)
     dlist, flist = parse_nm(nm_output)
     output_def(dlist, flist, DEF_HEADER, deffile)

Modified: branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -9,6 +9,7 @@
 """
 
 import os
+import subprocess
 import sys
 import log
 
@@ -50,9 +51,10 @@
         # get_versions methods regex
         if self.gcc_version is None:
             import re
-            out = os.popen('gcc -dumpversion','r')
-            out_string = out.read()
-            out.close()
+            p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
+                                 stdout=subprocess.PIPE)
+            out_string = p.stdout.read()
+            p.stdout.close()
             result = re.search('(\d+\.\d+)',out_string)
             if result:
                 self.gcc_version = StrictVersion(result.group(1))
@@ -227,20 +229,36 @@
     #    raise DistutilsPlatformError, msg
     return
 
+#=====================================
+# Dealing with Visual Studio MANIFESTS
+#=====================================
+
 # Functions to deal with visual studio manifests. Manifest are a mechanism to
 # enforce strong DLL versioning on windows, and has nothing to do with
 # distutils MANIFEST. manifests are XML files with version info, and used by
-# the OS loader; they are necessary when linking against a DLL no in the system
-# path; in particular, python 2.6 is built against the MS runtime 9 (the one
-# from VS 2008), which is not available on most windows systems; python 2.6
-# installer does install it in the Win SxS (Side by side) directory, but this
-# requires the manifest too. This is a big mess, thanks MS for a wonderful
-# system.
+# the OS loader; they are necessary when linking against a DLL not in the
+# system path; in particular, official python 2.6 binary is built against the
+# MS runtime 9 (the one from VS 2008), which is not available on most windows
+# systems; python 2.6 installer does install it in the Win SxS (Side by side)
+# directory, but this requires the manifest for this to work. This is a big
+# mess, thanks MS for a wonderful system.
 
-# XXX: ideally, we should use exactly the same version as used by python, but I
-# have no idea how to obtain the exact version from python. We could use the
-# strings utility on python.exe, maybe ?
-_MSVCRVER_TO_FULLVER = {'90': "9.0.21022.8"}
+# XXX: ideally, we should use exactly the same version as used by python. I
+# submitted a patch to get this version, but it was only included for python
+# 2.6.1 and above. So for versions below, we use a "best guess".
+_MSVCRVER_TO_FULLVER = {}
+if sys.platform == 'win32':
+    try:
+        import msvcrt
+        if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
+            _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION
+        else:
+            _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
+    except ImportError:
+        # If we are here, means python was not built with MSVC. Not sure what to do
+        # in that case: manifest building will fail, but it should not be used in
+        # that case anyway
+        log.warn('Cannot import msvcrt: using manifest will not be possible')
 
 def msvc_manifest_xml(maj, min):
     """Given a major and minor version of the MSVCR, returns the
@@ -311,15 +329,15 @@
 def configtest_name(config):
     base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
     return os.path.splitext(base)[0]
-       
+
 def manifest_name(config):
+    # Get configtest name (including suffix)
+    # Get configest name (including suffix)
     root = configtest_name(config)
     exext = config.compiler.exe_extension
     return root + exext + ".manifest"
 
 def rc_name(config):
+    # Get configtest name (including suffix)
+    # Get configest name (including suffix)
     root = configtest_name(config)
     return root + ".rc"
 

Modified: branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -6,6 +6,7 @@
 import glob
 import atexit
 import tempfile
+import subprocess
 
 try:
     set
@@ -1340,7 +1341,10 @@
         revision = None
         m = None
         try:
-            sin, sout = os.popen4('svnversion')
+            p = subprocess.Popen(['svnversion'], shell=True,
+                    stdout=subprocess.PIPE, stderr=STDOUT,
+                    close_fds=True)
+            sout = p.stdout
             m = re.match(r'(?P<revision>\d+)', sout.read())
         except:
             pass

Modified: branches/dynamic_cpu_configuration/numpy/distutils/system_info.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/distutils/system_info.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/distutils/system_info.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -128,6 +128,50 @@
 from numpy.distutils.misc_util import is_sequence, is_string
 from numpy.distutils.command.config import config as cmd_config
 
+# Determine number of bits
+import platform
+_bits = {'32bit':32,'64bit':64}
+platform_bits = _bits[platform.architecture()[0]]
+
+def libpaths(paths,bits):
+    """Return a list of library paths valid on 32 or 64 bit systems.
+
+    Inputs:
+      paths : sequence
+        A sequence of strings (typically paths)
+      bits : int
+        An integer, the only valid values are 32 or 64.  A ValueError exception
+      is raised otherwise.
+
+    Examples:
+
+    Consider a list of directories
+    >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
+
+    For a 32-bit platform, this is already valid:
+    >>> libpaths(paths,32)
+    ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
+
+    On 64 bits, we prepend the '64' postfix
+    >>> libpaths(paths,64)
+    ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
+    '/usr/lib64', '/usr/lib']
+    """
+    if bits not in (32, 64):
+        raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
+
+    # Handle 32bit case
+    if bits==32:
+        return paths
+
+    # Handle 64bit case
+    out = []
+    for p in paths:
+        out.extend([p+'64', p])
+
+    return out
+
+
 if sys.platform == 'win32':
     default_lib_dirs = ['C:\\',
                         os.path.join(distutils.sysconfig.EXEC_PREFIX,
@@ -137,24 +181,16 @@
     default_x11_lib_dirs = []
     default_x11_include_dirs = []
 else:
-    default_lib_dirs = ['/usr/local/lib', '/opt/lib', '/usr/lib',
-                        '/opt/local/lib', '/sw/lib']
+    default_lib_dirs = libpaths(['/usr/local/lib','/opt/lib','/usr/lib',
+                                 '/opt/local/lib','/sw/lib'], platform_bits)
     default_include_dirs = ['/usr/local/include',
                             '/opt/include', '/usr/include',
-                            '/opt/local/include', '/sw/include']
+                            '/opt/local/include', '/sw/include',
+                            '/usr/include/suitesparse']
     default_src_dirs = ['.','/usr/local/src', '/opt/src','/sw/src']
 
-    try:
-        platform = os.uname()
-        bit64 = platform[-1].endswith('64')
-    except:
-        bit64 = False
-
-    if bit64:
-        default_x11_lib_dirs = ['/usr/lib64']
-    else:
-        default_x11_lib_dirs = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
-
+    default_x11_lib_dirs = libpaths(['/usr/X11R6/lib','/usr/X11/lib',
+                                     '/usr/lib'], platform_bits)
     default_x11_include_dirs = ['/usr/X11R6/include','/usr/X11/include',
                                 '/usr/include']
 
@@ -364,14 +400,16 @@
         self.files.extend(get_standard_file('.numpy-site.cfg'))
         self.files.extend(get_standard_file('site.cfg'))
         self.parse_config_files()
-        self.search_static_first = self.cp.getboolean(self.section,
-                                                      'search_static_first')
+        if self.section is not None:
+            self.search_static_first = self.cp.getboolean(self.section,
+                                                          'search_static_first')
         assert isinstance(self.search_static_first, int)
 
     def parse_config_files(self):
         self.cp.read(self.files)
         if not self.cp.has_section(self.section):
-            self.cp.add_section(self.section)
+            if self.section is not None:
+                self.cp.add_section(self.section)
 
     def calc_libraries_info(self):
         libs = self.get_libraries()

Copied: branches/dynamic_cpu_configuration/numpy/doc/constants.py (from rev 6368, trunk/numpy/doc/constants.py)

Modified: branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -472,15 +472,17 @@
 cppmacros['CHECKSTRING']="""\
 #define CHECKSTRING(check,tcheck,name,show,var)\\
 \tif (!(check)) {\\
-\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
-\t\tfprintf(stderr,show\"\\n\",slen(var),var);\\
+\t\tchar errstring[256];\\
+\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
+\t\tPyErr_SetString(#modulename#_error, errstring);\\
 \t\t/*goto capi_fail;*/\\
 \t} else """
 cppmacros['CHECKSCALAR']="""\
 #define CHECKSCALAR(check,tcheck,name,show,var)\\
 \tif (!(check)) {\\
-\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
-\t\tfprintf(stderr,show\"\\n\",var);\\
+\t\tchar errstring[256];\\
+\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
+\t\tPyErr_SetString(#modulename#_error,errstring);\\
 \t\t/*goto capi_fail;*/\\
 \t} else """
 ## cppmacros['CHECKDIMS']="""\

Modified: branches/dynamic_cpu_configuration/numpy/f2py/f2py.1
===================================================================
--- branches/dynamic_cpu_configuration/numpy/f2py/f2py.1	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/f2py/f2py.1	2009-02-15 12:03:15 UTC (rev 6369)
@@ -20,7 +20,7 @@
 This program generates a Python C/API file (<modulename>module.c)
 that contains wrappers for given Fortran or C functions so that they
 can be called from Python.
-With the -c option the corresponding
+With the \-c option the corresponding
 extension modules are built.
 .SH OPTIONS
 .TP
@@ -49,8 +49,8 @@
 \'untitled\'.
 .TP
 .B \-\-[no\-]lower
-Do [not] lower the cases in <fortran files>. By default, --lower is
-assumed with -h key, and --no-lower without -h key.
+Do [not] lower the cases in <fortran files>. By default, \-\-lower is
+assumed with \-h key, and \-\-no\-lower without \-h key.
 .TP
 .B \-\-build\-dir <dirname>
 All f2py generated files are created in <dirname>. Default is tempfile.mktemp().
@@ -59,14 +59,14 @@
 Overwrite existing signature file.
 .TP
 .B \-\-[no\-]latex\-doc
-Create (or not) <modulename>module.tex.  Default is --no-latex-doc.
+Create (or not) <modulename>module.tex.  Default is \-\-no\-latex\-doc.
 .TP
 .B \-\-short\-latex
 Create 'incomplete' LaTeX document (without commands \\documentclass,
 \\tableofcontents, and \\begin{document}, \\end{document}).
 .TP
 .B \-\-[no\-]rest\-doc
-Create (or not) <modulename>module.rst.  Default is --no-rest-doc.
+Create (or not) <modulename>module.rst.  Default is \-\-no\-rest\-doc.
 .TP
 .B \-\-debug\-capi
 Create C/API code that reports the state of the wrappers during
@@ -81,12 +81,12 @@
 .TP
 .B \-\-[no\-]wrap\-functions
 Create Fortran subroutine wrappers to Fortran 77
-functions. --wrap-functions is default because it ensures maximum
+functions. \-\-wrap\-functions is default because it ensures maximum
 portability/compiler independence.
 .TP
 .B \-\-help\-link [..]
 List system resources found by system_info.py. [..] may contain
-a list of resources names. See also --link-<resource> switch below.
+a list of resources names. See also \-\-link\-<resource> switch below.
 .TP
 .B \-\-quiet
 Run quietly.
@@ -100,7 +100,7 @@
 .B \-\-include_paths path1:path2:...
 Search include files (that f2py will scan) from the given directories.
 .SH "CONFIG_FC OPTIONS"
-The following options are effective only when -c switch is used.
+The following options are effective only when \-c switch is used.
 .TP
 .B \-\-help-compiler
 List available Fortran compilers [DEPRECIATED].
@@ -147,13 +147,13 @@
 .B \-\-debug
 Compile with debugging information.
 .SH "EXTRA OPTIONS"
-The following options are effective only when -c switch is used.
+The following options are effective only when \-c switch is used.
 .TP
 .B \-\-link-<resource> 
 Link extension module with <resource> as defined by
 numpy_distutils/system_info.py. E.g. to link with optimized LAPACK
 libraries (vecLib on MacOSX, ATLAS elsewhere), use
---link-lapack_opt. See also --help-link switch.
+\-\-link\-lapack_opt. See also \-\-help\-link switch.
 
 .TP
 .B -L/path/to/lib/ -l<libname>

Modified: branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -543,7 +543,7 @@
     setup(ext_modules = [ext])
 
     if remove_build_dir and os.path.exists(build_dir):
-        import shutil 
+        import shutil
         outmess('Removing build directory %s\n'%(build_dir))
         shutil.rmtree(build_dir)
 

Modified: branches/dynamic_cpu_configuration/numpy/f2py/rules.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/f2py/rules.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/f2py/rules.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -245,7 +245,7 @@
 f2py_start_clock();
 #endif
 \tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
-\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
+\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
 \t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
 #frompyobj#
 /*end of frompyobj*/
@@ -1355,6 +1355,16 @@
             rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
                                     ['\\begin{description}']+rd[k][1:]+\
                                     ['\\end{description}']
+
+    # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
+    if rd['keyformat'] or rd['xaformat']:
+        argformat = rd['argformat']
+        if isinstance(argformat, list):
+            argformat.append('|')
+        else:
+            assert isinstance(argformat, str),repr((argformat, type(argformat)))
+            rd['argformat'] += '|'
+
     ar=applyrules(routine_rules,rd)
     if ismoduleroutine(rout):
         outmess('\t\t\t  %s\n'%(ar['docshort']))

Modified: branches/dynamic_cpu_configuration/numpy/lib/__init__.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/__init__.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/__init__.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,151 +1,3 @@
-"""
-Basic functions used by several sub-packages and
-useful to have in the main name-space.
-
-Type Handling
--------------
-================ ===================
-iscomplexobj     Test for complex object, scalar result
-isrealobj        Test for real object, scalar result
-iscomplex        Test for complex elements, array result
-isreal           Test for real elements, array result
-imag             Imaginary part
-real             Real part
-real_if_close    Turns complex number with tiny imaginary part to real
-isneginf         Tests for negative infinity, array result
-isposinf         Tests for positive infinity, array result
-isnan            Tests for nans, array result
-isinf            Tests for infinity, array result
-isfinite         Tests for finite numbers, array result
-isscalar         True if argument is a scalar
-nan_to_num       Replaces NaN's with 0 and infinities with large numbers
-cast             Dictionary of functions to force cast to each type
-common_type      Determine the minimum common type code for a group
-                 of arrays
-mintypecode      Return minimal allowed common typecode.
-================ ===================
-
-Index Tricks
-------------
-================ ===================
-mgrid            Method which allows easy construction of N-d
-                 'mesh-grids'
-``r_``           Append and construct arrays: turns slice objects into
-                 ranges and concatenates them, for 2d arrays appends rows.
-index_exp        Konrad Hinsen's index_expression class instance which
-                 can be useful for building complicated slicing syntax.
-================ ===================
-
-Useful Functions
-----------------
-================ ===================
-select           Extension of where to multiple conditions and choices
-extract          Extract 1d array from flattened array according to mask
-insert           Insert 1d array of values into Nd array according to mask
-linspace         Evenly spaced samples in linear space
-logspace         Evenly spaced samples in logarithmic space
-fix              Round x to nearest integer towards zero
-mod              Modulo mod(x,y) = x % y except keeps sign of y
-amax             Array maximum along axis
-amin             Array minimum along axis
-ptp              Array max-min along axis
-cumsum           Cumulative sum along axis
-prod             Product of elements along axis
-cumprod          Cumluative product along axis
-diff             Discrete differences along axis
-angle            Returns angle of complex argument
-unwrap           Unwrap phase along given axis (1-d algorithm)
-sort_complex     Sort a complex-array (based on real, then imaginary)
-trim_zeros       Trim the leading and trailing zeros from 1D array.
-vectorize        A class that wraps a Python function taking scalar
-                 arguments into a generalized function which can handle
-                 arrays of arguments using the broadcast rules of
-                 numerix Python.
-================ ===================
-
-Shape Manipulation
-------------------
-================ ===================
-squeeze          Return a with length-one dimensions removed.
-atleast_1d       Force arrays to be > 1D
-atleast_2d       Force arrays to be > 2D
-atleast_3d       Force arrays to be > 3D
-vstack           Stack arrays vertically (row on row)
-hstack           Stack arrays horizontally (column on column)
-column_stack     Stack 1D arrays as columns into 2D array
-dstack           Stack arrays depthwise (along third dimension)
-split            Divide array into a list of sub-arrays
-hsplit           Split into columns
-vsplit           Split into rows
-dsplit           Split along third dimension
-================ ===================
-
-Matrix (2D Array) Manipulations
--------------------------------
-================ ===================
-fliplr           2D array with columns flipped
-flipud           2D array with rows flipped
-rot90            Rotate a 2D array a multiple of 90 degrees
-eye              Return a 2D array with ones down a given diagonal
-diag             Construct a 2D array from a vector, or return a given
-                 diagonal from a 2D array.
-mat              Construct a Matrix
-bmat             Build a Matrix from blocks
-================ ===================
-
-Polynomials
------------
-================ ===================
-poly1d           A one-dimensional polynomial class
-poly             Return polynomial coefficients from roots
-roots            Find roots of polynomial given coefficients
-polyint          Integrate polynomial
-polyder          Differentiate polynomial
-polyadd          Add polynomials
-polysub          Substract polynomials
-polymul          Multiply polynomials
-polydiv          Divide polynomials
-polyval          Evaluate polynomial at given argument
-================ ===================
-
-Import Tricks
--------------
-================ ===================
-ppimport         Postpone module import until trying to use it
-ppimport_attr    Postpone module import until trying to use its attribute
-ppresolve        Import postponed module and return it.
-================ ===================
-
-Machine Arithmetics
--------------------
-================ ===================
-machar_single    Single precision floating point arithmetic parameters
-machar_double    Double precision floating point arithmetic parameters
-================ ===================
-
-Threading Tricks
-----------------
-================ ===================
-ParallelExec     Execute commands in parallel thread.
-================ ===================
-
-1D Array Set Operations
------------------------
-Set operations for 1D numeric arrays based on sort() function.
-
-================ ===================
-ediff1d          Array difference (auxiliary function).
-unique1d         Unique elements of 1D array.
-intersect1d      Intersection of 1D arrays with unique elements.
-intersect1d_nu   Intersection of 1D arrays with any elements.
-setxor1d         Set exclusive-or of 1D arrays with unique elements.
-setmember1d      Return an array of shape of ar1 containing 1 where
-                 the elements of ar1 are in ar2 and 0 otherwise.
-union1d          Union of 1D arrays with unique elements.
-setdiff1d        Set difference of 1D arrays with unique elements.
-================ ===================
-
-"""
 from info import __doc__
 from numpy.version import version as __version__
 

Copied: branches/dynamic_cpu_configuration/numpy/lib/_iotools.py (from rev 6368, trunk/numpy/lib/_iotools.py)

Modified: branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -52,13 +52,19 @@
         If provided, this number will be taked onto the beginning of the
         returned differences.
 
+    Notes
+    -----
+    When applied to masked arrays, this function drops the mask information
+    if the `to_begin` and/or `to_end` parameters are used.
+
+
     Returns
     -------
     ed : array
         The differences. Loosely, this will be (ary[1:] - ary[:-1]).
 
     """
-    ary = np.asarray(ary).flat
+    ary = np.asanyarray(ary).flat
     ed = ary[1:] - ary[:-1]
     arrays = [ed]
     if to_begin is not None:
@@ -132,7 +138,7 @@
                       "the output was (indices, unique_arr), but "
                       "has now been reversed to be more consistent.")
 
-    ar = np.asarray(ar1).flatten()
+    ar = np.asanyarray(ar1).flatten()
     if ar.size == 0:
         if return_inverse and return_index:
             return ar, np.empty(0, np.bool), np.empty(0, np.bool)

Modified: branches/dynamic_cpu_configuration/numpy/lib/function_base.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/function_base.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/function_base.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -228,10 +228,10 @@
           * None : the new behaviour is used, no warning is printed.
           * True : the new behaviour is used and a warning is raised about
             the future removal of the `new` keyword.
-          * False : the old behaviour is used and a DeprecationWarning 
+          * False : the old behaviour is used and a DeprecationWarning
             is raised.
-        As of NumPy 1.3, this keyword should not be used explicitly since it 
-        will disappear in NumPy 1.4. 
+        As of NumPy 1.3, this keyword should not be used explicitly since it
+        will disappear in NumPy 1.4.
 
     Returns
     -------
@@ -267,9 +267,9 @@
     # Old behavior
     if new == False:
         warnings.warn("""
-        The histogram semantics being used is now deprecated and 
-        will disappear in NumPy 1.4.  Please update your code to 
-        use the default semantics. 
+        The histogram semantics being used is now deprecated and
+        will disappear in NumPy 1.4.  Please update your code to
+        use the default semantics.
         """, DeprecationWarning)
 
         a = asarray(a).ravel()
@@ -320,8 +320,8 @@
     elif new in [True, None]:
         if new is True:
             warnings.warn("""
-            The new semantics of histogram is now the default and the `new` 
-            keyword will be removed in NumPy 1.4. 
+            The new semantics of histogram is now the default and the `new`
+            keyword will be removed in NumPy 1.4.
             """, Warning)
         a = asarray(a)
         if weights is not None:
@@ -1073,53 +1073,6 @@
     else:
         return a[slice1]-a[slice2]
 
-try:
-    add_docstring(digitize,
-r"""digitize(x,bins)
-
-Return the index of the bin to which each value of x belongs.
-
-Each index i returned is such that bins[i-1] <= x < bins[i] if
-bins is monotonically increasing, or bins [i-1] > x >= bins[i] if
-bins is monotonically decreasing.
-
-Beyond the bounds of the bins 0 or len(bins) is returned as appropriate.
-
-""")
-except RuntimeError:
-    pass
-
-try:
-    add_docstring(bincount,
-r"""bincount(x,weights=None)
-
-Return the number of occurrences of each value in x.
-
-x must be a list of non-negative integers.  The output, b[i],
-represents the number of times that i is found in x.  If weights
-is specified, every occurrence of i at a position p contributes
-weights[p] instead of 1.
-
-See also: histogram, digitize, unique.
-
-""")
-except RuntimeError:
-    pass
-
-try:
-    add_docstring(add_docstring,
-r"""docstring(obj, docstring)
-
-Add a docstring to a built-in obj if possible.
-If the obj already has a docstring raise a RuntimeError
-If this routine does not know how to add a docstring to the object
-raise a TypeError
-
-""")
-except RuntimeError:
-    pass
-
-
 def interp(x, xp, fp, left=None, right=None):
     """
     One-dimensional linear interpolation.
@@ -2818,9 +2771,9 @@
     y : array_like
         Input array to integrate.
     x : array_like, optional
-        If `x` is None, then spacing between all `y` elements is 1.
+        If `x` is None, then spacing between all `y` elements is `dx`.
     dx : scalar, optional
-        If `x` is None, spacing given by `dx` is assumed.
+        If `x` is None, spacing given by `dx` is assumed. Default is 1.
     axis : int, optional
         Specify the axis.
 
@@ -2836,7 +2789,15 @@
     if x is None:
         d = dx
     else:
-        d = diff(x,axis=axis)
+        x = asarray(x)
+        if x.ndim == 1:
+            d = diff(x)
+            # reshape to correct shape
+            shape = [1]*y.ndim
+            shape[axis] = d.shape[0]
+            d = d.reshape(shape)
+        else:
+            d = diff(x, axis=axis)
     nd = len(y.shape)
     slice1 = [slice(None)]*nd
     slice2 = [slice(None)]*nd

Modified: branches/dynamic_cpu_configuration/numpy/lib/getlimits.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/getlimits.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/getlimits.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -88,6 +88,12 @@
     _finfo_cache = {}
 
     def __new__(cls, dtype):
+        try:
+            dtype = np.dtype(dtype)
+        except TypeError:
+            # In case a float instance was given
+            dtype = np.dtype(type(dtype))
+
         obj = cls._finfo_cache.get(dtype,None)
         if obj is not None:
             return obj
@@ -115,7 +121,7 @@
         return obj
 
     def _init(self, dtype):
-        self.dtype = dtype
+        self.dtype = np.dtype(dtype)
         if dtype is ntypes.double:
             itype = ntypes.int64
             fmt = '%24.16e'
@@ -149,23 +155,23 @@
         self.nexp = machar.iexp
         self.nmant = machar.it
         self.machar = machar
-        self._str_tiny = machar._str_xmin
-        self._str_max = machar._str_xmax
-        self._str_epsneg = machar._str_epsneg
-        self._str_eps = machar._str_eps
-        self._str_resolution = machar._str_resolution
+        self._str_tiny = machar._str_xmin.strip()
+        self._str_max = machar._str_xmax.strip()
+        self._str_epsneg = machar._str_epsneg.strip()
+        self._str_eps = machar._str_eps.strip()
+        self._str_resolution = machar._str_resolution.strip()
         return self
 
     def __str__(self):
         return '''\
 Machine parameters for %(dtype)s
 ---------------------------------------------------------------------
-precision=%(precision)3s   resolution=%(_str_resolution)s
-machep=%(machep)6s   eps=     %(_str_eps)s
-negep =%(negep)6s   epsneg=  %(_str_epsneg)s
-minexp=%(minexp)6s   tiny=    %(_str_tiny)s
-maxexp=%(maxexp)6s   max=     %(_str_max)s
-nexp  =%(nexp)6s   min=       -max
+precision=%(precision)3s   resolution= %(_str_resolution)s
+machep=%(machep)6s   eps=        %(_str_eps)s
+negep =%(negep)6s   epsneg=     %(_str_epsneg)s
+minexp=%(minexp)6s   tiny=       %(_str_tiny)s
+maxexp=%(maxexp)6s   max=        %(_str_max)s
+nexp  =%(nexp)6s   min=        -max
 ---------------------------------------------------------------------
 ''' % self.__dict__
 
@@ -220,8 +226,11 @@
     _min_vals = {}
     _max_vals = {}
 
-    def __init__(self, type):
-        self.dtype = np.dtype(type)
+    def __init__(self, int_type):
+        try:
+            self.dtype = np.dtype(int_type)
+        except TypeError:
+            self.dtype = np.dtype(type(int_type))
         self.kind = self.dtype.kind
         self.bits = self.dtype.itemsize * 8
         self.key = "%s%d" % (self.kind, self.bits)
@@ -256,6 +265,17 @@
 
     max = property(max)
 
+    def __str__(self):
+        """String representation."""
+        return '''\
+Machine parameters for %(dtype)s
+---------------------------------------------------------------------
+min = %(min)s
+max = %(max)s
+---------------------------------------------------------------------
+''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
+
+
 if __name__ == '__main__':
     f = finfo(ntypes.single)
     print 'single epsilon:',f.eps

Modified: branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -212,6 +212,8 @@
 
 mgrid = nd_grid(sparse=False)
 ogrid = nd_grid(sparse=True)
+mgrid.__doc__ = None # set in numpy.add_newdocs
+ogrid.__doc__ = None # set in numpy.add_newdocs
 
 class AxisConcatenator(object):
     """Translates slice objects to concatenation along an axis.

Modified: branches/dynamic_cpu_configuration/numpy/lib/info.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/info.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/info.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,134 +1,149 @@
-__doc_title__ = """Basic functions used by several sub-packages and
-useful to have in the main name-space."""
-__doc__ = __doc_title__ + """
+"""
+Basic functions used by several sub-packages and
+useful to have in the main name-space.
 
-Type handling
-==============
-iscomplexobj     --  Test for complex object, scalar result
-isrealobj        --  Test for real object, scalar result
-iscomplex        --  Test for complex elements, array result
-isreal           --  Test for real elements, array result
-imag             --  Imaginary part
-real             --  Real part
-real_if_close    --  Turns complex number with tiny imaginary part to real
-isneginf         --  Tests for negative infinity ---|
-isposinf         --  Tests for positive infinity    |
-isnan            --  Tests for nans                 |----  array results
-isinf            --  Tests for infinity             |
-isfinite         --  Tests for finite numbers    ---|
-isscalar         --  True if argument is a scalar
-nan_to_num       --  Replaces NaN's with 0 and infinities with large numbers
-cast             --  Dictionary of functions to force cast to each type
-common_type      --  Determine the 'minimum common type code' for a group
-                       of arrays
-mintypecode      --  Return minimal allowed common typecode.
+Type Handling
+-------------
+================ ===================
+iscomplexobj     Test for complex object, scalar result
+isrealobj        Test for real object, scalar result
+iscomplex        Test for complex elements, array result
+isreal           Test for real elements, array result
+imag             Imaginary part
+real             Real part
+real_if_close    Turns complex number with tiny imaginary part to real
+isneginf         Tests for negative infinity, array result
+isposinf         Tests for positive infinity, array result
+isnan            Tests for nans, array result
+isinf            Tests for infinity, array result
+isfinite         Tests for finite numbers, array result
+isscalar         True if argument is a scalar
+nan_to_num       Replaces NaN's with 0 and infinities with large numbers
+cast             Dictionary of functions to force cast to each type
+common_type      Determine the minimum common type code for a group
+                 of arrays
+mintypecode      Return minimal allowed common typecode.
+================ ===================
 
-Index tricks
-==================
-mgrid            --  Method which allows easy construction of N-d 'mesh-grids'
-r_               --  Append and construct arrays: turns slice objects into
-                       ranges and concatenates them, for 2d arrays appends
-                       rows.
-index_exp        --  Konrad Hinsen's index_expression class instance which
-                     can be useful for building complicated slicing syntax.
+Index Tricks
+------------
+================ ===================
+mgrid            Method which allows easy construction of N-d
+                 'mesh-grids'
+``r_``           Append and construct arrays: turns slice objects into
+                 ranges and concatenates them, for 2d arrays appends rows.
+index_exp        Konrad Hinsen's index_expression class instance which
+                 can be useful for building complicated slicing syntax.
+================ ===================
 
-Useful functions
-==================
-select           --  Extension of where to multiple conditions and choices
-extract          --  Extract 1d array from flattened array according to mask
-insert           --  Insert 1d array of values into Nd array according to mask
-linspace         --  Evenly spaced samples in linear space
-logspace         --  Evenly spaced samples in logarithmic space
-fix              --  Round x to nearest integer towards zero
-mod              --  Modulo mod(x,y) = x % y except keeps sign of y
-amax             --  Array maximum along axis
-amin             --  Array minimum along axis
-ptp              --  Array max-min along axis
-cumsum           --  Cumulative sum along axis
-prod             --  Product of elements along axis
-cumprod          --  Cumluative product along axis
-diff             --  Discrete differences along axis
-angle            --  Returns angle of complex argument
-unwrap           --  Unwrap phase along given axis (1-d algorithm)
-sort_complex     --  Sort a complex-array (based on real, then imaginary)
-trim_zeros       --  trim the leading and trailing zeros from 1D array.
+Useful Functions
+----------------
+================ ===================
+select           Extension of where to multiple conditions and choices
+extract          Extract 1d array from flattened array according to mask
+insert           Insert 1d array of values into Nd array according to mask
+linspace         Evenly spaced samples in linear space
+logspace         Evenly spaced samples in logarithmic space
+fix              Round x to nearest integer towards zero
+mod              Modulo mod(x,y) = x % y except keeps sign of y
+amax             Array maximum along axis
+amin             Array minimum along axis
+ptp              Array max-min along axis
+cumsum           Cumulative sum along axis
+prod             Product of elements along axis
+cumprod          Cumulative product along axis
+diff             Discrete differences along axis
+angle            Returns angle of complex argument
+unwrap           Unwrap phase along given axis (1-d algorithm)
+sort_complex     Sort a complex-array (based on real, then imaginary)
+trim_zeros       Trim the leading and trailing zeros from 1D array.
+vectorize        A class that wraps a Python function taking scalar
+                 arguments into a generalized function which can handle
+                 arrays of arguments using the broadcast rules of
+                 numerix Python.
+================ ===================
 
-vectorize        --  a class that wraps a Python function taking scalar
-                         arguments into a generalized function which
-                         can handle arrays of arguments using the broadcast
-                         rules of numerix Python.
+Shape Manipulation
+------------------
+================ ===================
+squeeze          Return a with length-one dimensions removed.
+atleast_1d       Force arrays to be > 1D
+atleast_2d       Force arrays to be > 2D
+atleast_3d       Force arrays to be > 3D
+vstack           Stack arrays vertically (row on row)
+hstack           Stack arrays horizontally (column on column)
+column_stack     Stack 1D arrays as columns into 2D array
+dstack           Stack arrays depthwise (along third dimension)
+split            Divide array into a list of sub-arrays
+hsplit           Split into columns
+vsplit           Split into rows
+dsplit           Split along third dimension
+================ ===================
 
-Shape manipulation
-===================
-squeeze          --  Return a with length-one dimensions removed.
-atleast_1d       --  Force arrays to be > 1D
-atleast_2d       --  Force arrays to be > 2D
-atleast_3d       --  Force arrays to be > 3D
-vstack           --  Stack arrays vertically (row on row)
-hstack           --  Stack arrays horizontally (column on column)
-column_stack     --  Stack 1D arrays as columns into 2D array
-dstack           --  Stack arrays depthwise (along third dimension)
-split            --  Divide array into a list of sub-arrays
-hsplit           --  Split into columns
-vsplit           --  Split into rows
-dsplit           --  Split along third dimension
+Matrix (2D Array) Manipulations
+-------------------------------
+================ ===================
+fliplr           2D array with columns flipped
+flipud           2D array with rows flipped
+rot90            Rotate a 2D array a multiple of 90 degrees
+eye              Return a 2D array with ones down a given diagonal
+diag             Construct a 2D array from a vector, or return a given
+                 diagonal from a 2D array.
+mat              Construct a Matrix
+bmat             Build a Matrix from blocks
+================ ===================
 
-Matrix (2d array) manipluations
-===============================
-fliplr           --  2D array with columns flipped
-flipud           --  2D array with rows flipped
-rot90            --  Rotate a 2D array a multiple of 90 degrees
-eye              --  Return a 2D array with ones down a given diagonal
-diag             --  Construct a 2D array from a vector, or return a given
-                       diagonal from a 2D array.
-mat              --  Construct a Matrix
-bmat             --  Build a Matrix from blocks
-
 Polynomials
-============
-poly1d           --  A one-dimensional polynomial class
+-----------
+================ ===================
+poly1d           A one-dimensional polynomial class
+poly             Return polynomial coefficients from roots
+roots            Find roots of polynomial given coefficients
+polyint          Integrate polynomial
+polyder          Differentiate polynomial
+polyadd          Add polynomials
+polysub          Subtract polynomials
+polymul          Multiply polynomials
+polydiv          Divide polynomials
+polyval          Evaluate polynomial at given argument
+================ ===================
 
-poly             --  Return polynomial coefficients from roots
-roots            --  Find roots of polynomial given coefficients
-polyint          --  Integrate polynomial
-polyder          --  Differentiate polynomial
-polyadd          --  Add polynomials
-polysub          --  Substract polynomials
-polymul          --  Multiply polynomials
-polydiv          --  Divide polynomials
-polyval          --  Evaluate polynomial at given argument
+Import Tricks
+-------------
+================ ===================
+ppimport         Postpone module import until trying to use it
+ppimport_attr    Postpone module import until trying to use its attribute
+ppresolve        Import postponed module and return it.
+================ ===================
 
-Import tricks
-=============
-ppimport         --  Postpone module import until trying to use it
-ppimport_attr    --  Postpone module import until trying to use its
-                      attribute
-ppresolve        --  Import postponed module and return it.
+Machine Arithmetics
+-------------------
+================ ===================
+machar_single    Single precision floating point arithmetic parameters
+machar_double    Double precision floating point arithmetic parameters
+================ ===================
 
-Machine arithmetics
-===================
-machar_single    --  MachAr instance storing the parameters of system
-                     single precision floating point arithmetics
-machar_double    --  MachAr instance storing the parameters of system
-                     double precision floating point arithmetics
+Threading Tricks
+----------------
+================ ===================
+ParallelExec     Execute commands in parallel thread.
+================ ===================
 
-Threading tricks
-================
-ParallelExec     --  Execute commands in parallel thread.
-
-1D array set operations
-=======================
+1D Array Set Operations
+-----------------------
 Set operations for 1D numeric arrays based on sort() function.
 
-ediff1d          --  Array difference (auxiliary function).
-unique1d         --  Unique elements of 1D array.
-intersect1d      --  Intersection of 1D arrays with unique elements.
-intersect1d_nu   --  Intersection of 1D arrays with any elements.
-setxor1d         --  Set exclusive-or of 1D arrays with unique elements.
-setmember1d      --  Return an array of shape of ar1 containing 1 where
-                     the elements of ar1 are in ar2 and 0 otherwise.
-union1d          --  Union of 1D arrays with unique elements.
-setdiff1d        --  Set difference of 1D arrays with unique elements.
+================ ===================
+ediff1d          Array difference (auxiliary function).
+unique1d         Unique elements of 1D array.
+intersect1d      Intersection of 1D arrays with unique elements.
+intersect1d_nu   Intersection of 1D arrays with any elements.
+setxor1d         Set exclusive-or of 1D arrays with unique elements.
+setmember1d      Return an array of shape of ar1 containing 1 where
+                 the elements of ar1 are in ar2 and 0 otherwise.
+union1d          Union of 1D arrays with unique elements.
+setdiff1d        Set difference of 1D arrays with unique elements.
+================ ===================
 
 """
 

Modified: branches/dynamic_cpu_configuration/numpy/lib/io.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/io.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/io.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,4 +1,5 @@
 __all__ = ['savetxt', 'loadtxt',
+           'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv',
            'load', 'loads',
            'save', 'savez',
            'packbits', 'unpackbits',
@@ -15,7 +16,11 @@
 from _datasource import DataSource
 from _compiled_base import packbits, unpackbits
 
+from _iotools import LineSplitter, NameValidator, StringConverter, \
+                     _is_string_like, has_nested_fields, flatten_dtype
+
 _file = file
+_string_like = _is_string_like
 
 class BagObj(object):
     """A simple class that converts attribute lookups to
@@ -264,10 +269,6 @@
         return str
 
 
-def _string_like(obj):
-    try: obj + ''
-    except (TypeError, ValueError): return 0
-    return 1
 
 def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
             skiprows=0, usecols=None, unpack=False):
@@ -342,7 +343,7 @@
     if usecols is not None:
         usecols = list(usecols)
 
-    if _string_like(fname):
+    if _is_string_like(fname):
         if fname.endswith('.gz'):
             import gzip
             fh = gzip.open(fname)
@@ -520,7 +521,7 @@
 
     """
 
-    if _string_like(fname):
+    if _is_string_like(fname):
         if fname.endswith('.gz'):
             import gzip
             fh = gzip.open(fname,'wb')
@@ -603,8 +604,508 @@
 
     seq = regexp.findall(file.read())
     if seq and not isinstance(seq[0], tuple):
-        # make sure np.array doesn't interpret strings as binary data
-        # by always producing a list of tuples
-        seq = [(x,) for x in seq]
-    output = np.array(seq, dtype=dtype)
+        # Only one group is in the regexp.
+        # Create the new array as a single data-type and then
+        #   re-interpret as a single-field structured array. 
+        newdtype = np.dtype(dtype[dtype.names[0]])
+        output = np.array(seq, dtype=newdtype)
+        output.dtype = dtype
+    else:
+        output = np.array(seq, dtype=dtype)
+
     return output
+
+
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
+               converters=None, missing='', missing_values=None, usecols=None,
+               names=None, excludelist=None, deletechars=None,
+               case_sensitive=True, unpack=None, usemask=False, loose=True):
+    """
+    Load data from a text file.
+
+    Each line past the first `skiprows` ones is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+    
+
+
+    Parameters
+    ----------
+    fname : file or string
+        File or filename to read.  If the filename extension is `.gz` or `.bz2`,
+        the file is first decompressed.
+    dtype : data-type
+        Data type of the resulting array.  If this is a flexible data-type,
+        the resulting array will be 1-dimensional, and each row will be
+        interpreted as an element of the array. In this case, the number
+        of columns used must match the number of fields in the data-type,
+        and the names of each field will be set by the corresponding name
+        of the dtype.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : {string}, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded
+    delimiter : {string}, optional
+        The string used to separate values.  By default, any consecutive
+        whitespace acts as delimiter.
+    skiprows : {int}, optional
+        Number of lines to skip at the beginning of the file.
+    converters : {None, dictionary}, optional
+        A dictionary mapping column number to a function that will convert
+        values in the column to a number. Converters can also be used to
+        provide a default value for missing data:
+        ``converters = {3: lambda s: float(s or 0)}``.
+    missing : {string}, optional
+        A string representing a missing value, irrespective of the column where
+        it appears (e.g., `'missing'` or `'unused'`).
+    missing_values : {None, dictionary}, optional
+        A dictionary mapping a column number to a string indicating whether the
+        corresponding field should be masked.
+    usecols : {None, sequence}, optional
+        Which columns to read, with 0 being the first.  For example,
+        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+    names : {None, True, string, sequence}, optional
+        If `names` is True, the field names are read from the first valid line
+        after the first `skiprows` lines.
+        If `names` is a sequence or a single-string of comma-separated names,
+        the names will be used to define the field names in a flexible dtype.
+        If `names` is None, the names of the dtype fields will be used, if any.
+    excludelist : {sequence}, optional
+        A list of names to exclude. This list is appended to the default list
+        ['return','file','print']. Excluded names are appended an underscore:
+        for example, `file` would become `file_`.
+    deletechars : {string}, optional
+        A string combining invalid characters that must be deleted from the names.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        If True, field names are case_sensitive.
+        If False or 'upper', field names are converted to upper case.
+        If 'lower', field names are converted to lower case.
+    unpack : {bool}, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = genfromtxt(...)``
+    usemask : {bool}, optional
+        If True, returns a masked array.
+        If False, return a regular standard array.
+
+    Returns
+    -------
+    out : MaskedArray
+        Data read from the text file.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When the variables are named (either with a flexible dtype or with
+      `names`), there must not be any header in the file (else a
+      :exc:`ValueError` exception is raised).
+
+    Warnings
+    --------
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    """
+    #
+    if usemask:
+        from numpy.ma import MaskedArray, make_mask_descr
+    # Check the input dictionary of converters
+    user_converters = converters or {}
+    if not isinstance(user_converters, dict):
+        errmsg = "The input argument 'converter' should be a valid dictionary "\
+                 "(got '%s' instead)"
+        raise TypeError(errmsg % type(user_converters))
+    # Check the input dictionary of missing values
+    user_missing_values = missing_values or {}
+    if not isinstance(user_missing_values, dict):
+        errmsg = "The input argument 'missing_values' should be a valid "\
+                 "dictionary (got '%s' instead)"
+        raise TypeError(errmsg % type(missing_values))
+    defmissing = [_.strip() for _ in missing.split(',')] + ['']
+
+    # Initialize the filehandle, the LineSplitter and the NameValidator
+#    fhd = _to_filehandle(fname)
+    if isinstance(fname, basestring):
+        fhd = np.lib._datasource.open(fname)
+    elif not hasattr(fname, 'read'):
+        raise TypeError("The input should be a string or a filehandle. "\
+                        "(got %s instead)" % type(fname))
+    else:
+        fhd = fname
+    split_line = LineSplitter(delimiter=delimiter, comments=comments, 
+                              autostrip=False)._handyman
+    validate_names = NameValidator(excludelist=excludelist,
+                                   deletechars=deletechars,
+                                   case_sensitive=case_sensitive)
+
+    # Get the first valid lines after the first skiprows ones
+    for i in xrange(skiprows):
+        fhd.readline()
+    first_values = None
+    while not first_values:
+        first_line = fhd.readline()
+        if first_line == '':
+            raise IOError('End-of-file reached before encountering data.')
+        if names is True:
+            first_values = first_line.strip().split(delimiter)
+        else:
+            first_values = split_line(first_line)
+    if names is True:
+        fval = first_values[0].strip()
+        if fval in comments:
+            del first_values[0]
+
+    # Check the columns to use
+    if usecols is not None:
+        usecols = list(usecols)
+    nbcols = len(usecols or first_values)
+
+    # Check the names and overwrite the dtype.names if needed
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+    dtypenames = getattr(dtype, 'names', None)
+    if names is True:
+        names = validate_names([_.strip() for _ in first_values])
+        first_line =''
+    elif _is_string_like(names):
+        names = validate_names([_.strip() for _ in names.split(',')])
+    elif names:
+        names = validate_names(names)
+    elif dtypenames:
+        dtype.names = validate_names(dtypenames)
+    if names and dtypenames:
+        dtype.names = names
+
+    # If usecols is a list of names, convert to a list of indices
+    if usecols:
+        for (i, current) in enumerate(usecols):
+            if _is_string_like(current):
+                usecols[i] = names.index(current)
+
+    # If user_missing_values has names as keys, transform them to indices
+    missing_values = {}
+    for (key, val) in user_missing_values.iteritems():
+        # If val is a list, flatten it. In any case, add missing &'' to the list
+        if isinstance(val, (list, tuple)):
+            val = [str(_) for _ in val]
+        else:
+            val = [str(val),]
+        val.extend(defmissing)
+        if _is_string_like(key):
+            try:
+                missing_values[names.index(key)] = val
+            except ValueError:
+                pass
+        else:
+            missing_values[key] = val
+
+
+    # Initialize the default converters
+    if dtype is None:
+        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
+        # ... converter, instead of 3 different converters.
+        converters = [StringConverter(None,
+                              missing_values=missing_values.get(_, defmissing))
+                      for _ in range(nbcols)]
+    else:
+        flatdtypes = flatten_dtype(dtype)
+        # Initialize the converters
+        if len(flatdtypes) > 1:
+            # Flexible type : get a converter from each dtype
+            converters = [StringConverter(dt,
+                              missing_values=missing_values.get(i, defmissing),
+                              locked=True)
+                          for (i, dt) in enumerate(flatdtypes)]
+        else:
+            # Set to a default converter (but w/ different missing values)
+            converters = [StringConverter(dtype,
+                              missing_values=missing_values.get(_, defmissing),
+                              locked=True)
+                          for _ in range(nbcols)]
+    missing_values = [_.missing_values for _ in converters]
+
+    # Update the converters to use the user-defined ones
+    uc_update = []
+    for (i, conv) in user_converters.iteritems():
+        # If the converter is specified by column names, use the index instead
+        if _is_string_like(i):
+            i = names.index(i)
+        if usecols:
+            try:
+                i = usecols.index(i)
+            except ValueError:
+                # Unused converter specified
+                continue
+        converters[i].update(conv, default=None, 
+                             missing_values=missing_values[i],
+                             locked=True)
+        uc_update.append((i, conv))
+    # Make sure we have the corrected keys in user_converters...
+    user_converters.update(uc_update)
+
+    # Reset the names to match the usecols
+    if (not first_line) and usecols:
+        names = [names[_] for _ in usecols]
+
+    rows = []
+    append_to_rows = rows.append
+    if usemask:
+        masks = []
+        append_to_masks = masks.append
+    # Parse each line
+    for line in itertools.chain([first_line,], fhd):
+        values = split_line(line)
+        # Skip an empty line
+        if len(values) == 0:
+            continue
+        # Select only the columns we need
+        if usecols:
+            values = [values[_] for _ in usecols]
+        # Check whether we need to update the converter
+        if dtype is None:
+            for (converter, item) in zip(converters, values):
+                converter.upgrade(item)
+        # Store the values
+        append_to_rows(tuple(values))
+        if usemask:
+            append_to_masks(tuple([val.strip() in mss 
+                                   for (val, mss) in zip(values,
+                                                         missing_values)]))
+
+    # Convert each value according to the converter:
+    # We want to modify the list in place to avoid creating a new one...
+    if loose:
+        conversionfuncs = [conv._loose_call for conv in converters]
+    else:
+        conversionfuncs = [conv._strict_call for conv in converters]
+    for (i, vals) in enumerate(rows):
+        rows[i] = tuple([convert(val)
+                         for (convert, val) in zip(conversionfuncs, vals)])
+
+    # Reset the dtype
+    data = rows
+    if dtype is None:
+        # Get the dtypes from the types of the converters
+        coldtypes = [conv.type for conv in converters]
+        # Find the columns with strings...
+        strcolidx = [i for (i, v) in enumerate(coldtypes)
+                     if v in (type('S'), np.string_)]
+        # ... and take the largest number of chars.
+        for i in strcolidx:
+            coldtypes[i] = "|S%i" % max(len(row[i]) for row in data)
+        #
+        if names is None:
+            # If the dtype is uniform, don't define names, else use ''
+            base = set([c.type for c in converters if c._checked])
+            
+            if len(base) == 1:
+                (ddtype, mdtype) = (list(base)[0], np.bool)
+            else:
+                ddtype = [('', dt) for dt in coldtypes]
+                mdtype = [('', np.bool) for dt in coldtypes]
+        else:
+            ddtype = zip(names, coldtypes)
+            mdtype = zip(names, [np.bool] * len(coldtypes))
+        output = np.array(data, dtype=ddtype)
+        if usemask:
+            outputmask = np.array(masks, dtype=mdtype)
+    else:
+        # Overwrite the initial dtype names if needed
+        if names and dtype.names:
+            dtype.names = names
+        flatdtypes = flatten_dtype(dtype)
+        # Case 1. We have a structured type
+        if len(flatdtypes) > 1:
+            # Nested dtype, eg  [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
+            # First, create the array using a flattened dtype:
+            # [('a', int), ('b1', int), ('b2', float)]
+            # Then, view the array using the specified dtype.
+            if has_nested_fields(dtype):
+                if 'O' in (_.char for _ in flatdtypes):
+                    errmsg = "Nested fields involving objects "\
+                             "are not supported..."
+                    raise NotImplementedError(errmsg)
+                rows = np.array(data, dtype=[('', t) for t in flatdtypes])
+                output = rows.view(dtype)
+            else:
+                output = np.array(data, dtype=dtype)
+            # Now, process the rowmasks the same way
+            if usemask:
+                rowmasks = np.array(masks,
+                                    dtype=np.dtype([('', np.bool)
+                                                    for t in flatdtypes]))
+                # Construct the new dtype
+                mdtype = make_mask_descr(dtype)
+                outputmask = rowmasks.view(mdtype)
+        # Case #2. We have a basic dtype
+        else:
+            # We used some user-defined converters
+            if user_converters:
+                ishomogeneous = True
+                descr = []
+                for (i, ttype) in enumerate([conv.type for conv in converters]):
+                    # Keep the dtype of the current converter
+                    if i in user_converters:
+                        ishomogeneous &= (ttype == dtype.type)
+                        if ttype == np.string_:
+                            ttype = "|S%i" % max(len(row[i]) for row in data)
+                        descr.append(('', ttype))
+                    else:
+                        descr.append(('', dtype))
+                # So we changed the dtype ?
+                if not ishomogeneous:
+                    # We have more than one field
+                    if len(descr) > 1:
+                        dtype = np.dtype(descr)
+                    # We have only one field: drop the name if not needed.
+                    else:
+                        dtype = np.dtype(ttype)
+            #
+            output = np.array(data, dtype)
+            if usemask:
+                if dtype.names:
+                    mdtype = [(_, np.bool) for _ in dtype.names]
+                else:
+                    mdtype = np.bool
+                outputmask = np.array(masks, dtype=mdtype)
+    # Try to take care of the missing data we missed
+    if usemask and output.dtype.names:
+        for (name, conv) in zip(names or (), converters):
+            missing_values = [conv(_) for _ in conv.missing_values if _ != '']
+            for mval in missing_values:
+                outputmask[name] |= (output[name] == mval)
+    # Construct the final array
+    if usemask:
+        output = output.view(MaskedArray)
+        output._mask = outputmask
+    if unpack:
+        return output.squeeze().T
+    return output.squeeze()
+
+
+
+def ndfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
+             converters=None, missing='', missing_values=None,
+             usecols=None, unpack=None, names=None,
+             excludelist=None, deletechars=None, case_sensitive=True,):
+    """
+    Load ASCII data stored in fname and returns a ndarray.
+    
+    Complete description of all the optional input parameters is available in
+    the docstring of the `genfromtxt` function.
+    
+    See Also
+    --------
+    numpy.genfromtxt : generic function.
+    
+    """
+    kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, 
+                  skiprows=skiprows, converters=converters,
+                  missing=missing, missing_values=missing_values,
+                  usecols=usecols, unpack=unpack, names=names, 
+                  excludelist=excludelist, deletechars=deletechars,
+                  case_sensitive=case_sensitive, usemask=False)
+    return genfromtxt(fname, **kwargs)
+
+def mafromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
+              converters=None, missing='', missing_values=None,
+              usecols=None, unpack=None, names=None,
+              excludelist=None, deletechars=None, case_sensitive=True,):
+    """
+    Load ASCII data stored in fname and returns a MaskedArray.
+    
+    Complete description of all the optional input parameters is available in
+    the docstring of the `genfromtxt` function.
+    
+    See Also
+    --------
+    numpy.genfromtxt : generic function.
+    """
+    kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, 
+                  skiprows=skiprows, converters=converters,
+                  missing=missing, missing_values=missing_values,
+                  usecols=usecols, unpack=unpack, names=names, 
+                  excludelist=excludelist, deletechars=deletechars,
+                  case_sensitive=case_sensitive,
+                  usemask=True)
+    return genfromtxt(fname, **kwargs)
+
+
+def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0,
+               converters=None, missing='', missing_values=None,
+               usecols=None, unpack=None, names=None,
+               excludelist=None, deletechars=None, case_sensitive=True,
+               usemask=False):
+    """
+    Load ASCII data stored in fname and returns a standard recarray (if 
+    `usemask=False`) or a MaskedRecords (if `usemask=True`).
+    
+    Complete description of all the optional input parameters is available in
+    the docstring of the `genfromtxt` function.
+    
+    See Also
+    --------
+    numpy.genfromtxt : generic function
+
+    Warnings
+    --------
+    * by default, `dtype=None`, which means that the dtype of the output array
+      will be determined from the data.
+    """
+    kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, 
+                  skiprows=skiprows, converters=converters,
+                  missing=missing, missing_values=missing_values,
+                  usecols=usecols, unpack=unpack, names=names, 
+                  excludelist=excludelist, deletechars=deletechars,
+                  case_sensitive=case_sensitive, usemask=usemask)
+    output = genfromtxt(fname, **kwargs)
+    if usemask:
+        from numpy.ma.mrecords import MaskedRecords
+        output = output.view(MaskedRecords)
+    else:
+        output = output.view(np.recarray)
+    return output
+
+
+def recfromcsv(fname, dtype=None, comments='#', skiprows=0,
+               converters=None, missing='', missing_values=None,
+               usecols=None, unpack=None, names=True,
+               excludelist=None, deletechars=None, case_sensitive='lower',
+               usemask=False):
+    """
+    Load ASCII data stored in comma-separated file and returns a recarray (if 
+    `usemask=False`) or a MaskedRecords (if `usemask=True`).
+    
+    Complete description of all the optional input parameters is available in
+    the docstring of the `genfromtxt` function.
+    
+    See Also
+    --------
+    numpy.genfromtxt : generic function
+    """
+    kwargs = dict(dtype=dtype, comments=comments, delimiter=",", 
+                  skiprows=skiprows, converters=converters,
+                  missing=missing, missing_values=missing_values,
+                  usecols=usecols, unpack=unpack, names=names, 
+                  excludelist=excludelist, deletechars=deletechars,
+                  case_sensitive=case_sensitive, usemask=usemask)
+    output = genfromtxt(fname, **kwargs)
+    if usemask:
+        from numpy.ma.mrecords import MaskedRecords
+        output = output.view(MaskedRecords)
+    else:
+        output = output.view(np.recarray)
+    return output
+

Copied: branches/dynamic_cpu_configuration/numpy/lib/recfunctions.py (from rev 6368, trunk/numpy/lib/recfunctions.py)

Modified: branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c	2009-02-15 12:03:15 UTC (rev 6369)
@@ -494,35 +494,46 @@
 
 #define _TESTDOC1(typebase) (obj->ob_type == &Py##typebase##_Type)
 #define _TESTDOC2(typebase) (obj->ob_type == Py##typebase##_TypePtr)
-#define _ADDDOC(typebase, doc, name) {                                  \
+#define _ADDDOC(typebase, doc, name) do {                               \
         Py##typebase##Object *new = (Py##typebase##Object *)obj;        \
         if (!(doc)) {                                                   \
             doc = docstr;                                               \
         }                                                               \
         else {                                                          \
-            PyErr_Format(PyExc_RuntimeError,                            \
-                         "%s method %s",name, msg);                     \
+            PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \
             return NULL;                                                \
         }                                                               \
-    }
+    } while (0)
 
-    if _TESTDOC1(CFunction)
-                    _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name)
-        else if _TESTDOC1(Type)
-                             _ADDDOC(Type, new->tp_doc, new->tp_name)
-            else if _TESTDOC2(MemberDescr)
-                                 _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name)
-                else if _TESTDOC2(GetSetDescr)
-                                     _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name)
-                    else if _TESTDOC2(MethodDescr)
-                                         _ADDDOC(MethodDescr, new->d_method->ml_doc,
-                                                 new->d_method->ml_name)
-                        else {
-                            PyErr_SetString(PyExc_TypeError,
-                                            "Cannot set a docstring for that object");
-                            return NULL;
-                        }
+    if (_TESTDOC1(CFunction))
+        _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name);
+    else if (_TESTDOC1(Type))
+        _ADDDOC(Type, new->tp_doc, new->tp_name);
+    else if (_TESTDOC2(MemberDescr))
+        _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name);
+    else if (_TESTDOC2(GetSetDescr))
+        _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name);
+    else if (_TESTDOC2(MethodDescr))
+        _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name);
+    else {
+        PyObject *doc_attr;
+        
+        doc_attr = PyObject_GetAttrString(obj, "__doc__");
+        if (doc_attr != NULL && doc_attr != Py_None) {
+            PyErr_Format(PyExc_RuntimeError, "object %s", msg);
+            return NULL;
+        }
+        Py_XDECREF(doc_attr);
 
+        if (PyObject_SetAttrString(obj, "__doc__", str) < 0) {
+            PyErr_SetString(PyExc_TypeError,
+                            "Cannot set a docstring for that object");
+            return NULL;
+        }
+        Py_INCREF(Py_None);
+        return Py_None;
+    }
+
 #undef _TESTDOC1
 #undef _TESTDOC2
 #undef _ADDDOC
@@ -533,35 +544,6 @@
 }
 
 
-static char packbits_doc[] =
-  "out = numpy.packbits(myarray, axis=None)\n\n"
-  "  myarray : an integer type array whose elements should be packed to bits\n\n"
-  "   This routine packs the elements of a binary-valued dataset into a\n"
-  "   NumPy array of type uint8 ('B') whose bits correspond to\n"
-  "   the logical (0 or nonzero) value of the input elements.\n"
-  "   The dimension over-which bit-packing is done is given by axis.\n"
-  "   The shape of the output has the same number of dimensions as the input\n"
-  "   (unless axis is None, in which case the output is 1-d).\n"
-  "\n"
-  "     Example:\n"
-  "     >>> a = array([[[1,0,1],\n"
-  "     ...             [0,1,0]],\n"
-  "     ...            [[1,1,0],\n"
-  "     ...             [0,0,1]]])\n"
-  "     >>> b = numpy.packbits(a,axis=-1)\n"
-  "     >>> b\n"
-  "     array([[[160],[64]],[[192],[32]]], dtype=uint8)\n\n"
-  "     Note that 160 = 128 + 32\n"
-  "               192 = 128 + 64\n";
-
-static char unpackbits_doc[] =
-  "out = numpy.unpackbits(myarray, axis=None)\n\n"
-  "     myarray - array of uint8 type where each element represents a bit-field\n"
-  "        that should be unpacked into a boolean output array\n\n"
-  "        The shape of the output array is either 1-d (if axis is None) or\n"
-  "        the same shape as the input array with unpacking done along the\n"
-  "        axis specified.";
-
 /*  PACKBITS
 
     This function packs binary (0 or 1) 1-bit per pixel arrays
@@ -809,9 +791,9 @@
     {"add_docstring", (PyCFunction)arr_add_docstring, METH_VARARGS,
      NULL},
     {"packbits",  (PyCFunction)io_pack,       METH_VARARGS | METH_KEYWORDS,
-     packbits_doc},
+     NULL},
     {"unpackbits", (PyCFunction)io_unpack,     METH_VARARGS | METH_KEYWORDS,
-     unpackbits_doc},
+     NULL},
     {NULL, NULL}    /* sentinel */
 };
 

Copied: branches/dynamic_cpu_configuration/numpy/lib/tests/test__iotools.py (from rev 6368, trunk/numpy/lib/tests/test__iotools.py)

Modified: branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -430,6 +430,44 @@
         #check integral of normal equals 1
         assert_almost_equal(sum(r,axis=0),1,7)
 
+    def test_ndim(self):
+        x = linspace(0, 1, 3)
+        y = linspace(0, 2, 8)
+        z = linspace(0, 3, 13)
+
+        wx = ones_like(x) * (x[1]-x[0])
+        wx[0] /= 2
+        wx[-1] /= 2
+        wy = ones_like(y) * (y[1]-y[0])
+        wy[0] /= 2
+        wy[-1] /= 2
+        wz = ones_like(z) * (z[1]-z[0])
+        wz[0] /= 2
+        wz[-1] /= 2
+
+        q = x[:,None,None] + y[None,:,None] + z[None,None,:]
+
+        qx = (q*wx[:,None,None]).sum(axis=0)
+        qy = (q*wy[None,:,None]).sum(axis=1)
+        qz = (q*wz[None,None,:]).sum(axis=2)
+
+        # n-d `x`
+        r = trapz(q, x=x[:,None,None], axis=0)
+        assert_almost_equal(r, qx)
+        r = trapz(q, x=y[None,:,None], axis=1)
+        assert_almost_equal(r, qy)
+        r = trapz(q, x=z[None,None,:], axis=2)
+        assert_almost_equal(r, qz)
+
+        # 1-d `x`
+        r = trapz(q, x=x, axis=0)
+        assert_almost_equal(r, qx)
+        r = trapz(q, x=y, axis=1)
+        assert_almost_equal(r, qy)
+        r = trapz(q, x=z, axis=2)
+        assert_almost_equal(r, qz)
+
+
 class TestSinc(TestCase):
     def test_simple(self):
         assert(sinc(0)==1)

Modified: branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -51,5 +51,9 @@
             assert_equal(iinfo(T).max, T(-1))
 
 
+def test_instances():
+    iinfo(10)
+    finfo(3.0)
+
 if __name__ == "__main__":
     run_module_suite()

Modified: branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,10 +1,17 @@
-from numpy.testing import *
+
 import numpy as np
+import numpy.ma as ma
+from numpy.ma.testutils import *
+
 import StringIO
 
 from tempfile import NamedTemporaryFile
+import sys
 
-class RoundtripTest:
+
+MAJVER, MINVER = sys.version_info[:2]
+
+class RoundtripTest(object):
     def roundtrip(self, save_func, *args, **kwargs):
         """
         save_func : callable
@@ -25,7 +32,14 @@
         file_on_disk = kwargs.get('file_on_disk', False)
 
         if file_on_disk:
-            target_file = NamedTemporaryFile()
+            # Do not delete the file on windows, because we can't
+            # reopen an already opened file on that platform, so we
+            # need to close the file and reopen it, implying no
+            # automatic deletion.
+            if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
+                target_file = NamedTemporaryFile(delete=False)
+            else:
+                target_file = NamedTemporaryFile()
             load_file = target_file.name
         else:
             target_file = StringIO.StringIO()
@@ -37,6 +51,9 @@
         target_file.flush()
         target_file.seek(0)
 
+        if sys.platform == 'win32' and not isinstance(target_file, StringIO.StringIO):
+            target_file.close()
+
         arr_reloaded = np.load(load_file, **load_kwds)
 
         self.arr = arr
@@ -319,7 +336,6 @@
         assert_array_equal(x, a)
 
     def test_record_2(self):
-        return # pass this test until #736 is resolved
         c = StringIO.StringIO()
         c.write('1312 foo\n1534 bar\n4444 qux')
         c.seek(0)
@@ -341,5 +357,448 @@
         assert_array_equal(x, a)
 
 
+#####--------------------------------------------------------------------------
+
+
+class TestFromTxt(TestCase):
+    #
+    def test_record(self):
+        "Test w/ explicit dtype"
+        data = StringIO.StringIO('1 2\n3 4')
+#        data.seek(0)
+        test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
+        control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+        assert_equal(test, control)
+        #
+        data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0')
+#        data.seek(0)
+        descriptor = {'names': ('gender','age','weight'),
+                      'formats': ('S1', 'i4', 'f4')}
+        control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
+                           dtype=descriptor)
+        test = np.ndfromtxt(data, dtype=descriptor)
+        assert_equal(test, control)
+
+    def test_array(self):
+        "Test outputting a standard ndarray"
+        data = StringIO.StringIO('1 2\n3 4')
+        control = np.array([[1,2],[3,4]], dtype=int)
+        test = np.ndfromtxt(data, dtype=int)
+        assert_array_equal(test, control)
+        #
+        data.seek(0)
+        control = np.array([[1,2],[3,4]], dtype=float)
+        test = np.loadtxt(data, dtype=float)
+        assert_array_equal(test, control)
+
+    def test_1D(self):
+        "Test squeezing to 1D"
+        control = np.array([1, 2, 3, 4], int)
+        #
+        data = StringIO.StringIO('1\n2\n3\n4\n')
+        test = np.ndfromtxt(data, dtype=int)
+        assert_array_equal(test, control)
+        #
+        data = StringIO.StringIO('1,2,3,4\n')
+        test = np.ndfromtxt(data, dtype=int, delimiter=',')
+        assert_array_equal(test, control)
+
+    def test_comments(self):
+        "Test the stripping of comments"
+        control = np.array([1, 2, 3, 5], int)
+        # Comment on its own line
+        data = StringIO.StringIO('# comment\n1,2,3,5\n')
+        test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
+        assert_equal(test, control)
+        # Comment at the end of a line
+        data = StringIO.StringIO('1,2,3,5# comment\n')
+        test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
+        assert_equal(test, control)
+
+    def test_skiprows(self):
+        "Test row skipping"
+        control = np.array([1, 2, 3, 5], int)
+        #
+        data = StringIO.StringIO('comment\n1,2,3,5\n')
+        test = np.ndfromtxt(data, dtype=int, delimiter=',', skiprows=1)
+        assert_equal(test, control)
+        #
+        data = StringIO.StringIO('# comment\n1,2,3,5\n')
+        test = np.loadtxt(data, dtype=int, delimiter=',', skiprows=1)
+        assert_equal(test, control)
+
+    def test_header(self):
+        "Test retrieving a header"
+        data = StringIO.StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
+        test = np.ndfromtxt(data, dtype=None, names=True)
+        control = {'gender': np.array(['M', 'F']),
+                   'age': np.array([64.0, 25.0]),
+                   'weight': np.array([75.0, 60.0])}
+        assert_equal(test['gender'], control['gender'])
+        assert_equal(test['age'], control['age'])
+        assert_equal(test['weight'], control['weight'])
+
+    def test_auto_dtype(self):
+        "Test the automatic definition of the output dtype"
+        data = StringIO.StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
+        test = np.ndfromtxt(data, dtype=None)
+        control = [np.array(['A', 'BCD']),
+                   np.array([64, 25]),
+                   np.array([75.0, 60.0]),
+                   np.array([3+4j, 5+6j]),
+                   np.array([True, False]),]
+        assert_equal(test.dtype.names, ['f0','f1','f2','f3','f4'])
+        for (i, ctrl) in enumerate(control):
+            assert_equal(test['f%i' % i], ctrl)
+
+
+    def test_auto_dtype_uniform(self):
+        "Tests whether the output dtype can be uniformized"
+        data = StringIO.StringIO('1 2 3 4\n5 6 7 8\n')
+        test = np.ndfromtxt(data, dtype=None)
+        control = np.array([[1,2,3,4],[5,6,7,8]])
+        assert_equal(test, control)
+
+
+    def test_fancy_dtype(self):
+        "Check that a nested dtype isn't MIA"
+        data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n')
+        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+        test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
+        control = np.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype)
+        assert_equal(test, control)
+
+
+    def test_names_overwrite(self):
+        "Test overwriting the names of the dtype"
+        descriptor = {'names': ('g','a','w'),
+                      'formats': ('S1', 'i4', 'f4')}
+        data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0')
+        names = ('gender','age','weight')
+        test = np.ndfromtxt(data, dtype=descriptor, names=names)
+        descriptor['names'] = names
+        control = np.array([('M', 64.0, 75.0),
+                            ('F', 25.0, 60.0)], dtype=descriptor)
+        assert_equal(test, control)
+
+
+    def test_commented_header(self):
+        "Check that names can be retrieved even if the line is commented out."
+        data = StringIO.StringIO("""
+#gender age weight
+M   21  72.100000
+F   35  58.330000
+M   33  21.99
+        """)
+        # The # is part of the first name and should be deleted automatically.
+        test = np.genfromtxt(data, names=True, dtype=None)
+        ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
+                  dtype=[('gender','|S1'), ('age', int), ('weight', float)])
+        assert_equal(test, ctrl)
+        # Ditto, but we should get rid of the first element
+        data = StringIO.StringIO("""
+# gender age weight
+M   21  72.100000
+F   35  58.330000
+M   33  21.99
+        """)
+        test = np.genfromtxt(data, names=True, dtype=None)
+        assert_equal(test, ctrl)
+
+
+    def test_autonames_and_usecols(self):
+        "Tests names and usecols"
+        data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1')
+        test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
+                            names=True, dtype=None)
+        control = np.array(('aaaa', 45, 9.1),
+                           dtype=[('A', '|S4'), ('C', int), ('D', float)])
+        assert_equal(test, control)
+
+
+    def test_converters_with_usecols(self):
+        "Test the combination user-defined converters and usecol"
+        data = StringIO.StringIO('1,2,3,,5\n6,7,8,9,10\n')
+        test = np.ndfromtxt(data, dtype=int, delimiter=',',
+                            converters={3:lambda s: int(s or -999)},
+                            usecols=(1, 3, ))
+        control = np.array([[2,  -999], [7, 9]], int)
+        assert_equal(test, control)
+
+    def test_converters_with_usecols_and_names(self):
+        "Tests user-defined converters combined with usecols and names"
+        data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1')
+        test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
+                            dtype=None, converters={'C':lambda s: 2 * int(s)})
+        control = np.array(('aaaa', 90, 9.1),
+            dtype=[('A', '|S4'), ('C', int), ('D', float)])
+        assert_equal(test, control)
+
+    def test_converters_cornercases(self):
+        "Test the conversion to datetime."
+        from datetime import datetime
+        converter = {'date':lambda s: datetime.strptime(s,'%Y-%m-%d %H:%M:%SZ')}
+        data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0')
+        test = np.ndfromtxt(data, delimiter=',', dtype=None,
+                            names=['date','stid'], converters=converter)
+        control = np.array((datetime(2009,02,03,12,0), 72214.),
+                           dtype=[('date', np.object_), ('stid', float)])
+        assert_equal(test, control)
+
+
+    def test_unused_converter(self):
+        "Test whether unused converters are forgotten"
+        data = StringIO.StringIO("1 21\n  3 42\n")
+        test = np.ndfromtxt(data, usecols=(1,),
+                            converters={0: lambda s: int(s, 16)})
+        assert_equal(test, [21, 42])
+        #
+        data.seek(0)
+        test = np.ndfromtxt(data, usecols=(1,),
+                            converters={1: lambda s: int(s, 16)})
+        assert_equal(test, [33, 66])
+
+
+    def test_dtype_with_converters(self):
+        dstr = "2009; 23; 46"
+        test = np.ndfromtxt(StringIO.StringIO(dstr,),
+                            delimiter=";", dtype=float, converters={0:str})
+        control = np.array([('2009', 23., 46)],
+                           dtype=[('f0','|S4'), ('f1', float), ('f2', float)])
+        assert_equal(test, control)
+        test = np.ndfromtxt(StringIO.StringIO(dstr,),
+                            delimiter=";", dtype=float, converters={0:float})
+        control = np.array([2009., 23., 46],)
+        assert_equal(test, control)
+
+
+    def test_dtype_with_object(self):
+        "Test using an explicit dtype with an object"
+        from datetime import date
+        import time
+        data = """
+        1; 2001-01-01
+        2; 2002-01-31
+        """
+        ndtype = [('idx', int), ('code', np.object)]
+        func = lambda s: date(*(time.strptime(s.strip(), "%Y-%m-%d")[:3]))
+        converters = {1: func}
+        test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype,
+                             converters=converters)
+        control = np.array([(1, date(2001,1,1)), (2, date(2002,1,31))],
+                           dtype=ndtype)
+        assert_equal(test, control)
+        #
+        ndtype = [('nest', [('idx', int), ('code', np.object)])]
+        try:
+            test = np.genfromtxt(StringIO.StringIO(data), delimiter=";",
+                                 dtype=ndtype, converters=converters)
+        except NotImplementedError:
+            pass
+        else:
+            errmsg = "Nested dtype involving objects should be supported."
+            raise AssertionError(errmsg)
+
+
+    def test_userconverters_with_explicit_dtype(self):
+        "Test user_converters w/ explicit (standard) dtype"
+        data = StringIO.StringIO('skip,skip,2001-01-01,1.0,skip')
+        test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
+                             usecols=(2, 3), converters={2: str})
+        control = np.array([('2001-01-01', 1.)],
+                           dtype=[('', '|S10'), ('', float)])
+        assert_equal(test, control)
+
+
+    def test_spacedelimiter(self):
+        "Test space delimiter"
+        data = StringIO.StringIO("1  2  3  4   5\n6  7  8  9  10")
+        test = np.ndfromtxt(data)
+        control = np.array([[ 1., 2., 3., 4., 5.],
+                            [ 6., 7., 8., 9.,10.]])
+        assert_equal(test, control)
+
+
+    def test_missing(self):
+        data = StringIO.StringIO('1,2,3,,5\n')
+        test = np.ndfromtxt(data, dtype=int, delimiter=',', \
+                            converters={3:lambda s: int(s or -999)})
+        control = np.array([1, 2, 3, -999, 5], int)
+        assert_equal(test, control)
+
+
+    def test_usecols(self):
+        "Test the selection of columns"
+        # Select 1 column
+        control = np.array( [[1, 2], [3, 4]], float)
+        data = StringIO.StringIO()
+        np.savetxt(data, control)
+        data.seek(0)
+        test = np.ndfromtxt(data, dtype=float, usecols=(1,))
+        assert_equal(test, control[:, 1])
+        #
+        control = np.array( [[1, 2, 3], [3, 4, 5]], float)
+        data = StringIO.StringIO()
+        np.savetxt(data, control)
+        data.seek(0)
+        test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
+        assert_equal(test, control[:, 1:])
+        # Testing with arrays instead of tuples.
+        data.seek(0)
+        test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
+        assert_equal(test, control[:, 1:])
+        # Checking with dtypes defined converters.
+        data = StringIO.StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""")
+        names = ['stid', 'temp']
+        dtypes = ['S4', 'f8']
+        test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes))
+        assert_equal(test['stid'],  ["JOE",  "BOB"])
+        assert_equal(test['temp'],  [25.3,  27.9])
+
+
+    def test_empty_file(self):
+        "Test that an empty file raises the proper exception"
+        data = StringIO.StringIO()
+        assert_raises(IOError, np.ndfromtxt, data)
+
+
+    def test_fancy_dtype_alt(self):
+        "Check that a nested dtype isn't MIA"
+        data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n')
+        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+        test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
+        control = ma.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype)
+        assert_equal(test, control)
+
+
+    def test_withmissing(self):
+        data = StringIO.StringIO('A,B\n0,1\n2,N/A')
+        test = np.mafromtxt(data, dtype=None, delimiter=',', missing='N/A',
+                            names=True)
+        control = ma.array([(0, 1), (2, -1)],
+                           mask=[(False, False), (False, True)],
+                           dtype=[('A', np.int), ('B', np.int)])
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        #
+        data.seek(0)
+        test = np.mafromtxt(data, delimiter=',', missing='N/A', names=True)
+        control = ma.array([(0, 1), (2, -1)],
+                           mask=[[False, False], [False, True]],)
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+
+
+    def test_user_missing_values(self):
+        datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" 
+        data = StringIO.StringIO(datastr)
+        basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A')
+        mdtype = [('A', int), ('B', float), ('C', complex)]
+        #
+        test = np.mafromtxt(data, **basekwargs)
+        control = ma.array([(   0, 0.0,    0j), (1, -999, 1j),
+                            (  -9, 2.2, -999j), (3,  -99, 3j)],
+                            mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
+                            dtype=mdtype)
+        assert_equal(test, control)
+        #
+        data.seek(0)
+        test = np.mafromtxt(data, 
+                            missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
+        control = ma.array([(   0, 0.0,    0j), (1, -999, 1j),
+                            (  -9, 2.2, -999j), (3,  -99, 3j)],
+                            mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
+                            dtype=mdtype)
+        assert_equal(test, control)
+        #
+        data.seek(0)
+        test = np.mafromtxt(data, 
+                            missing_values={0:-9, 'B':-99, 'C':-999j},
+                            **basekwargs)
+        control = ma.array([(   0, 0.0,    0j), (1, -999, 1j),
+                            (  -9, 2.2, -999j), (3,  -99, 3j)],
+                            mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
+                            dtype=mdtype)
+        assert_equal(test, control)
+
+
+    def test_withmissing_float(self):
+        data = StringIO.StringIO('A,B\n0,1.5\n2,-999.00')
+        test = np.mafromtxt(data, dtype=None, delimiter=',', missing='-999.0',
+                            names=True,)
+        control = ma.array([(0, 1.5), (2, -1.)],
+                           mask=[(False, False), (False, True)],
+                           dtype=[('A', np.int), ('B', np.float)])
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+
+
+    def test_with_masked_column_uniform(self):
+        "Test masked column"
+        data = StringIO.StringIO('1 2 3\n4 5 6\n')
+        test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
+        control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0],[0, 1, 0]])
+        assert_equal(test, control)
+
+    def test_with_masked_column_various(self):
+        "Test masked column"
+        data = StringIO.StringIO('True 2 3\nFalse 5 6\n')
+        test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
+        control = ma.array([(1, 2, 3), (0, 5, 6)],
+                           mask=[(0, 1, 0),(0, 1, 0)],
+                           dtype=[('f0', bool), ('f1', bool), ('f2', int)])
+        assert_equal(test, control)
+
+
+    def test_recfromtxt(self):
+        #
+        data = StringIO.StringIO('A,B\n0,1\n2,3')
+        test = np.recfromtxt(data, delimiter=',', missing='N/A', names=True)
+        control = np.array([(0, 1), (2, 3)],
+                           dtype=[('A', np.int), ('B', np.int)])
+        self.failUnless(isinstance(test, np.recarray))
+        assert_equal(test, control)
+        #
+        data = StringIO.StringIO('A,B\n0,1\n2,N/A')
+        test = np.recfromtxt(data, dtype=None, delimiter=',', missing='N/A',
+                             names=True, usemask=True)
+        control = ma.array([(0, 1), (2, -1)],
+                           mask=[(False, False), (False, True)],
+                           dtype=[('A', np.int), ('B', np.int)])
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        assert_equal(test.A, [0, 2])
+
+
+    def test_recfromcsv(self):
+        #
+        data = StringIO.StringIO('A,B\n0,1\n2,3')
+        test = np.recfromcsv(data, missing='N/A',
+                             names=True, case_sensitive=True)
+        control = np.array([(0, 1), (2, 3)],
+                           dtype=[('A', np.int), ('B', np.int)])
+        self.failUnless(isinstance(test, np.recarray))
+        assert_equal(test, control)
+        #
+        data = StringIO.StringIO('A,B\n0,1\n2,N/A')
+        test = np.recfromcsv(data, dtype=None, missing='N/A',
+                             names=True, case_sensitive=True, usemask=True)
+        control = ma.array([(0, 1), (2, -1)],
+                           mask=[(False, False), (False, True)],
+                           dtype=[('A', np.int), ('B', np.int)])
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        assert_equal(test.A, [0, 2])
+        #
+        data = StringIO.StringIO('A,B\n0,1\n2,3')
+        test = np.recfromcsv(data, missing='N/A',)
+        control = np.array([(0, 1), (2, 3)],
+                           dtype=[('a', np.int), ('b', np.int)])
+        self.failUnless(isinstance(test, np.recarray))
+        assert_equal(test, control)
+
+
+
+
 if __name__ == "__main__":
     run_module_suite()

Copied: branches/dynamic_cpu_configuration/numpy/lib/tests/test_recfunctions.py (from rev 6368, trunk/numpy/lib/tests/test_recfunctions.py)

Modified: branches/dynamic_cpu_configuration/numpy/lib/utils.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/lib/utils.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/lib/utils.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -699,11 +699,11 @@
 
             # import sub-packages
             if import_modules and hasattr(item, '__path__'):
-                for pth in item.__path__: 
-                    for mod_path in os.listdir(pth): 
-                        init_py = os.path.join(pth, mod_path, '__init__.py') 
+                for pth in item.__path__:
+                    for mod_path in os.listdir(pth):
+                        init_py = os.path.join(pth, mod_path, '__init__.py')
                         if not os.path.isfile(init_py):
-                            continue 
+                            continue
                         if _all is not None and mod_path not in _all:
                             continue
                         try:

Modified: branches/dynamic_cpu_configuration/numpy/linalg/linalg.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/linalg/linalg.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/linalg/linalg.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -9,7 +9,7 @@
 zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
 """
 
-__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 
+__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
            'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'det', 'svd',
            'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'LinAlgError']
 

Modified: branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -202,7 +202,7 @@
         assert_equal(matrix_power(A,2),A)
 
 
-class HermitianTestCase:
+class HermitianTestCase(object):
     def test_single(self):
         a = array([[1.,2.], [2.,1.]], dtype=single)
         self.do(a)

Modified: branches/dynamic_cpu_configuration/numpy/ma/core.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/core.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/core.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,20 +1,24 @@
 # pylint: disable-msg=E1002
-"""MA: a facility for dealing with missing observations
-MA is generally used as a numpy.array look-alike.
-by Paul F. Dubois.
+"""
+numpy.ma : a package to handle missing or invalid values.
 
+This package was initially written for numarray by Paul F. Dubois
+at Lawrence Livermore National Laboratory. 
+In 2006, the package was completely rewritten by Pierre Gerard-Marchant
+(University of Georgia) to make the MaskedArray class a subclass of ndarray,
+and to improve support of structured arrays.
+
+
 Copyright 1999, 2000, 2001 Regents of the University of California.
 Released for unlimited redistribution.
-Adapted for numpy_core 2005 by Travis Oliphant and
-(mainly) Paul Dubois.
+* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
+* Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant 
+  (pgmdevlist_AT_gmail_DOT_com)
+* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
 
-Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant.
-pgmdevlist_AT_gmail_DOT_com
-Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
+.. moduleauthor:: Pierre Gerard-Marchant
 
-:author: Pierre Gerard-Marchant
 
-
 """
 __author__ = "Pierre GF Gerard-Marchant"
 __docformat__ = "restructuredtext en"
@@ -33,7 +37,8 @@
            'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps',
            'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
            'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide',
-           'fix_invalid', 'frombuffer', 'fromfunction',
+           'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex',
+           'fromfunction',
            'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal',
            'harden_mask', 'hypot',
            'identity', 'ids', 'indices', 'inner', 'innerproduct',
@@ -54,7 +59,7 @@
            'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
            'right_shift', 'round_', 'round',
            'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue',
-           'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', 
+           'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
            'swapaxes',
            'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
            'var', 'where',
@@ -152,7 +157,7 @@
 
     """
     if hasattr(obj,'dtype'):
-        defval = default_filler[obj.dtype.kind]
+        defval = _check_fill_value(None, obj.dtype)
     elif isinstance(obj, np.dtype):
         if obj.subdtype:
             defval = default_filler[obj.subdtype[0].kind]
@@ -170,6 +175,18 @@
         defval = default_filler['O']
     return defval
 
+
+def _recursive_extremum_fill_value(ndtype, extremum):
+    names = ndtype.names
+    if names:
+        deflist = []
+        for name in names:
+            fval = _recursive_extremum_fill_value(ndtype[name], extremum)
+            deflist.append(fval)
+        return tuple(deflist)
+    return extremum[ndtype]
+
+
 def minimum_fill_value(obj):
     """
     Calculate the default fill value suitable for taking the minimum of ``obj``.
@@ -177,11 +194,7 @@
     """
     errmsg = "Unsuitable type for calculating minimum."
     if hasattr(obj, 'dtype'):
-        objtype = obj.dtype
-        filler = min_filler[objtype]
-        if filler is None:
-            raise TypeError(errmsg)
-        return filler
+        return _recursive_extremum_fill_value(obj.dtype, min_filler)
     elif isinstance(obj, float):
         return min_filler[ntypes.typeDict['float_']]
     elif isinstance(obj, int):
@@ -193,6 +206,7 @@
     else:
         raise TypeError(errmsg)
 
+
 def maximum_fill_value(obj):
     """
     Calculate the default fill value suitable for taking the maximum of ``obj``.
@@ -200,11 +214,7 @@
     """
     errmsg = "Unsuitable type for calculating maximum."
     if hasattr(obj, 'dtype'):
-        objtype = obj.dtype
-        filler = max_filler[objtype]
-        if filler is None:
-            raise TypeError(errmsg)
-        return filler
+        return _recursive_extremum_fill_value(obj.dtype, max_filler)
     elif isinstance(obj, float):
         return max_filler[ntypes.typeDict['float_']]
     elif isinstance(obj, int):
@@ -217,6 +227,28 @@
         raise TypeError(errmsg)
 
 
+def _recursive_set_default_fill_value(dtypedescr):
+    deflist = []
+    for currentdescr in dtypedescr:
+        currenttype = currentdescr[1]
+        if isinstance(currenttype, list):
+            deflist.append(tuple(_recursive_set_default_fill_value(currenttype)))
+        else:
+            deflist.append(default_fill_value(np.dtype(currenttype)))
+    return tuple(deflist)
+
+def _recursive_set_fill_value(fillvalue, dtypedescr):
+    fillvalue = np.resize(fillvalue, len(dtypedescr))
+    output_value = []
+    for (fval, descr) in zip(fillvalue, dtypedescr):
+        cdtype = descr[1]
+        if isinstance(cdtype, list):
+            output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
+        else:
+            output_value.append(np.array(fval, dtype=cdtype).item())
+    return tuple(output_value)
+
+
 def _check_fill_value(fill_value, ndtype):
     """
     Private function validating the given `fill_value` for the given dtype.
@@ -233,10 +265,9 @@
     fields = ndtype.fields
     if fill_value is None:
         if fields:
-            fdtype = [(_[0], _[1]) for _ in ndtype.descr]
-            fill_value = np.array(tuple([default_fill_value(fields[n][0])
-                                         for n in ndtype.names]),
-                                  dtype=fdtype)
+            descr = ndtype.descr
+            fill_value = np.array(_recursive_set_default_fill_value(descr),
+                                  dtype=ndtype,)
         else:
             fill_value = default_fill_value(ndtype)
     elif fields:
@@ -248,10 +279,9 @@
                 err_msg = "Unable to transform %s to dtype %s"
                 raise ValueError(err_msg % (fill_value, fdtype))
         else:
-            fval = np.resize(fill_value, len(ndtype.descr))
-            fill_value = [np.asarray(f).astype(desc[1]).item()
-                          for (f, desc) in zip(fval, ndtype.descr)]
-            fill_value = np.array(tuple(fill_value), copy=False, dtype=fdtype)
+            descr = ndtype.descr
+            fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
+                                  dtype=ndtype)
     else:
         if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'):
             fill_value = default_fill_value(ndtype)
@@ -315,7 +345,7 @@
 def filled(a, fill_value = None):
     """
     Return `a` as an array where masked data have been replaced by `value`.
-    
+
     If `a` is not a MaskedArray, `a` itself is returned.
     If `a` is a MaskedArray and `fill_value` is None, `fill_value` is set to
     `a.fill_value`.
@@ -367,7 +397,7 @@
     return rcls
 
 #####--------------------------------------------------------------------------
-def get_data(a, subok=True):
+def getdata(a, subok=True):
     """
     Return the `_data` part of `a` if `a` is a MaskedArray, or `a` itself.
 
@@ -384,8 +414,8 @@
     if not subok:
         return data.view(ndarray)
     return data
+get_data = getdata
 
-getdata = get_data
 
 def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
     """
@@ -586,7 +616,7 @@
 
     def __call__ (self, a, b, *args, **kwargs):
         "Execute the call behavior."
-        m = mask_or(getmask(a), getmask(b))
+        m = mask_or(getmask(a), getmask(b), shrink=False)
         (da, db) = (getdata(a), getdata(b))
         # Easy case: there's no mask...
         if m is nomask:
@@ -597,8 +627,12 @@
         # Transforms to a (subclass of) MaskedArray if we don't have a scalar
         if result.shape:
             result = result.view(get_masked_subclass(a, b))
-            result._mask = make_mask_none(result.shape)
-            result._mask.flat = m
+            # If we have a mask, make sure it's broadcasted properly
+            if m.any():
+                result._mask = mask_or(getmaskarray(a), getmaskarray(b))
+            # If some initial masks were not shrunk, don't shrink the result
+            elif m.shape:
+                result._mask = make_mask_none(result.shape, result.dtype)
             if isinstance(a, MaskedArray):
                 result._update_from(a)
             if isinstance(b, MaskedArray):
@@ -607,7 +641,7 @@
         elif m:
             return masked
         return result
-#        
+#
 #        result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a, b))
 #        if len(result.shape):
 #            if m is not nomask:
@@ -724,18 +758,19 @@
     def __call__(self, a, b, *args, **kwargs):
         "Execute the call behavior."
         ma = getmask(a)
-        mb = getmask(b)
+        mb = getmaskarray(b)
         da = getdata(a)
         db = getdata(b)
         t = narray(self.domain(da, db), copy=False)
         if t.any(None):
-            mb = mask_or(mb, t)
+            mb = mask_or(mb, t, shrink=False)
             # The following line controls the domain filling
             if t.size == db.size:
                 db = np.where(t, self.filly, db)
             else:
                 db = np.where(np.resize(t, db.shape), self.filly, db)
-        m = mask_or(ma, mb)
+        # Shrink m if a.mask was nomask, otherwise don't.
+        m = mask_or(ma, mb, shrink=(getattr(a, '_mask', nomask) is nomask))
         if (not m.ndim) and m:
             return masked
         elif (m is nomask):
@@ -744,7 +779,12 @@
             result = np.where(m, da, self.f(da, db, *args, **kwargs))
         if result.shape:
             result = result.view(get_masked_subclass(a, b))
-            result._mask = m
+            # If we have a mask, make sure it's broadcasted properly
+            if m.any():
+                result._mask = mask_or(getmaskarray(a), mb)
+            # If some initial masks were not shrunk, don't shrink the result
+            elif m.shape:
+                result._mask = make_mask_none(result.shape, result.dtype)
             if isinstance(a, MaskedArray):
                 result._update_from(a)
             if isinstance(b, MaskedArray):
@@ -832,36 +872,37 @@
 #---- --- Mask creation functions ---
 #####--------------------------------------------------------------------------
 
+def _recursive_make_descr(datatype, newtype=bool_):
+    "Private function allowing recursion in make_descr."
+    # Do we have some name fields ?
+    if datatype.names:
+        descr = []
+        for name in datatype.names:
+            field = datatype.fields[name]
+            if len(field) == 3:
+                # Prepend the title to the name
+                name = (field[-1], name)
+            descr.append((name, _recursive_make_descr(field[0], newtype)))
+        return descr
+    # Is this some kind of composite a la (np.float,2)
+    elif datatype.subdtype:
+        mdescr = list(datatype.subdtype)
+        mdescr[0] = newtype
+        return tuple(mdescr)
+    else:
+        return newtype
+
 def make_mask_descr(ndtype):
     """Constructs a dtype description list from a given dtype.
     Each field is set to a bool.
 
     """
-    def _make_descr(datatype):
-        "Private function allowing recursion."
-        # Do we have some name fields ?
-        if datatype.names:
-            descr = []
-            for name in datatype.names:
-                field = datatype.fields[name]
-                if len(field) == 3:
-                    # Prepend the title to the name
-                    name = (field[-1], name)
-                descr.append((name, _make_descr(field[0])))
-            return descr
-        # Is this some kind of composite a la (np.float,2)
-        elif datatype.subdtype:
-            mdescr = list(datatype.subdtype)
-            mdescr[0] = np.dtype(bool)
-            return tuple(mdescr)
-        else:
-            return np.bool
     # Make sure we do have a dtype
     if not isinstance(ndtype, np.dtype):
         ndtype = np.dtype(ndtype)
-    return np.dtype(_make_descr(ndtype))
+    return np.dtype(_recursive_make_descr(ndtype, np.bool))
 
-def get_mask(a):
+def getmask(a):
     """Return the mask of a, if any, or nomask.
 
     To get a full array of booleans of the same shape as a, use
@@ -869,7 +910,7 @@
 
     """
     return getattr(a, '_mask', nomask)
-getmask = get_mask
+get_mask = getmask
 
 def getmaskarray(arr):
     """Return the mask of arr, if any, or a boolean array of the shape
@@ -988,7 +1029,17 @@
     ValueError
         If m1 and m2 have different flexible dtypes.
 
-     """
+    """
+    def _recursive_mask_or(m1, m2, newmask):
+        names = m1.dtype.names
+        for name in names:
+            current1 = m1[name]
+            if current1.dtype.names:
+                _recursive_mask_or(current1, m2[name], newmask[name])
+            else:
+                umath.logical_or(current1, m2[name], newmask[name])
+        return
+    #
     if (m1 is nomask) or (m1 is False):
         dtype = getattr(m2, 'dtype', MaskType)
         return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
@@ -1002,8 +1053,7 @@
         raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
     if dtype1.names:
         newmask = np.empty_like(m1)
-        for n in dtype1.names:
-            newmask[n] = umath.logical_or(m1[n], m2[n])
+        _recursive_mask_or(m1, m2, newmask)
         return newmask
     return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
 
@@ -1012,7 +1062,7 @@
     """
     Returns a completely flattened version of the mask, where nested fields
     are collapsed.
-    
+
     Parameters
     ----------
     mask : array_like
@@ -1035,7 +1085,7 @@
     >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
     >>> flatten_mask(mask)
     array([False, False, False, False, False,  True], dtype=bool)
-    
+
     """
     #
     def _flatmask(mask):
@@ -1069,7 +1119,7 @@
 
 def masked_where(condition, a, copy=True):
     """
-    Return ``a`` as an array masked where ``condition`` is True.
+    Return ``a`` as an array masked where ``condition`` is ``True``.
     Masked values of ``a`` or ``condition`` are kept.
 
     Parameters
@@ -1099,35 +1149,45 @@
     result._mask = cond
     return result
 
+
 def masked_greater(x, value, copy=True):
     """
-    Return the array `x` masked where (x > value).
+    Return the array `x` masked where ``(x > value)``.
     Any value of mask already masked is kept masked.
 
     """
     return masked_where(greater(x, value), x, copy=copy)
 
+
 def masked_greater_equal(x, value, copy=True):
-    "Shortcut to masked_where, with condition = (x >= value)."
+    "Shortcut to masked_where, with condition ``(x >= value)``."
     return masked_where(greater_equal(x, value), x, copy=copy)
 
+
 def masked_less(x, value, copy=True):
-    "Shortcut to masked_where, with condition = (x < value)."
+    "Shortcut to masked_where, with condition ``(x < value)``."
     return masked_where(less(x, value), x, copy=copy)
 
+
 def masked_less_equal(x, value, copy=True):
-    "Shortcut to masked_where, with condition = (x <= value)."
+    "Shortcut to masked_where, with condition ``(x <= value)``."
     return masked_where(less_equal(x, value), x, copy=copy)
 
+
 def masked_not_equal(x, value, copy=True):
-    "Shortcut to masked_where, with condition = (x != value)."
+    "Shortcut to masked_where, with condition ``(x != value)``."
     return masked_where(not_equal(x, value), x, copy=copy)
 
+
 def masked_equal(x, value, copy=True):
     """
-    Shortcut to masked_where, with condition = (x == value).  For
-    floating point, consider ``masked_values(x, value)`` instead.
+    Shortcut to masked_where, with condition ``(x == value)``.
 
+    See Also
+    --------
+    masked_where : base function
+    masked_values : equivalent function for floats.
+
     """
     # An alternative implementation relies on filling first: probably not needed.
     # d = filled(x, 0)
@@ -1136,6 +1196,7 @@
     # return array(d, mask=m, copy=copy)
     return masked_where(equal(x, value), x, copy=copy)
 
+
 def masked_inside(x, v1, v2, copy=True):
     """
     Shortcut to masked_where, where ``condition`` is True for x inside
@@ -1153,6 +1214,7 @@
     condition = (xf >= v1) & (xf <= v2)
     return masked_where(condition, x, copy=copy)
 
+
 def masked_outside(x, v1, v2, copy=True):
     """
     Shortcut to ``masked_where``, where ``condition`` is True for x outside
@@ -1170,7 +1232,7 @@
     condition = (xf < v1) | (xf > v2)
     return masked_where(condition, x, copy=copy)
 
-#
+
 def masked_object(x, value, copy=True, shrink=True):
     """
     Mask the array `x` where the data are exactly equal to value.
@@ -1199,6 +1261,7 @@
     mask = mask_or(mask, make_mask(condition, shrink=shrink))
     return masked_array(x, mask=mask, copy=copy, fill_value=value)
 
+
 def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True, shrink=True):
     """
     Mask the array x where the data are approximately equal in
@@ -1236,6 +1299,7 @@
     mask = mask_or(mask, make_mask(condition, shrink=shrink))
     return masked_array(xnew, mask=mask, copy=copy, fill_value=value)
 
+
 def masked_invalid(a, copy=True):
     """
     Mask the array for invalid values (NaNs or infs).
@@ -1257,6 +1321,7 @@
 #####--------------------------------------------------------------------------
 #---- --- Printing options ---
 #####--------------------------------------------------------------------------
+
 class _MaskedPrintOption:
     """
     Handle the string used to represent missing data in a masked array.
@@ -1291,10 +1356,65 @@
 #if you single index into a masked location you get this object.
 masked_print_option = _MaskedPrintOption('--')
 
+
+def _recursive_printoption(result, mask, printopt):
+    """
+    Puts printoptions in result where mask is True.
+    Private function allowing for recursion
+    """
+    names = result.dtype.names
+    for name in names:
+        (curdata, curmask) = (result[name], mask[name])
+        if curdata.dtype.names:
+            _recursive_printoption(curdata, curmask, printopt)
+        else:
+            np.putmask(curdata, curmask, printopt)
+    return
+
+_print_templates = dict(long = """\
+masked_%(name)s(data =
+ %(data)s,
+       %(nlen)s mask =
+ %(mask)s,
+ %(nlen)s fill_value = %(fill)s)
+""",
+                        short = """\
+masked_%(name)s(data = %(data)s,
+       %(nlen)s mask = %(mask)s,
+%(nlen)s  fill_value = %(fill)s)
+""",
+                        long_flx = """\
+masked_%(name)s(data =
+ %(data)s,
+       %(nlen)s mask =
+ %(mask)s,
+%(nlen)s  fill_value = %(fill)s,
+      %(nlen)s dtype = %(dtype)s)
+""",
+                        short_flx = """\
+masked_%(name)s(data = %(data)s,
+%(nlen)s        mask = %(mask)s,
+%(nlen)s  fill_value = %(fill)s,
+%(nlen)s       dtype = %(dtype)s)
+""")
+
 #####--------------------------------------------------------------------------
 #---- --- MaskedArray class ---
 #####--------------------------------------------------------------------------
 
+def _recursive_filled(a, mask, fill_value):
+    """
+    Recursively fill `a` with `fill_value`.
+    Private function
+    """
+    names = a.dtype.names
+    for name in names:
+        current = a[name]
+        if current.dtype.names:
+            _recursive_filled(current, mask[name], fill_value[name])
+        else:
+            np.putmask(current, mask[name], fill_value[name])
+
 #...............................................................................
 class _arraymethod(object):
     """
@@ -1349,17 +1469,17 @@
             elif mask is not nomask:
                 result.__setmask__(getattr(mask, methodname)(*args, **params))
         else:
-            if mask.ndim and mask.all():
+            if mask.ndim and (not mask.dtype.names and mask.all()):
                 return masked
         return result
 #..........................................................
 
-class FlatIter(object):
+class MaskedIterator(object):
     "Define an interator."
     def __init__(self, ma):
         self.ma = ma
-        self.ma_iter = np.asarray(ma).flat
-
+        self.dataiter = ma._data.flat
+        #
         if ma._mask is nomask:
             self.maskiter = None
         else:
@@ -1368,19 +1488,79 @@
     def __iter__(self):
         return self
 
+    def __getitem__(self, indx):
+        result = self.dataiter.__getitem__(indx).view(type(self.ma))
+        if self.maskiter is not None:
+            _mask = self.maskiter.__getitem__(indx)
+            _mask.shape = result.shape
+            result._mask = _mask
+        return result
+
     ### This won't work is ravel makes a copy
     def __setitem__(self, index, value):
-        a = self.ma.ravel()
-        a[index] = value
+        self.dataiter[index] = getdata(value)
+        if self.maskiter is not None:
+            self.maskiter[index] = getmaskarray(value)
+#        self.ma1d[index] = value
 
     def next(self):
         "Returns the next element of the iterator."
-        d = self.ma_iter.next()
+        d = self.dataiter.next()
         if self.maskiter is not None and self.maskiter.next():
             d = masked
         return d
 
 
+def flatten_structured_array(a):
+    """
+    Flatten a structured array.
+
+    The datatype of the output is the largest datatype of the (nested) fields.
+
+    Returns
+    -------
+    output : var
+        Flattened MaskedArray if the input is a MaskedArray,
+        standard ndarray otherwise.
+
+    Examples
+    --------
+    >>> ndtype = [('a', int), ('b', float)]
+    >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
+    >>> flatten_structured_array(a)
+    array([[1., 1.],
+           [2., 2.]])
+
+    """
+    #
+    def flatten_sequence(iterable):
+        """Flattens a compound of nested iterables."""
+        for elm in iter(iterable):
+            if hasattr(elm,'__iter__'):
+                for f in flatten_sequence(elm):
+                    yield f
+            else:
+                yield elm
+    #
+    a = np.asanyarray(a)
+    inishape = a.shape
+    a = a.ravel()
+    if isinstance(a, MaskedArray):
+        out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
+        out = out.view(MaskedArray)
+        out._mask = np.array([tuple(flatten_sequence(d.item()))
+                              for d in getmaskarray(a)])
+    else:
+        out = np.array([tuple(flatten_sequence(d.item())) for d in a])
+    if len(inishape) > 1:
+        newshape = list(out.shape)
+        newshape[0] = inishape
+        out.shape = tuple(flatten_sequence(newshape))
+    return out
+
+
+
+
 class MaskedArray(ndarray):
     """
     Arrays with possibly masked values.  Masked values of True
@@ -1394,32 +1574,32 @@
     ----------
     data : {var}
         Input data.
-    mask : {nomask, sequence}
+    mask : {nomask, sequence}, optional
         Mask.  Must be convertible to an array of booleans with
         the same shape as data: True indicates a masked (eg.,
         invalid) data.
-    dtype : dtype
-        Data type of the output. If None, the type of the data
-        argument is used.  If dtype is not None and different from
-        data.dtype, a copy is performed.
-    copy : bool
-        Whether to copy the input data (True), or to use a
-        reference instead.  Note: data are NOT copied by default.
-    subok : {True, boolean}
+    dtype : {dtype}, optional
+        Data type of the output.
+        If dtype is None, the type of the data argument (`data.dtype`) is used.
+        If dtype is not None and different from `data.dtype`, a copy is performed.
+    copy : {False, True}, optional
+        Whether to copy the input data (True), or to use a reference instead.
+        Note: data are NOT copied by default.
+    subok : {True, False}, optional
         Whether to return a subclass of MaskedArray (if possible)
         or a plain MaskedArray.
-    ndmin : {0, int}
+    ndmin : {0, int}, optional
         Minimum number of dimensions
-    fill_value : {var}
-        Value used to fill in the masked values when necessary. If
-        None, a default based on the datatype is used.
-    keep_mask : {True, boolean}
+    fill_value : {var}, optional
+        Value used to fill in the masked values when necessary.
+        If None, a default based on the datatype is used.
+    keep_mask : {True, boolean}, optional
         Whether to combine mask with the mask of the input data,
         if any (True), or to use only mask for the output (False).
-    hard_mask : {False, boolean}
-        Whether to use a hard mask or not. With a hard mask,
-        masked values cannot be unmasked.
-    shrink : {True, boolean}
+    hard_mask : {False, boolean}, optional
+        Whether to use a hard mask or not.
+        With a hard mask, masked values cannot be unmasked.
+    shrink : {True, boolean}, optional
         Whether to force compression of an empty mask.
 
     """
@@ -1433,10 +1613,12 @@
                 subok=True, ndmin=0, fill_value=None,
                 keep_mask=True, hard_mask=None, flag=None, shrink=True,
                 **options):
-        """Create a new masked array from scratch.
+        """
+    Create a new masked array from scratch.
 
-        Note: you can also create an array with the .view(MaskedArray)
-        method.
+    Notes
+    -----
+    A masked array can also be created by taking a .view(MaskedArray).
 
         """
         if flag is not None:
@@ -1600,7 +1782,8 @@
         return
     #..................................
     def __array_wrap__(self, obj, context=None):
-        """Special hook for ufuncs.
+        """
+        Special hook for ufuncs.
         Wraps the numpy array and sets the mask according to context.
         """
         result = obj.view(type(self))
@@ -1613,10 +1796,11 @@
             # Get the domain mask................
             domain = ufunc_domain.get(func, None)
             if domain is not None:
+                # Take the domain, and make sure it's an ndarray
                 if len(args) > 2:
-                    d = reduce(domain, args)
+                    d = filled(reduce(domain, args), True)
                 else:
-                    d = domain(*args)
+                    d = filled(domain(*args), True)
                 # Fill the result where the domain is wrong
                 try:
                     # Binary domain: take the last value
@@ -1634,7 +1818,8 @@
                     if d is not nomask:
                         m = d
                 else:
-                    m |= d
+                    # Don't modify inplace, we risk back-propagation
+                    m = (m | d)
             # Make sure the mask has the proper size
             if result.shape == () and m:
                 return masked
@@ -1666,7 +1851,7 @@
             if dtype is None:
                 dtype = output.dtype
             mdtype = make_mask_descr(dtype)
-            
+
             output._mask = self._mask.view(mdtype, ndarray)
             output._mask.shape = output.shape
         # Make sure to reset the _fill_value if needed
@@ -1833,7 +2018,8 @@
             ndarray.__setitem__(_data, indx, dindx)
             _mask[indx] = mindx
         return
-    #............................................
+
+
     def __getslice__(self, i, j):
         """x.__getslice__(i, j) <==> x[i:j]
 
@@ -1842,7 +2028,8 @@
 
         """
         return self.__getitem__(slice(i, j))
-    #........................
+
+
     def __setslice__(self, i, j, value):
         """x.__setslice__(i, j, value) <==> x[i:j]=value
 
@@ -1851,7 +2038,8 @@
 
         """
         self.__setitem__(slice(i, j), value)
-    #............................................
+
+
     def __setmask__(self, mask, copy=False):
         """Set the mask.
 
@@ -1917,33 +2105,28 @@
 #        return self._mask.reshape(self.shape)
         return self._mask
     mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
-    #
-    def _getrecordmask(self):
-        """Return the mask of the records.
+
+
+    def _get_recordmask(self):
+        """
+    Return the mask of the records.
     A record is masked when all the fields are masked.
 
         """
         _mask = ndarray.__getattribute__(self, '_mask').view(ndarray)
         if _mask.dtype.names is None:
             return _mask
-        if _mask.size > 1:
-            axis = 1
-        else:
-            axis = None
-        #
-        try:
-            return _mask.view((bool_, len(self.dtype))).all(axis)
-        except ValueError:
-            return np.all([[f[n].all() for n in _mask.dtype.names]
-                           for f in _mask], axis=axis)
+        return np.all(flatten_structured_array(_mask), axis=-1)
 
-    def _setrecordmask(self):
+
+    def _set_recordmask(self):
         """Return the mask of the records.
     A record is masked when all the fields are masked.
 
         """
         raise NotImplementedError("Coming soon: setting the mask per records!")
-    recordmask = property(fget=_getrecordmask)
+    recordmask = property(fget=_get_recordmask)
+
     #............................................
     def harden_mask(self):
         """Force the mask to hard.
@@ -1957,6 +2140,10 @@
         """
         self._hardmask = False
 
+    hardmask = property(fget=lambda self: self._hardmask,
+                        doc="Hardness of the mask")
+
+
     def unshare_mask(self):
         """Copy the mask and set the sharedmask flag to False.
 
@@ -1965,6 +2152,9 @@
             self._mask = self._mask.copy()
             self._sharedmask = False
 
+    sharedmask = property(fget=lambda self: self._sharedmask,
+                          doc="Share status of the mask (read-only).")
+
     def shrink_mask(self):
         """Reduce a mask to nomask when possible.
 
@@ -1974,6 +2164,10 @@
             self._mask = nomask
 
     #............................................
+
+    baseclass = property(fget= lambda self:self._baseclass,
+                         doc="Class of the underlying data (read-only).")
+    
     def _get_data(self):
         """Return the current data, as a view of the original
         underlying data.
@@ -1996,7 +2190,7 @@
         """Return a flat iterator.
 
         """
-        return FlatIter(self)
+        return MaskedIterator(self)
     #
     def _set_flat (self, value):
         """Set a flattened version of self to value.
@@ -2027,24 +2221,25 @@
     fill_value = property(fget=get_fill_value, fset=set_fill_value,
                           doc="Filling value.")
 
+
     def filled(self, fill_value=None):
-        """Return a copy of self._data, where masked values are filled
-        with fill_value.
+        """
+    Return a copy of self, where masked values are filled with `fill_value`.
 
-        If fill_value is None, self.fill_value is used instead.
+    If `fill_value` is None, `self.fill_value` is used instead.
 
-        Notes
-        -----
-        + Subclassing is preserved
-        + The result is NOT a MaskedArray !
+    Notes
+    -----
+    + Subclassing is preserved
+    + The result is NOT a MaskedArray !
 
-        Examples
-        --------
-        >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
-        >>> x.filled()
-        array([1,2,-999,4,-999])
-        >>> type(x.filled())
-        <type 'numpy.ndarray'>
+    Examples
+    --------
+    >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+    >>> x.filled()
+    array([1,2,-999,4,-999])
+    >>> type(x.filled())
+    <type 'numpy.ndarray'>
 
         """
         m = self._mask
@@ -2061,9 +2256,7 @@
         #
         if m.dtype.names:
             result = self._data.copy()
-            for n in result.dtype.names:
-                field = result[n]
-                np.putmask(field, self._mask[n], fill_value[n])
+            _recursive_filled(result, self._mask, fill_value)
         elif not m.any():
             return self._data
         else:
@@ -2184,13 +2377,9 @@
                     res = self._data.astype("|O8")
                     res[m] = f
                 else:
-                    rdtype = [list(_) for _ in self.dtype.descr]
-                    for r in rdtype:
-                        r[1] = '|O8'
-                    rdtype = [tuple(_) for _ in rdtype]
+                    rdtype = _recursive_make_descr(self.dtype, "|O8")
                     res = self._data.astype(rdtype)
-                    for field in names:
-                        np.putmask(res[field], m[field], f)
+                    _recursive_printoption(res, m, f)
         else:
             res = self.filled(self.fill_value)
         return str(res)
@@ -2199,44 +2388,71 @@
         """Literal string representation.
 
         """
-        with_mask = """\
-masked_%(name)s(data =
- %(data)s,
-      mask =
- %(mask)s,
-      fill_value=%(fill)s)
-"""
-        with_mask1 = """\
-masked_%(name)s(data = %(data)s,
-      mask = %(mask)s,
-      fill_value=%(fill)s)
-"""
-        with_mask_flx = """\
-masked_%(name)s(data =
- %(data)s,
-      mask =
- %(mask)s,
-      fill_value=%(fill)s,
-      dtype=%(dtype)s)
-"""
-        with_mask1_flx = """\
-masked_%(name)s(data = %(data)s,
-      mask = %(mask)s,
-      fill_value=%(fill)s
-      dtype=%(dtype)s)
-"""
         n = len(self.shape)
         name = repr(self._data).split('(')[0]
-        parameters =  dict(name=name, data=str(self), mask=str(self._mask),
+        parameters =  dict(name=name, nlen=" "*len(name),
+                           data=str(self), mask=str(self._mask),
                            fill=str(self.fill_value), dtype=str(self.dtype))
         if self.dtype.names:
             if n <= 1:
-                return with_mask1_flx % parameters
-            return  with_mask_flx % parameters
+                return _print_templates['short_flx'] % parameters
+            return  _print_templates['long_flx'] % parameters
         elif n <= 1:
-            return with_mask1 % parameters
-        return with_mask % parameters
+            return _print_templates['short'] % parameters
+        return _print_templates['long'] % parameters
     #............................................
+    def __eq__(self, other):
+        "Check whether other equals self elementwise"
+        omask = getattr(other, '_mask', nomask)
+        if omask is nomask:
+            check = ndarray.__eq__(self.filled(0), other).view(type(self))
+            check._mask = self._mask
+        else:
+            odata = filled(other, 0)
+            check = ndarray.__eq__(self.filled(0), odata).view(type(self))
+            if self._mask is nomask:
+                check._mask = omask
+            else:
+                mask = mask_or(self._mask, omask)
+                if mask.dtype.names:
+                    if mask.size > 1:
+                        axis = 1
+                    else:
+                        axis = None
+                    try:
+                        mask = mask.view((bool_, len(self.dtype))).all(axis)
+                    except ValueError:
+                        mask =  np.all([[f[n].all() for n in mask.dtype.names]
+                                        for f in mask], axis=axis)
+                check._mask = mask
+        return check
+    #
+    def __ne__(self, other):
+        "Check whether other doesn't equal self elementwise"
+        omask = getattr(other, '_mask', nomask)
+        if omask is nomask:
+            check = ndarray.__ne__(self.filled(0), other).view(type(self))
+            check._mask = self._mask
+        else:
+            odata = filled(other, 0)
+            check = ndarray.__ne__(self.filled(0), odata).view(type(self))
+            if self._mask is nomask:
+                check._mask = omask
+            else:
+                mask = mask_or(self._mask, omask)
+                if mask.dtype.names:
+                    if mask.size > 1:
+                        axis = 1
+                    else:
+                        axis = None
+                    try:
+                        mask = mask.view((bool_, len(self.dtype))).all(axis)
+                    except ValueError:
+                        mask =  np.all([[f[n].all() for n in mask.dtype.names]
+                                        for f in mask], axis=axis)
+                check._mask = mask
+        return check
+    #
     def __add__(self, other):
         "Add other to self, and return a new masked array."
         return add(self, other)
@@ -2259,7 +2475,7 @@
     #
     def __rmul__(self, other):
         "Multiply other by self, and return a new masked array."
-        return multiply(other, self)
+        return multiply(self, other)
     #
     def __div__(self, other):
         "Divide other into self, and return a new masked array."
@@ -2281,7 +2497,9 @@
         "Add other to self in-place."
         m = getmask(other)
         if self._mask is nomask:
-            self._mask = m
+            if m is not nomask and m.any():
+                self._mask = make_mask_none(self.shape, self.dtype)
+                self._mask += m
         else:
             if m is not nomask:
                 self._mask += m
@@ -2292,7 +2510,9 @@
         "Subtract other from self in-place."
         m = getmask(other)
         if self._mask is nomask:
-            self._mask = m
+            if m is not nomask and m.any():
+                self._mask = make_mask_none(self.shape, self.dtype)
+                self._mask += m
         elif m is not nomask:
             self._mask += m
         ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
@@ -2302,7 +2522,9 @@
         "Multiply self by other in-place."
         m = getmask(other)
         if self._mask is nomask:
-            self._mask = m
+            if m is not nomask and m.any():
+                self._mask = make_mask_none(self.shape, self.dtype)
+                self._mask += m
         elif m is not nomask:
             self._mask += m
         ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
@@ -2324,7 +2546,7 @@
         return self
     #...
     def __ipow__(self, other):
-        "Raise self to the power other, in place"
+        "Raise self to the power other, in place."
         other_data = getdata(other)
         other_mask = getmask(other)
         ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
@@ -2494,25 +2716,24 @@
         return result
     #
     def resize(self, newshape, refcheck=True, order=False):
-        """Attempt to modify the size and the shape of the array in place.
+        """
+    Change shape and size of array in-place.
 
-        The array must own its own memory and not be referenced by
-        other arrays.
-
-        Returns
-        -------
-        None.
-
         """
-        try:
-            self._data.resize(newshape, refcheck, order)
-            if self.mask is not nomask:
-                self._mask.resize(newshape, refcheck, order)
-        except ValueError:
-            raise ValueError("Cannot resize an array that has been referenced "
-                             "or is referencing another array in this way.\n"
-                             "Use the resize function.")
-        return None
+        # Note : the 'order' keyword looks broken, let's just drop it
+#        try:
+#            ndarray.resize(self, newshape, refcheck=refcheck)
+#            if self.mask is not nomask:
+#                self._mask.resize(newshape, refcheck=refcheck)
+#        except ValueError:
+#            raise ValueError("Cannot resize an array that has been referenced "
+#                             "or is referencing another array in this way.\n"
+#                             "Use the numpy.ma.resize function.")
+#        return None
+        errmsg = "A masked array does not own its data "\
+                 "and therefore cannot be resized.\n" \
+                 "Use the numpy.ma.resize function instead."
+        raise ValueError(errmsg)
     #
     def put(self, indices, values, mode='raise'):
         """
@@ -3103,7 +3324,7 @@
     index_array : ndarray, int
         Array of indices that sort `a` along the specified axis.
         In other words, ``a[index_array]`` yields a sorted `a`.
-    
+
     See Also
     --------
     sort : Describes sorting algorithms used.
@@ -3430,7 +3651,7 @@
                 outmask = out._mask = make_mask_none(out.shape)
             outmask.flat = newmask
         else:
-            
+
             if out.dtype.kind in 'biu':
                 errmsg = "Masked data information would be lost in one or more"\
                          " location."
@@ -3547,9 +3768,10 @@
     def tofile(self, fid, sep="", format="%s"):
         raise NotImplementedError("Not implemented yet, sorry...")
 
-    def torecords(self):
+    def toflex(self):
         """
         Transforms a MaskedArray into a flexible-type array with two fields:
+
         * the ``_data`` field stores the ``_data`` part of the array;
         * the ``_mask`` field stores the ``_mask`` part of the array;
 
@@ -3592,6 +3814,7 @@
         record['_data'] = self._data
         record['_mask'] = self._mask
         return record
+    torecords = toflex
     #--------------------------------------------
     # Pickling
     def __getstate__(self):
@@ -3651,7 +3874,7 @@
 
     """
     _data = ndarray.__new__(baseclass, baseshape, basetype)
-    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
+    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
     return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
 
 
@@ -3975,12 +4198,12 @@
 def compressed(x):
     """
     Return a 1-D array of all the non-masked data.
-    
+
     See Also
     --------
     MaskedArray.compressed
         equivalent method
-    
+
     """
     if getmask(x) is nomask:
         return np.asanyarray(x)
@@ -4348,8 +4571,8 @@
     Returns the inner product of a and b for arrays of floating point types.
 
     Like the generic NumPy equivalent the product sum is over the last dimension
-    of a and b. 
-    
+    of a and b.
+
     Notes
     -----
     The first argument is not conjugated.
@@ -4384,7 +4607,8 @@
 outerproduct = outer
 
 def allequal (a, b, fill_value=True):
-    """Return True if all entries of a and b are equal, using
+    """
+    Return True if all entries of a and b are equal, using
     fill_value as a truth value where either or both are masked.
 
     """
@@ -4419,7 +4643,7 @@
     fill_value : boolean, optional
         Whether masked values in a or b are considered equal (True) or not
         (False).
-        
+
     rtol : Relative tolerance
         The relative difference is equal to `rtol` * `b`.
     atol : Absolute tolerance
@@ -4442,7 +4666,7 @@
     True.
 
      absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
-    
+
     Return True if all elements of a and b are equal subject to
     given tolerances.
 
@@ -4475,10 +4699,10 @@
     return np.all(d)
 
 #..............................................................................
-def asarray(a, dtype=None):
+def asarray(a, dtype=None, order=None):
     """
-    Convert the input to a masked array.
-    
+    Convert the input `a` to a masked array of the given datatype.
+
     Parameters
     ----------
     a : array_like
@@ -4490,29 +4714,40 @@
     order : {'C', 'F'}, optional
         Whether to use row-major ('C') or column-major ('FORTRAN') memory
         representation.  Defaults to 'C'.
-    
+
     Returns
     -------
     out : ndarray
         MaskedArray interpretation of `a`.  No copy is performed if the input
-        is already an ndarray.  If `a` is a subclass of ndarray, a base
-        class ndarray is returned.
-    Return a as a MaskedArray object of the given dtype.
-    If dtype is not given or None, is is set to the dtype of a.
-    No copy is performed if a is already an array.
-    Subclasses are converted to the base class MaskedArray.
+        is already an ndarray.  If `a` is a subclass of MaskedArray, a base
+        class MaskedArray is returned.
 
     """
     return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False)
 
 def asanyarray(a, dtype=None):
-    """asanyarray(data, dtype) = array(data, dtype, copy=0, subok=1)
+    """
+    Convert the input `a` to a masked array of the given datatype.
+    If `a` is a subclass of MaskedArray, its class is conserved.
 
-    Return a as an masked array.
-    If dtype is not given or None, is is set to the dtype of a.
-    No copy is performed if a is already an array.
-    Subclasses are conserved.
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.  This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation.  Defaults to 'C'.
 
+    Returns
+    -------
+    out : ndarray
+        MaskedArray interpretation of `a`.  No copy is performed if the input
+        is already an ndarray.
+
     """
     return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
 
@@ -4557,6 +4792,15 @@
     raise NotImplementedError("Not yet implemented. Sorry")
 
 
+def fromflex(fxarray):
+    """
+    Rebuilds a masked_array from a flexible-type array output by the '.toflex'
+    method.
+    """
+    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+
 class _convert2ma:
     """Convert functions from numpy to numpy.ma.
 

Modified: branches/dynamic_cpu_configuration/numpy/ma/extras.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/extras.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/extras.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -19,11 +19,14 @@
            'ediff1d',
            'flatnotmasked_contiguous', 'flatnotmasked_edges',
            'hsplit', 'hstack',
+           'intersect1d', 'intersect1d_nu',
            'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all',
            'masked_all_like', 'median', 'mr_',
            'notmasked_contiguous', 'notmasked_edges',
            'polyfit',
            'row_stack',
+           'setdiff1d', 'setmember1d', 'setxor1d',
+           'unique1d', 'union1d',
            'vander', 'vstack',
            ]
 
@@ -45,22 +48,19 @@
 #...............................................................................
 def issequence(seq):
     """Is seq a sequence (ndarray, list or tuple)?"""
-    if isinstance(seq, ndarray):
+    if isinstance(seq, (ndarray, tuple, list)):
         return True
-    elif isinstance(seq, tuple):
-        return True
-    elif isinstance(seq, list):
-        return True
     return False
 
 def count_masked(arr, axis=None):
-    """Count the number of masked elements along the given axis.
+    """
+    Count the number of masked elements along the given axis.
 
     Parameters
     ----------
-        axis : int, optional
-            Axis along which to count.
-            If None (default), a flattened version of the array is used.
+    axis : int, optional
+        Axis along which to count.
+        If None (default), a flattened version of the array is used.
 
     """
     m = getmaskarray(arr)
@@ -136,9 +136,12 @@
                 res.append(masked_array(_d, mask=_m))
             return res
 
-atleast_1d = _fromnxfunction('atleast_1d')
-atleast_2d = _fromnxfunction('atleast_2d')
-atleast_3d = _fromnxfunction('atleast_3d')
+#atleast_1d = _fromnxfunction('atleast_1d')
+#atleast_2d = _fromnxfunction('atleast_2d')
+#atleast_3d = _fromnxfunction('atleast_3d')
+atleast_1d = np.atleast_1d
+atleast_2d = np.atleast_2d
+atleast_3d = np.atleast_3d
 
 vstack = row_stack = _fromnxfunction('vstack')
 hstack = _fromnxfunction('hstack')
@@ -252,7 +255,8 @@
 
 
 def average(a, axis=None, weights=None, returned=False):
-    """Average the array over the given axis.
+    """
+    Average the array over the given axis.
 
     Parameters
     ----------
@@ -440,10 +444,10 @@
 #..............................................................................
 def compress_rowcols(x, axis=None):
     """
-    Suppress the rows and/or columns of a 2D array that contains
+    Suppress the rows and/or columns of a 2D array that contain
     masked values.
 
-    The suppression behavior is selected with the `axis`parameter.
+    The suppression behavior is selected with the `axis` parameter.
 
         - If axis is None, rows and columns are suppressed.
         - If axis is 0, only rows are suppressed.
@@ -482,13 +486,15 @@
     return x._data[idxr][:,idxc]
 
 def compress_rows(a):
-    """Suppress whole rows of a 2D array that contain masked values.
+    """
+    Suppress whole rows of a 2D array that contain masked values.
 
     """
     return compress_rowcols(a, 0)
 
 def compress_cols(a):
-    """Suppress whole columnss of a 2D array that contain masked values.
+    """
+    Suppress whole columns of a 2D array that contain masked values.
 
     """
     return compress_rowcols(a, 1)
@@ -530,30 +536,35 @@
     return a
 
 def mask_rows(a, axis=None):
-    """Mask whole rows of a 2D array that contain masked values.
+    """
+    Mask whole rows of a 2D array that contain masked values.
 
     Parameters
     ----------
-        axis : int, optional
-            Axis along which to perform the operation.
-            If None, applies to a flattened version of the array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+
     """
     return mask_rowcols(a, 0)
 
 def mask_cols(a, axis=None):
-    """Mask whole columns of a 2D array that contain masked values.
+    """
+    Mask whole columns of a 2D array that contain masked values.
 
     Parameters
     ----------
-        axis : int, optional
-            Axis along which to perform the operation.
-            If None, applies to a flattened version of the array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+
     """
     return mask_rowcols(a, 1)
 
 
 def dot(a,b, strict=False):
-    """Return the dot product of two 2D masked arrays a and b.
+    """
+    Return the dot product of two 2D masked arrays a and b.
 
     Like the generic numpy equivalent, the product sum is over the last
     dimension of a and the second-to-last dimension of b.  If strict is True,
@@ -582,74 +593,213 @@
     m = ~np.dot(am, bm)
     return masked_array(d, mask=m)
 
-#...............................................................................
-def ediff1d(array, to_end=None, to_begin=None):
-    """Return the differences between consecutive elements of an
-    array, possibly with prefixed and/or appended values.
+#####--------------------------------------------------------------------------
+#---- --- arraysetops ---
+#####--------------------------------------------------------------------------
 
-    Parameters
-    ----------
-        array : {array}
-            Input array,  will be flattened before the difference is taken.
-        to_end : {number}, optional
-            If provided, this number will be tacked onto the end of the returned
-            differences.
-        to_begin : {number}, optional
-            If provided, this number will be taked onto the beginning of the
-            returned differences.
+def ediff1d(arr, to_end=None, to_begin=None):
+    """
+    Computes the differences between consecutive elements of an array.
 
+    This function is the equivalent of `numpy.ediff1d` that takes masked
+    values into account.
+
+    See Also
+    --------
+    numpy.ediff1d : equivalent function for ndarrays.
+
     Returns
     -------
-          ed : {array}
-            The differences. Loosely, this will be (ary[1:] - ary[:-1]).
-
+    output : MaskedArray
+    
     """
-    a = masked_array(array, copy=True)
-    if a.ndim > 1:
-        a.reshape((a.size,))
-    (d, m, n) = (a._data, a._mask, a.size-1)
-    dd = d[1:]-d[:-1]
-    if m is nomask:
-        dm = nomask
-    else:
-        dm = m[1:]-m[:-1]
+    arr = ma.asanyarray(arr).flat
+    ed = arr[1:] - arr[:-1]
+    arrays = [ed]
     #
+    if to_begin is not None:
+        arrays.insert(0, to_begin)
     if to_end is not None:
-        to_end = asarray(to_end)
-        nend = to_end.size
-        if to_begin is not None:
-            to_begin = asarray(to_begin)
-            nbegin = to_begin.size
-            r_data = np.empty((n+nend+nbegin,), dtype=a.dtype)
-            r_mask = np.zeros((n+nend+nbegin,), dtype=bool)
-            r_data[:nbegin] = to_begin._data
-            r_mask[:nbegin] = to_begin._mask
-            r_data[nbegin:-nend] = dd
-            r_mask[nbegin:-nend] = dm
-        else:
-            r_data = np.empty((n+nend,), dtype=a.dtype)
-            r_mask = np.zeros((n+nend,), dtype=bool)
-            r_data[:-nend] = dd
-            r_mask[:-nend] = dm
-        r_data[-nend:] = to_end._data
-        r_mask[-nend:] = to_end._mask
+        arrays.append(to_end)
     #
-    elif to_begin is not None:
-        to_begin = asarray(to_begin)
-        nbegin = to_begin.size
-        r_data = np.empty((n+nbegin,), dtype=a.dtype)
-        r_mask = np.zeros((n+nbegin,), dtype=bool)
-        r_data[:nbegin] = to_begin._data
-        r_mask[:nbegin] = to_begin._mask
-        r_data[nbegin:] = dd
-        r_mask[nbegin:] = dm
+    if len(arrays) != 1:
+        # We'll save ourselves a copy of a potentially large array in the common
+        # case where neither to_begin or to_end was given.
+        ed = hstack(arrays)
     #
+    return ed
+
+
+def unique1d(ar1, return_index=False, return_inverse=False):
+    """
+    Finds the unique elements of an array.
+
+    Masked values are considered the same element (masked).
+
+    The output array is always a MaskedArray.
+
+    See Also
+    --------
+    np.unique1d : equivalent function for ndarrays.
+    """
+    output = np.unique1d(ar1,
+                         return_index=return_index,
+                         return_inverse=return_inverse)
+    if isinstance(output, tuple):
+        output = list(output)
+        output[0] = output[0].view(MaskedArray)
+        output = tuple(output)
     else:
-        r_data = dd
-        r_mask = dm
-    return masked_array(r_data, mask=r_mask)
+        output = output.view(MaskedArray)
+    return output
 
 
+def intersect1d(ar1, ar2):
+    """
+    Returns the repeated or unique elements belonging to the two arrays.
+
+    Masked values are considered equal to one another.
+    The output is always a masked array.
+
+    See Also
+    --------
+    numpy.intersect1d : equivalent function for ndarrays.
+
+    Examples
+    --------
+    >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+    >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+    >>> intersect1d(x, y)
+    masked_array(data = [1 1 3 3 --],
+                 mask = [False False False False  True],
+           fill_value = 999999)
+    """
+    aux = ma.concatenate((ar1,ar2))
+    aux.sort()
+    return aux[aux[1:] == aux[:-1]]
+
+
+
+def intersect1d_nu(ar1, ar2):
+    """
+    Returns the unique elements common to both arrays.
+
+    Masked values are considered equal to one another.
+    The output is always a masked array.
+
+    See Also
+    --------
+    intersect1d : Returns repeated or unique common elements.
+    numpy.intersect1d_nu : equivalent function for ndarrays.
+
+    Examples
+    --------
+    >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+    >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+    >>> intersect1d_nu(x, y)
+    masked_array(data = [1 3 --],
+                 mask = [False False  True],
+           fill_value = 999999)
+
+    """
+    # Might be faster than unique1d( intersect1d( ar1, ar2 ) )?
+    aux = ma.concatenate((unique1d(ar1), unique1d(ar2)))
+    aux.sort()
+    return aux[aux[1:] == aux[:-1]]
+
+
+
+def setxor1d(ar1, ar2):
+    """
+    Set exclusive-or of 1D arrays with unique elements.
+
+    See Also
+    --------
+    numpy.setxor1d : equivalent function for ndarrays
+
+    """
+    aux = ma.concatenate((ar1, ar2))
+    if aux.size == 0:
+        return aux
+    aux.sort()
+    auxf = aux.filled()
+#    flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
+    flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
+#    flag2 = ediff1d( flag ) == 0
+    flag2 = (flag[1:] == flag[:-1])
+    return aux[flag2]
+
+
+def setmember1d(ar1, ar2):
+    """
+    Return a boolean array set True where first element is in second array.
+
+    See Also
+    --------
+    numpy.setmember1d : equivalent function for ndarrays.
+
+    """
+    ar1 = ma.asanyarray(ar1)
+    ar2 = ma.asanyarray( ar2 )
+    ar = ma.concatenate((ar1, ar2 ))
+    b1 = ma.zeros(ar1.shape, dtype = np.int8)
+    b2 = ma.ones(ar2.shape, dtype = np.int8)
+    tt = ma.concatenate((b1, b2))
+
+    # We need this to be a stable sort, so always use 'mergesort' here. The
+    # values from the first array should always come before the values from the
+    # second array.
+    perm = ar.argsort(kind='mergesort')
+    aux = ar[perm]
+    aux2 = tt[perm]
+#    flag = ediff1d( aux, 1 ) == 0
+    flag = ma.concatenate((aux[1:] == aux[:-1], [False]))
+    ii = ma.where( flag * aux2 )[0]
+    aux = perm[ii+1]
+    perm[ii+1] = perm[ii]
+    perm[ii] = aux
+    #
+    indx = perm.argsort(kind='mergesort')[:len( ar1 )]
+    #
+    return flag[indx]
+
+
+def union1d(ar1, ar2):
+    """
+    Union of 1D arrays with unique elements.
+
+    See also
+    --------
+    numpy.union1d : equivalent function for ndarrays.
+
+    """
+    return unique1d(ma.concatenate((ar1, ar2)))
+
+
+def setdiff1d(ar1, ar2):
+    """
+    Set difference of 1D arrays with unique elements.
+
+    See Also
+    --------
+    numpy.setdiff1d : equivalent function for ndarrays
+
+    """
+    aux = setmember1d(ar1,ar2)
+    if aux.size == 0:
+        return aux
+    else:
+        return ma.asarray(ar1)[aux == 0]
+
+
+
+#####--------------------------------------------------------------------------
+#---- --- Covariance ---
+#####--------------------------------------------------------------------------
+
+
+
+
 def _covhelper(x, y=None, rowvar=True, allow_masked=True):
     """
     Private function for the computation of covariance and correlation
@@ -747,7 +897,8 @@
 
 
 def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True):
-    """The correlation coefficients formed from the array x, where the
+    """
+    The correlation coefficients formed from the array x, where the
     rows are the observations, and the columns are variables.
 
     corrcoef(x,y) where x and y are 1d arrays is the same as
@@ -818,7 +969,8 @@
 #####--------------------------------------------------------------------------
 
 class MAxisConcatenator(AxisConcatenator):
-    """Translate slice objects to concatenation along an axis.
+    """
+    Translate slice objects to concatenation along an axis.
 
     """
 
@@ -877,11 +1029,13 @@
         return self._retval(res)
 
 class mr_class(MAxisConcatenator):
-    """Translate slice objects to concatenation along the first axis.
+    """
+    Translate slice objects to concatenation along the first axis.
 
-    For example:
-        >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
-        array([1, 2, 3, 0, 0, 4, 5, 6])
+    Examples
+    --------
+    >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
+    array([1, 2, 3, 0, 0, 4, 5, 6])
 
     """
     def __init__(self):
@@ -894,7 +1048,8 @@
 #####--------------------------------------------------------------------------
 
 def flatnotmasked_edges(a):
-    """Find the indices of the first and last not masked values in a
+    """
+    Find the indices of the first and last not masked values in a
     1D masked array.  If all values are masked, returns None.
 
     """
@@ -907,8 +1062,10 @@
     else:
         return None
 
+
 def notmasked_edges(a, axis=None):
-    """Find the indices of the first and last not masked values along
+    """
+    Find the indices of the first and last not masked values along
     the given axis in a masked array.
 
     If all values are masked, return None.  Otherwise, return a list
@@ -917,9 +1074,10 @@
 
     Parameters
     ----------
-        axis : int, optional
-            Axis along which to perform the operation.
-            If None, applies to a flattened version of the array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+
     """
     a = asarray(a)
     if axis is None or a.ndim == 1:
@@ -929,8 +1087,10 @@
     return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
             tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]),]
 
+
 def flatnotmasked_contiguous(a):
-    """Find contiguous unmasked data in a flattened masked array.
+    """
+    Find contiguous unmasked data in a flattened masked array.
 
     Return a sorted sequence of slices (start index, end index).
 
@@ -950,22 +1110,22 @@
     return result
 
 def notmasked_contiguous(a, axis=None):
-    """Find contiguous unmasked data in a masked array along the given
-    axis.
+    """
+    Find contiguous unmasked data in a masked array along the given axis.
 
     Parameters
     ----------
-        axis : int, optional
-            Axis along which to perform the operation.
-            If None, applies to a flattened version of the array.
+    axis : int, optional
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
 
     Returns
     -------
-        A sorted sequence of slices (start index, end index).
+    A sorted sequence of slices (start index, end index).
 
     Notes
     -----
-        Only accepts 2D arrays at most.
+    Only accepts 2D arrays at most.
 
     """
     a = asarray(a)

Modified: branches/dynamic_cpu_configuration/numpy/ma/mrecords.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/mrecords.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/mrecords.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -357,7 +357,7 @@
                     dtype = None
                 else:
                     output = ndarray.view(self, dtype)
-            # OK, there's the change 
+            # OK, there's the change
             except TypeError:
                 dtype = np.dtype(dtype)
                 # we need to revert to MaskedArray, but keeping the possibility

Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -474,6 +474,20 @@
                      np.array([(1, '1', 1.)], dtype=flexi.dtype))
 
 
+    def test_filled_w_nested_dtype(self):
+        "Test filled w/ nested dtype"
+        ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
+        a = array([(1, (1, 1)), (2, (2, 2))],
+                  mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
+        test = a.filled(0)
+        control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
+        assert_equal(test, control)
+        #
+        test = a['B'].filled(0)
+        control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
+        assert_equal(test, control)
+
+
     def test_optinfo_propagation(self):
         "Checks that _optinfo dictionary isn't back-propagated"
         x = array([1,2,3,], dtype=float)
@@ -483,6 +497,55 @@
         y._optinfo['info'] = '!!!'
         assert_equal(x._optinfo['info'], '???')
 
+
+    def test_fancy_printoptions(self):
+        "Test printing a masked array w/ fancy dtype."
+        fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+        test = array([(1, (2, 3.0)), (4, (5, 6.0))],
+                     mask=[(1, (0, 1)), (0, (1, 0))],
+                     dtype=fancydtype)
+        control = "[(--, (2, --)) (4, (--, 6.0))]"
+        assert_equal(str(test), control)
+
+
+    def test_flatten_structured_array(self):
+        "Test flatten_structured_array on arrays"
+        # On ndarray
+        ndtype = [('a', int), ('b', float)]
+        a = np.array([(1, 1), (2, 2)], dtype=ndtype)
+        test = flatten_structured_array(a)
+        control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
+        assert_equal(test, control)
+        assert_equal(test.dtype, control.dtype)
+        # On masked_array
+        a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+        test = flatten_structured_array(a)
+        control = array([[1., 1.], [2., 2.]], 
+                        mask=[[0, 1], [1, 0]], dtype=np.float)
+        assert_equal(test, control)
+        assert_equal(test.dtype, control.dtype)
+        assert_equal(test.mask, control.mask)
+        # On masked array with nested structure
+        ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
+        a = array([(1, (1, 1.1)), (2, (2, 2.2))],
+                  mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
+        test = flatten_structured_array(a)
+        control = array([[1., 1., 1.1], [2., 2., 2.2]], 
+                        mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
+        assert_equal(test, control)
+        assert_equal(test.dtype, control.dtype)
+        assert_equal(test.mask, control.mask)
+        # Keeping the initial shape
+        ndtype = [('a', int), ('b', float)]
+        a = np.array([[(1, 1),], [(2, 2),]], dtype=ndtype)
+        test = flatten_structured_array(a)
+        control = np.array([[[1., 1.],], [[2., 2.],]], dtype=np.float)
+        assert_equal(test, control)
+        assert_equal(test.dtype, control.dtype)
+
+
+        
+
 #------------------------------------------------------------------------------
 
 class TestMaskedArrayArithmetic(TestCase):
@@ -539,6 +602,7 @@
             assert_equal(np.multiply(x,y), multiply(xm, ym))
             assert_equal(np.divide(x,y), divide(xm, ym))
 
+
     def test_divide_on_different_shapes(self):
         x = arange(6, dtype=float)
         x.shape = (2,3)
@@ -557,6 +621,7 @@
         assert_equal(z, [[-1.,-1.,-1.], [3.,4.,5.]])
         assert_equal(z.mask, [[1,1,1],[0,0,0]])
 
+
     def test_mixed_arithmetic(self):
         "Tests mixed arithmetics."
         na = np.array([1])
@@ -571,6 +636,7 @@
         assert_equal(getmaskarray(a/2), [0,0,0])
         assert_equal(getmaskarray(2/a), [1,0,1])
 
+
     def test_masked_singleton_arithmetic(self):
         "Tests some scalar arithmetics on MaskedArrays."
         # Masked singleton should remain masked no matter what
@@ -581,6 +647,7 @@
         self.failUnless(maximum(xm, xm).mask)
         self.failUnless(minimum(xm, xm).mask)
 
+
     def test_arithmetic_with_masked_singleton(self):
         "Checks that there's no collapsing to masked"
         x = masked_array([1,2])
@@ -593,6 +660,7 @@
         assert_equal(y.shape, x.shape)
         assert_equal(y._mask, [True, True])
 
+
     def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
         "Check that we're not losing the shape of a singleton"
         x = masked_array([1, ])
@@ -600,6 +668,7 @@
         assert_equal(y.shape, x.shape)
         assert_equal(y.mask, [True, ])
 
+
     def test_scalar_arithmetic(self):
         x = array(0, mask=0)
         assert_equal(x.filled().ctypes.data, x.ctypes.data)
@@ -608,6 +677,7 @@
         assert_equal(xm.shape,(2,))
         assert_equal(xm.mask,[1,1])
 
+
     def test_basic_ufuncs (self):
         "Test various functions such as sin, cos."
         (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
@@ -649,6 +719,7 @@
         assert getmask(count(ott,0)) is nomask
         assert_equal([1,2],count(ott,0))
 
+
     def test_minmax_func (self):
         "Tests minimum and maximum."
         (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
@@ -672,6 +743,7 @@
         x[-1,-1] = masked
         assert_equal(maximum(x), 2)
 
+
     def test_minimummaximum_func(self):
         a = np.ones((2,2))
         aminimum = minimum(a,a)
@@ -690,6 +762,7 @@
         self.failUnless(isinstance(amaximum, MaskedArray))
         assert_equal(amaximum, np.maximum.outer(a,a))
 
+
     def test_minmax_funcs_with_output(self):
         "Tests the min/max functions with explicit outputs"
         mask = np.random.rand(12).round()
@@ -735,7 +808,8 @@
         self.failUnless(x.min() is masked)
         self.failUnless(x.max() is masked)
         self.failUnless(x.ptp() is masked)
-    #........................
+
+
     def test_addsumprod (self):
         "Tests add, sum, product."
         (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
@@ -757,6 +831,98 @@
             assert_equal(np.sum(x,1), sum(x,1))
             assert_equal(np.product(x,1), product(x,1))
 
+
+    def test_binops_d2D(self):
+        "Test binary operations on 2D data"
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+        #
+        test = a * b
+        control = array([[2., 3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = b * a
+        control = array([[2., 3.], [4., 5.], [6., 7.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        a = array([[1.], [2.], [3.]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]],
+                  mask=[[0, 0], [0, 0], [0, 1]])
+        test = a * b
+        control = array([[2, 3], [8, 10], [18, 3]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = b * a
+        control = array([[2, 3], [8, 10], [18, 7]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+
+    def test_domained_binops_d2D(self):
+        "Test domained binary operations on 2D data"
+        a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]])
+        #
+        test = a / b
+        control = array([[1./2., 1./3.], [2., 2.], [3., 3.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = b / a
+        control = array([[2./1., 3./1.], [4., 5.], [6., 7.]],
+                        mask=[[0, 0], [1, 1], [1, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        a = array([[1.], [2.], [3.]])
+        b = array([[2., 3.], [4., 5.], [6., 7.]],
+                  mask=[[0, 0], [0, 0], [0, 1]])
+        test = a / b
+        control = array([[1./2, 1./3], [2./4, 2./5], [3./6, 3]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = b / a
+        control = array([[2/1., 3/1.], [4/2., 5/2.], [6/3., 7]],
+                        mask=[[0, 0], [0, 0], [0, 1]])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+
+    def test_noshrinking(self):
+        "Check that we don't shrink a mask when not wanted"
+        # Binary operations
+        a = masked_array([1,2,3], mask=[False,False,False], shrink=False)
+        b = a + 1
+        assert_equal(b.mask, [0, 0, 0])
+        # In place binary operation
+        a += 1
+        assert_equal(a.mask, [0, 0, 0])
+        # Domained binary operation
+        b = a / 1.
+        assert_equal(b.mask, [0, 0, 0])
+        # In place binary operation
+        a /= 1.
+        assert_equal(a.mask, [0, 0, 0])
+        
+
     def test_mod(self):
         "Tests mod"
         (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
@@ -767,7 +933,6 @@
         test = mod(xm, ym)
         assert_equal(test, np.mod(xm, ym))
         assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
-        
 
 
     def test_TakeTransposeInnerOuter(self):
@@ -826,6 +991,56 @@
             self.failUnless(output[0] is masked)
 
 
+    def test_eq_on_structured(self):
+        "Test the equality of structured arrays"
+        ndtype = [('A', int), ('B', int)]
+        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+        test = (a == a)
+        assert_equal(test, [True, True])
+        assert_equal(test.mask, [False, False])
+        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+        test = (a == b)
+        assert_equal(test, [False, True])
+        assert_equal(test.mask, [True, False])
+        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+        test = (a == b)
+        assert_equal(test, [True, False])
+        assert_equal(test.mask, [False, False])
+
+
+    def test_ne_on_structured(self):
+        "Test the inequality of structured arrays"
+        ndtype = [('A', int), ('B', int)]
+        a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+        test = (a != a)
+        assert_equal(test, [False, False])
+        assert_equal(test.mask, [False, False])
+        b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+        test = (a != b)
+        assert_equal(test, [True, False])
+        assert_equal(test.mask, [True, False])
+        b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+        test = (a != b)
+        assert_equal(test, [False, True])
+        assert_equal(test.mask, [False, False])
+
+
+    def test_numpyarithmetics(self):
+        "Check that the mask is not back-propagated when using numpy functions"
+        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
+        control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
+                               mask=[1, 1, 0, 0, 1])
+        #
+        test = log(a)
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        assert_equal(a.mask, [0, 0, 0, 0, 1])
+        #
+        test = np.log(a)
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        assert_equal(a.mask, [0, 0, 0, 0, 1])
+
 #------------------------------------------------------------------------------
 
 class TestMaskedArrayAttributes(TestCase):
@@ -923,6 +1138,17 @@
         a[1] = 1
         assert_equal(a._mask, zeros(10))
 
+    def test_flat(self):
+        "Test flat on masked_matrices"
+        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+        test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
+        control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
+        assert_equal(test, control)
+        #
+        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+        testflat = test.flat
+        testflat[:] = testflat[[2, 1, 0]]
+        assert_equal(test, control)
 
 #------------------------------------------------------------------------------
 
@@ -1049,21 +1275,44 @@
         # The shape shouldn't matter
         ndtype = [('f0', float, (2, 2))]
         control = np.array((default_fill_value(0.),),
-                           dtype=[('f0',float)])
+                           dtype=[('f0',float)]).astype(ndtype)
         assert_equal(_check_fill_value(None, ndtype), control)
-        control = np.array((0,), dtype=[('f0',float)])
+        control = np.array((0,), dtype=[('f0',float)]).astype(ndtype)
         assert_equal(_check_fill_value(0, ndtype), control)
         #
         ndtype = np.dtype("int, (2,3)float, float")
         control = np.array((default_fill_value(0),
                             default_fill_value(0.),
                             default_fill_value(0.),),
-                           dtype="int, float, float")
+                           dtype="int, float, float").astype(ndtype)
         test = _check_fill_value(None, ndtype)
         assert_equal(test, control)
-        control = np.array((0,0,0), dtype="int, float, float")
+        control = np.array((0,0,0), dtype="int, float, float").astype(ndtype)
         assert_equal(_check_fill_value(0, ndtype), control)
 
+
+    def test_extremum_fill_value(self):
+        "Tests extremum fill values for flexible type."
+        a = array([(1, (2, 3)), (4, (5, 6))],
+                  dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
+        test = a.fill_value
+        assert_equal(test['A'], default_fill_value(a['A']))
+        assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
+        assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
+        #
+        test = minimum_fill_value(a)
+        assert_equal(test[0], minimum_fill_value(a['A']))
+        assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
+        assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
+        assert_equal(test[1], minimum_fill_value(a['B']))
+        #
+        test = maximum_fill_value(a)
+        assert_equal(test[0], maximum_fill_value(a['A']))
+        assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
+        assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
+        assert_equal(test[1], maximum_fill_value(a['B']))
+    
+
 #------------------------------------------------------------------------------
 
 class TestUfuncs(TestCase):
@@ -1125,7 +1374,17 @@
         self.failUnless(amask.max(1)[0].mask)
         self.failUnless(amask.min(1)[0].mask)
 
+    def test_ndarray_mask(self):
+        "Check that the mask of the result is a ndarray (not a MaskedArray...)"
+        a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
+        test = np.sqrt(a)
+        control = masked_array([-1, 0, 1, np.sqrt(2), -1],
+                          mask=[1, 0, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.mask, control.mask)
+        self.failUnless(not isinstance(test.mask, MaskedArray))
 
+
 #------------------------------------------------------------------------------
 
 class TestMaskedArrayInPlaceArithmetics(TestCase):
@@ -1367,6 +1626,51 @@
         assert_equal(x.data, [1., 2.**2.5, 3])
         assert_equal(x.mask, [0, 0, 1])
 
+
+    def test_datafriendly_add_arrays(self):
+        a = array([[1, 1], [3, 3]])
+        b = array([1, 1], mask=[0, 0])
+        a += b
+        assert_equal(a, [[2, 2], [4, 4]])
+        if a.mask is not nomask:
+            assert_equal(a.mask, [[0, 0], [0, 0]])
+        #
+        a = array([[1, 1], [3, 3]])
+        b = array([1, 1], mask=[0, 1])
+        a += b
+        assert_equal(a, [[2, 2], [4, 4]])
+        assert_equal(a.mask, [[0, 1], [0, 1]])
+
+
+    def test_datafriendly_sub_arrays(self):
+        a = array([[1, 1], [3, 3]])
+        b = array([1, 1], mask=[0, 0])
+        a -= b
+        assert_equal(a, [[0, 0], [2, 2]])
+        if a.mask is not nomask:
+            assert_equal(a.mask, [[0, 0], [0, 0]])
+        #
+        a = array([[1, 1], [3, 3]])
+        b = array([1, 1], mask=[0, 1])
+        a -= b
+        assert_equal(a, [[0, 0], [2, 2]])
+        assert_equal(a.mask, [[0, 1], [0, 1]])
+
+
+    def test_datafriendly_mul_arrays(self):
+        a = array([[1, 1], [3, 3]])
+        b = array([1, 1], mask=[0, 0])
+        a *= b
+        assert_equal(a, [[1, 1], [3, 3]])
+        if a.mask is not nomask:
+            assert_equal(a.mask, [[0, 0], [0, 0]])
+        #
+        a = array([[1, 1], [3, 3]])
+        b = array([1, 1], mask=[0, 1])
+        a *= b
+        assert_equal(a, [[1, 1], [3, 3]])
+        assert_equal(a.mask, [[0, 1], [0, 1]])
+
 #------------------------------------------------------------------------------
 
 class TestMaskedArrayMethods(TestCase):
@@ -1442,8 +1746,8 @@
         a *= 1e-8
         a[0] = 0
         self.failUnless(allclose(a, 0, masked_equal=True))
-        
 
+
     def test_allany(self):
         """Checks the any/all methods/functions."""
         x = np.array([[ 0.13,  0.26,  0.90],
@@ -1810,6 +2114,28 @@
         assert_equal(am, an)
 
 
+    def test_sort_flexible(self):
+        "Test sort on flexible dtype."
+        a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
+             mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
+            dtype=[('A', int), ('B', int)])
+        #
+        test = sort(a)
+        b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
+             mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
+            dtype=[('A', int), ('B', int)])
+        assert_equal(test, b)
+        assert_equal(test.mask, b.mask)
+        #
+        test = sort(a, endwith=False)
+        b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3),],
+             mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0),],
+            dtype=[('A', int), ('B', int)])
+        assert_equal(test, b)
+        assert_equal(test.mask, b.mask)
+        #
+
+
     def test_squeeze(self):
         "Check squeeze"
         data = masked_array([[1,2,3]])
@@ -1883,15 +2209,15 @@
         assert_equal(x.tolist(), [(1,1.1,'one'),(2,2.2,'two'),(None,None,None)])
 
 
-    def test_torecords(self):
+    def test_toflex(self):
         "Test the conversion to records"
         data = arange(10)
-        record = data.torecords()
+        record = data.toflex()
         assert_equal(record['_data'], data._data)
         assert_equal(record['_mask'], data._mask)
         #
         data[[0,1,2,-1]] = masked
-        record = data.torecords()
+        record = data.toflex()
         assert_equal(record['_data'], data._data)
         assert_equal(record['_mask'], data._mask)
         #
@@ -1901,7 +2227,7 @@
                                                  np.random.rand(10))],
                      dtype=ndtype)
         data[[0,1,2,-1]] = masked
-        record = data.torecords()
+        record = data.toflex()
         assert_equal(record['_data'], data._data)
         assert_equal(record['_mask'], data._mask)
         #
@@ -1911,10 +2237,29 @@
                                                    np.random.rand(10))],
                      dtype=ndtype)
         data[[0,1,2,-1]] = masked
-        record = data.torecords()
-        assert_equal(record['_data'], data._data)
-        assert_equal(record['_mask'], data._mask)
+        record = data.toflex()
+        assert_equal_records(record['_data'], data._data)
+        assert_equal_records(record['_mask'], data._mask)
 
+
+    def test_fromflex(self):
+        "Test the reconstruction of a masked_array from a record"
+        a = array([1, 2, 3])
+        test = fromflex(a.toflex())
+        assert_equal(test, a)
+        assert_equal(test.mask, a.mask)
+        #
+        a = array([1, 2, 3], mask=[0, 0, 1])
+        test = fromflex(a.toflex())
+        assert_equal(test, a)
+        assert_equal(test.mask, a.mask)
+        #
+        a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
+                  dtype=[('A', int), ('B', float)])
+        test = fromflex(a.toflex())
+        assert_equal(test, a)
+        assert_equal(test.data, a.data)
+
 #------------------------------------------------------------------------------
 
 
@@ -2078,7 +2423,7 @@
         assert_equal(out, [0, 4, 8])
         assert_equal(out.mask, [0, 1, 0])
         out = diag(out)
-        control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], 
+        control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
                         mask = [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
         assert_equal(out, control)
 
@@ -2531,6 +2876,12 @@
             test = mask_or(mask, other)
         except ValueError:
             pass
+        # Using nested arrays
+        dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
+        amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
+        bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
+        cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
+        assert_equal(mask_or(amask, bmask), cntrl)
 
 
     def test_flatten_mask(self):
@@ -2543,7 +2894,7 @@
         test = flatten_mask(mask)
         control = np.array([0, 0, 0, 1], dtype=bool)
         assert_equal(test, control)
-        
+
         mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
         data = [(0, (0, 0)), (0, (0, 1))]
         mask = np.array(data, dtype=mdtype)
@@ -2691,7 +3042,7 @@
         self.failUnless(isinstance(test, MaskedArray))
         assert_equal(test._data, a._data)
         assert_equal(test._mask, a._mask)
-        
+
     #
     def test_view_to_type(self):
         (data, a, controlmask) = self.data
@@ -2727,7 +3078,7 @@
         assert_equal(test.dtype.names, ('A', 'B'))
         assert_equal(test['A'], a['a'][-1])
         assert_equal(test['B'], a['b'][-1])
-        
+
     #
     def test_view_to_subdtype(self):
         (data, a, controlmask) = self.data

Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -22,7 +22,7 @@
     #
     def test_masked_all(self):
         "Tests masked_all"
-        # Standard dtype 
+        # Standard dtype
         test = masked_all((2,), dtype=float)
         control = array([1, 1], mask=[1, 1], dtype=float)
         assert_equal(test, control)
@@ -53,7 +53,7 @@
 
     def test_masked_all_like(self):
         "Tests masked_all"
-        # Standard dtype 
+        # Standard dtype
         base = array([1, 2], dtype=float)
         test = masked_all_like(base)
         control = array([1, 1], mask=[1, 1], dtype=float)
@@ -338,40 +338,8 @@
         c = dot(b,a,False)
         assert_equal(c, np.dot(b.filled(0),a.filled(0)))
 
-    def test_ediff1d(self):
-        "Tests mediff1d"
-        x = masked_array(np.arange(5), mask=[1,0,0,0,1])
-        difx_d = (x._data[1:]-x._data[:-1])
-        difx_m = (x._mask[1:]-x._mask[:-1])
-        dx = ediff1d(x)
-        assert_equal(dx._data, difx_d)
-        assert_equal(dx._mask, difx_m)
-        #
-        dx = ediff1d(x, to_begin=masked)
-        assert_equal(dx._data, np.r_[0,difx_d])
-        assert_equal(dx._mask, np.r_[1,difx_m])
-        dx = ediff1d(x, to_begin=[1,2,3])
-        assert_equal(dx._data, np.r_[[1,2,3],difx_d])
-        assert_equal(dx._mask, np.r_[[0,0,0],difx_m])
-        #
-        dx = ediff1d(x, to_end=masked)
-        assert_equal(dx._data, np.r_[difx_d,0])
-        assert_equal(dx._mask, np.r_[difx_m,1])
-        dx = ediff1d(x, to_end=[1,2,3])
-        assert_equal(dx._data, np.r_[difx_d,[1,2,3]])
-        assert_equal(dx._mask, np.r_[difx_m,[0,0,0]])
-        #
-        dx = ediff1d(x, to_end=masked, to_begin=masked)
-        assert_equal(dx._data, np.r_[0,difx_d,0])
-        assert_equal(dx._mask, np.r_[1,difx_m,1])
-        dx = ediff1d(x, to_end=[1,2,3], to_begin=masked)
-        assert_equal(dx._data, np.r_[0,difx_d,[1,2,3]])
-        assert_equal(dx._mask, np.r_[1,difx_m,[0,0,0]])
-        #
-        dx = ediff1d(x._data, to_end=masked, to_begin=masked)
-        assert_equal(dx._data, np.r_[0,difx_d,0])
-        assert_equal(dx._mask, np.r_[1,0,0,0,0,1])
 
+
 class TestApplyAlongAxis(TestCase):
     #
     "Tests 2D functions"
@@ -383,6 +351,7 @@
         assert_equal(xa,[[1,4],[7,10]])
 
 
+
 class TestMedian(TestCase):
     #
     def test_2d(self):
@@ -422,11 +391,12 @@
         assert_equal(median(x,0), [[12,10],[8,9],[16,17]])
 
 
+
 class TestCov(TestCase):
-    #
+
     def setUp(self):
         self.data = array(np.random.rand(12))
-    #
+
     def test_1d_wo_missing(self):
         "Test cov on 1D variable w/o missing values"
         x = self.data
@@ -434,7 +404,7 @@
         assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
         assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                             cov(x, rowvar=False, bias=True))
-    #
+
     def test_2d_wo_missing(self):
         "Test cov on 1 2D variable w/o missing values"
         x = self.data.reshape(3,4)
@@ -442,7 +412,7 @@
         assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
         assert_almost_equal(np.cov(x, rowvar=False, bias=True),
                             cov(x, rowvar=False, bias=True))
-    #
+
     def test_1d_w_missing(self):
         "Test cov 1 1D variable w/missing values"
         x = self.data
@@ -466,7 +436,7 @@
                             cov(x, x[::-1], rowvar=False))
         assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
                             cov(x, x[::-1], rowvar=False, bias=True))
-    #
+
     def test_2d_w_missing(self):
         "Test cov on 2D variable w/ missing value"
         x = self.data
@@ -486,11 +456,12 @@
                             np.cov(xf, rowvar=False, bias=True) * x.shape[0]/frac)
 
 
+
 class TestCorrcoef(TestCase):
-    #
+
     def setUp(self):
         self.data = array(np.random.rand(12))
-    #
+
     def test_1d_wo_missing(self):
         "Test cov on 1D variable w/o missing values"
         x = self.data
@@ -499,7 +470,7 @@
                             corrcoef(x, rowvar=False))
         assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
                             corrcoef(x, rowvar=False, bias=True))
-    #
+
     def test_2d_wo_missing(self):
         "Test corrcoef on 1 2D variable w/o missing values"
         x = self.data.reshape(3,4)
@@ -508,7 +479,7 @@
                             corrcoef(x, rowvar=False))
         assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
                             corrcoef(x, rowvar=False, bias=True))
-    #
+
     def test_1d_w_missing(self):
         "Test corrcoef 1 1D variable w/missing values"
         x = self.data
@@ -532,7 +503,7 @@
                             corrcoef(x, x[::-1], rowvar=False))
         assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True),
                             corrcoef(x, x[::-1], rowvar=False, bias=True))
-    #
+
     def test_2d_w_missing(self):
         "Test corrcoef on 2D variable w/ missing value"
         x = self.data
@@ -575,6 +546,213 @@
             assert_almost_equal(a, a_)
 
 
+
+class TestArraySetOps(TestCase):
+    #
+    def test_unique1d_onlist(self):
+        "Test unique1d on list"
+        data = [1, 1, 1, 2, 2, 3]
+        test = unique1d(data, return_index=True, return_inverse=True)
+        self.failUnless(isinstance(test[0], MaskedArray))
+        assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
+        assert_equal(test[1], [0, 3, 5])
+        assert_equal(test[2], [0, 0, 0, 1, 1, 2])
+
+    def test_unique1d_onmaskedarray(self):
+        "Test unique1d on masked data w/use_mask=True"
+        data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
+        test = unique1d(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+        #
+        data.fill_value = 3
+        data = masked_array([1, 1, 1, 2, 2, 3],
+                       mask=[0, 0, 1, 0, 1, 0], fill_value=3)
+        test = unique1d(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+    def test_unique1d_allmasked(self):
+        "Test all masked"
+        data = masked_array([1, 1, 1], mask=True)
+        test = unique1d(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1,], mask=[True]))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0, 0, 0])
+        #
+        "Test masked"
+        data = masked
+        test = unique1d(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array(masked))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0])
+
+    def test_ediff1d(self):
+        "Tests ediff1d"
+        x = masked_array(np.arange(5), mask=[1,0,0,0,1])
+        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+        test = ediff1d(x)
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+    #
+    def test_ediff1d_tobegin(self):
+        "Test ediff1d w/ to_begin"
+        x = masked_array(np.arange(5), mask=[1,0,0,0,1])
+        test = ediff1d(x, to_begin=masked)
+        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_begin=[1,2,3])
+        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+    #
+    def test_ediff1d_toend(self):
+        "Test ediff1d w/ to_end"
+        x = masked_array(np.arange(5), mask=[1,0,0,0,1])
+        test = ediff1d(x, to_end=masked)
+        control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1,2,3])
+        control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+    #
+    def test_ediff1d_tobegin_toend(self):
+        "Test ediff1d w/ to_begin and to_end"
+        x = masked_array(np.arange(5), mask=[1,0,0,0,1])
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1,2,3], to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+    #
+    def test_ediff1d_ndarray(self):
+        "Test ediff1d w/ an ndarray"
+        x = np.arange(5)
+        test = ediff1d(x)
+        control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
+        assert_equal(test, control)
+        self.failUnless(isinstance(test, MaskedArray))
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
+        self.failUnless(isinstance(test, MaskedArray))
+        assert_equal(test.data, control.data)
+        assert_equal(test.mask, control.mask)
+
+
+    def test_intersect1d(self):
+        "Test intersect1d"
+        x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+        y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+        test = intersect1d(x, y)
+        control = array([1, 1, 3, 3, -1], mask=[0, 0, 0, 0, 1])
+        assert_equal(test, control)
+
+
+    def test_intersect1d_nu(self):
+        "Test intersect1d_nu"
+        x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+        y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+        test = intersect1d_nu(x, y)
+        control = array([1, 3, -1], mask=[0, 0, 1])
+        assert_equal(test, control)
+
+
+    def test_setxor1d(self):
+        "Test setxor1d"
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+        test = setxor1d(a, b)
+        assert_equal(test, array([3, 4, 7]))
+        #
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = [1, 2, 3, 4, 5]
+        test = setxor1d(a, b)
+        assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
+        #
+        a = array( [1, 2, 3] )
+        b = array( [6, 5, 4] )
+        test = setxor1d(a, b)
+        assert(isinstance(test, MaskedArray))
+        assert_equal(test, [1, 2, 3, 4, 5, 6])
+        #
+        a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
+        b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
+        test = setxor1d(a, b)
+        assert(isinstance(test, MaskedArray))
+        assert_equal(test, [1, 2, 3, 4, 5, 6])
+        #
+        assert_array_equal([], setxor1d([],[]))
+
+
+    def test_setmember1d( self ):
+        "Test setmember1d"
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+        test = setmember1d(a, b)
+        assert_equal(test, [True, True, True, False, True])
+        #
+        assert_array_equal([], setmember1d([],[]))
+
+
+    def test_union1d( self ):
+        "Test union1d"
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+        test = union1d(a, b)
+        control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
+        assert_equal(test, control)
+        #
+        assert_array_equal([], setmember1d([],[]))
+
+
+    def test_setdiff1d( self ):
+        "Test setdiff1d"
+        a = array([6, 5, 4, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 1])
+        b = array([2, 4, 3, 3, 2, 1, 5])
+        test = setdiff1d(a, b)
+        assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
+        #
+        a = arange(10)
+        b = arange(8)
+        assert_equal(setdiff1d(a, b), array([8, 9]))
+
+
+    def test_setdiff1d_char_array(self):
+        "Test setdiff1d on a char array"
+        a = np.array(['a','b','c'])
+        b = np.array(['a','b','s'])
+        assert_array_equal(setdiff1d(a,b), np.array(['c']))
+
+
+
+
+class TestShapeBase(TestCase):
+    #
+    def test_atleast1d(self):
+        pass
+
+
 ###############################################################################
 #------------------------------------------------------------------------------
 if __name__ == "__main__":

Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -334,8 +334,8 @@
         mult[0] = masked
         mult[1] = (1, 1, 1)
         mult.filled(0)
-        assert_equal(mult.filled(0),
-                     np.array([(0,0,0),(1,1,1)], dtype=mult.dtype))
+        assert_equal_records(mult.filled(0),
+                             np.array([(0,0,0),(1,1,1)], dtype=mult.dtype))
 
 
 class TestView(TestCase):

Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -153,5 +153,3 @@
 ################################################################################
 if __name__ == '__main__':
     run_module_suite()
-
-

Modified: branches/dynamic_cpu_configuration/numpy/ma/testutils.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/ma/testutils.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/ma/testutils.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -110,14 +110,14 @@
         return _assert_equal_on_sequences(actual.tolist(),
                                           desired.tolist(),
                                           err_msg='')
-    elif actual_dtype.char in "OV" and desired_dtype.char in "OV":
-        if (actual_dtype != desired_dtype) and actual_dtype:
-            msg = build_err_msg([actual_dtype, desired_dtype],
-                                err_msg, header='', names=('actual', 'desired'))
-            raise ValueError(msg)
-        return _assert_equal_on_sequences(actual.tolist(),
-                                          desired.tolist(),
-                                          err_msg='')
+#    elif actual_dtype.char in "OV" and desired_dtype.char in "OV":
+#        if (actual_dtype != desired_dtype) and actual_dtype:
+#            msg = build_err_msg([actual_dtype, desired_dtype],
+#                                err_msg, header='', names=('actual', 'desired'))
+#            raise ValueError(msg)
+#        return _assert_equal_on_sequences(actual.tolist(),
+#                                          desired.tolist(),
+#                                          err_msg='')
     return assert_array_equal(actual, desired, err_msg)
 
 
@@ -171,15 +171,14 @@
 #    yf = filled(y)
     # Allocate a common mask and refill
     m = mask_or(getmask(x), getmask(y))
-    x = masked_array(x, copy=False, mask=m, subok=False)
-    y = masked_array(y, copy=False, mask=m, subok=False)
+    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
     if ((x is masked) and not (y is masked)) or \
         ((y is masked) and not (x is masked)):
         msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
                             header=header, names=('x', 'y'))
         raise ValueError(msg)
     # OK, now run the basic tests on filled versions
-    comparison = getattr(np, comparison.__name__, lambda x,y: True)
     return utils.assert_array_compare(comparison,
                                       x.filled(fill_value),
                                       y.filled(fill_value),
@@ -189,7 +188,8 @@
 
 def assert_array_equal(x, y, err_msg='', verbose=True):
     """Checks the elementwise equality of two masked arrays."""
-    assert_array_compare(equal, x, y, err_msg=err_msg, verbose=verbose,
+    assert_array_compare(operator.__eq__, x, y,
+                         err_msg=err_msg, verbose=verbose,
                          header='Arrays are not equal')
 
 
@@ -223,7 +223,8 @@
 
 def assert_array_less(x, y, err_msg='', verbose=True):
     "Checks that x is smaller than y elementwise."
-    assert_array_compare(less, x, y, err_msg=err_msg, verbose=verbose,
+    assert_array_compare(operator.__lt__, x, y,
+                         err_msg=err_msg, verbose=verbose,
                          header='Arrays are not less-ordered')
 
 

Modified: branches/dynamic_cpu_configuration/numpy/numarray/util.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/numarray/util.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/numarray/util.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,7 +1,7 @@
 import os
 import numpy
 
-__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError', 
+__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError',
            'handleError', 'get_numarray_include_dirs']
 
 class MathDomainError(ArithmeticError): pass

Modified: branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,8 +1,8 @@
 """Backward compatible with arrayfns from Numeric
 """
 
-__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', 
-           'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span', 
+__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask',
+           'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span',
            'to_corners', 'zmin_zmax']
 
 import numpy as np

Modified: branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,7 +1,7 @@
 # This module is for compatibility only.  All functions are defined elsewhere.
 
 __all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle',
-           'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', 
+           'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort',
            'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud',
            'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc',
            'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']

Modified: branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -5,7 +5,7 @@
 
 
 __all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution',
-           'NormalDistribution', 'UniformDistribution', 'error', 'ranf', 
+           'NormalDistribution', 'UniformDistribution', 'error', 'ranf',
            'default_distribution', 'random_sample', 'standard_generator']
 
 import numpy.random.mtrand as mt

Modified: branches/dynamic_cpu_configuration/numpy/testing/__init__.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/testing/__init__.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/testing/__init__.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -5,12 +5,10 @@
 away.
 """
 
-#import unittest
 from unittest import TestCase
 
 import decorators as dec
 from utils import *
-from parametric import ParametricTestCase
 from numpytest import *
 from nosetester import NoseTester as Tester
 from nosetester import run_module_suite

Modified: branches/dynamic_cpu_configuration/numpy/testing/decorators.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/testing/decorators.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/testing/decorators.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -51,8 +51,11 @@
 
     Parameters
     ---------
-    skip_condition : bool
-        Flag to determine whether to skip test (True) or not (False)
+    skip_condition : bool or callable
+        Flag to determine whether to skip the test.  If the condition is a
+        callable, it is used at runtime to dynamically make the decision.  This
+        is useful for tests that may require costly imports, to delay the cost
+        until the test suite is actually executed.
     msg : string
         Message to give on raising a SkipTest exception
 
@@ -69,28 +72,66 @@
     decorator with the nose.tools.make_decorator function in order to
     transmit function name, and various other metadata.
     '''
-    if msg is None:
-        msg = 'Test skipped due to test condition'
+
     def skip_decorator(f):
         # Local import to avoid a hard nose dependency and only incur the
         # import time overhead at actual test-time.
         import nose
-        def skipper(*args, **kwargs):
-            if skip_condition:
-                raise nose.SkipTest, msg
+
+        # Allow for both boolean or callable skip conditions.
+        if callable(skip_condition):
+            skip_val = lambda : skip_condition()
+        else:
+            skip_val = lambda : skip_condition
+
+        def get_msg(func,msg=None):
+            """Skip message with information about function being skipped."""
+            if msg is None: 
+                out = 'Test skipped due to test condition'
+            else: 
+                out = '\n'+msg
+
+            return "Skipping test: %s%s" % (func.__name__,out)
+
+        # We need to define *two* skippers because Python doesn't allow both
+        # return with value and yield inside the same function.
+        def skipper_func(*args, **kwargs):
+            """Skipper for normal test functions."""
+            if skip_val():
+                raise nose.SkipTest(get_msg(f,msg))
             else:
                 return f(*args, **kwargs)
+
+        def skipper_gen(*args, **kwargs):
+            """Skipper for test generators."""
+            if skip_val():
+                raise nose.SkipTest(get_msg(f,msg))
+            else:
+                for x in f(*args, **kwargs):
+                    yield x
+
+        # Choose the right skipper to use when building the actual decorator.
+        if nose.util.isgenerator(f):
+            skipper = skipper_gen
+        else:
+            skipper = skipper_func
+            
         return nose.tools.make_decorator(f)(skipper)
+
     return skip_decorator
 
-def knownfailureif(skip_condition, msg=None):
-    ''' Make function raise KnownFailureTest exception if skip_condition is true
 
+def knownfailureif(fail_condition, msg=None):
+    ''' Make function raise KnownFailureTest exception if fail_condition is true
+
     Parameters
     ---------
-    skip_condition : bool
-        Flag to determine whether to mark test as known failure (True) 
-        or not (False)
+    fail_condition : bool or callable
+        Flag to determine whether to mark the test as a known failure (True)
+        or not (False).  If the condition is a callable, it is used at
+        runtime to dynamically make the decision.  This is useful for
+        tests that may require costly imports, to delay the cost
+        until the test suite is actually executed.
     msg : string
         Message to give on raising a KnownFailureTest exception
 
@@ -109,15 +150,23 @@
     '''
     if msg is None:
         msg = 'Test skipped due to known failure'
-    def skip_decorator(f):
+
+    # Allow for both boolean or callable known failure conditions.
+    if callable(fail_condition):
+        fail_val = lambda : fail_condition()
+    else:
+        fail_val = lambda : fail_condition
+
+    def knownfail_decorator(f):
         # Local import to avoid a hard nose dependency and only incur the
         # import time overhead at actual test-time.
         import nose
         from noseclasses import KnownFailureTest
-        def skipper(*args, **kwargs):
-            if skip_condition:
+        def knownfailer(*args, **kwargs):
+            if fail_val():
                 raise KnownFailureTest, msg
             else:
                 return f(*args, **kwargs)
-        return nose.tools.make_decorator(f)(skipper)
-    return skip_decorator
+        return nose.tools.make_decorator(f)(knownfailer)
+
+    return knownfail_decorator

Modified: branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,4 +1,6 @@
-# These classes implement a doctest runner plugin for nose.
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+
 # Because this module imports nose directly, it should not
 # be used except by nosetester.py to avoid a general NumPy
 # dependency on nose.
@@ -6,6 +8,7 @@
 import os
 import doctest
 
+import nose
 from nose.plugins import doctests as npd
 from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
 from nose.plugins.base import Plugin
@@ -251,7 +254,7 @@
 
 
 class KnownFailure(ErrorClassPlugin):
-    '''Plugin that installs a KNOWNFAIL error class for the 
+    '''Plugin that installs a KNOWNFAIL error class for the
     KnownFailureClass exception.  When KnownFailureTest is raised,
     the exception will be logged in the knownfail attribute of the
     result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
@@ -275,3 +278,25 @@
         disable = getattr(options, 'noKnownFail', False)
         if disable:
             self.enabled = False
+
+
+
+# Because nose currently discards the test result object, but we need
+# to return it to the user, override TestProgram.runTests to retain
+# the result
+class NumpyTestProgram(nose.core.TestProgram):
+    def runTests(self):
+        """Run Tests. Returns true on success, false on failure, and
+        sets self.success to the same value.
+        """
+        if self.testRunner is None:
+            self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
+                                                       verbosity=self.config.verbosity,
+                                                       config=self.config)
+        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+        if plug_runner is not None:
+            self.testRunner = plug_runner
+
+        self.result = self.testRunner.run(self.test)
+        self.success = self.result.wasSuccessful()
+        return self.success

Modified: branches/dynamic_cpu_configuration/numpy/testing/nosetester.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/testing/nosetester.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/testing/nosetester.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -5,7 +5,6 @@
 '''
 import os
 import sys
-import warnings
 
 def get_package_name(filepath):
     # find the package name given a path name that's part of the package
@@ -28,7 +27,6 @@
     pkg_name.reverse()
     return '.'.join(pkg_name)
 
-
 def import_nose():
     """ Import nose only when needed.
     """
@@ -166,8 +164,8 @@
         print "nose version %d.%d.%d" % nose.__versioninfo__
 
 
-    def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
-             coverage=False, **kwargs):
+    def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, 
+                          doctests=False, coverage=False):
         ''' Run tests for module using nose
 
         %(test_header)s
@@ -179,39 +177,6 @@
              http://nedbatchelder.com/code/modules/coverage.html)
         '''
 
-        old_args = set(['level', 'verbosity', 'all', 'sys_argv',
-                        'testcase_pattern'])
-        unexpected_args = set(kwargs.keys()) - old_args
-        if len(unexpected_args) > 0:
-            ua = ', '.join(unexpected_args)
-            raise TypeError("test() got unexpected arguments: %s" % ua)
-
-        # issue a deprecation warning if any of the pre-1.2 arguments to
-        # test are given
-        if old_args.intersection(kwargs.keys()):
-            warnings.warn("This method's signature will change in the next " \
-                          "release; the level, verbosity, all, sys_argv, " \
-                          "and testcase_pattern keyword arguments will be " \
-                          "removed. Please update your code.",
-                          DeprecationWarning, stacklevel=2)
-
-        # Use old arguments if given (where it makes sense)
-        # For the moment, level and sys_argv are ignored
-
-        # replace verbose with verbosity
-        if kwargs.get('verbosity') is not None:
-            verbose = kwargs.get('verbosity')
-            # cap verbosity at 3 because nose becomes *very* verbose beyond that
-            verbose = min(verbose, 3)
-
-        import utils
-        utils.verbose = verbose
-
-        # if all evaluates as True, omit attribute filter and run doctests
-        if kwargs.get('all'):
-            label = ''
-            doctests = True
-
         # if doctests is in the extra args, remove it and set the doctest
         # flag so the NumPy doctester is used instead
         if extra_argv and '--with-doctest' in extra_argv:
@@ -221,9 +186,6 @@
         argv = self._test_argv(label, verbose, extra_argv)
         if doctests:
             argv += ['--with-numpydoctest']
-            print "Running unit tests and doctests for %s" % self.package_name
-        else:
-            print "Running unit tests for %s" % self.package_name
 
         if coverage:
             argv+=['--cover-package=%s' % self.package_name, '--with-coverage',
@@ -237,33 +199,8 @@
         argv += ['--exclude','swig_ext']
         argv += ['--exclude','array_from_pyobj']
 
-        self._show_system_info()
-
         nose = import_nose()
 
-        # Because nose currently discards the test result object, but we need
-        # to return it to the user, override TestProgram.runTests to retain
-        # the result
-        class NumpyTestProgram(nose.core.TestProgram):
-            def runTests(self):
-                """Run Tests. Returns true on success, false on failure, and
-                sets self.success to the same value.
-                """
-                if self.testRunner is None:
-                    self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
-                                                               verbosity=self.config.verbosity,
-                                                               config=self.config)
-                plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
-                if plug_runner is not None:
-                    self.testRunner = plug_runner
-                self.result = self.testRunner.run(self.test)
-                self.success = self.result.wasSuccessful()
-                return self.success
-
-        # reset doctest state on every run
-        import doctest
-        doctest.master = None
-
         # construct list of plugins, omitting the existing doctest plugin
         import nose.plugins.builtin
         from noseclasses import NumpyDoctest, KnownFailure
@@ -271,10 +208,46 @@
         for p in nose.plugins.builtin.plugins:
             plug = p()
             if plug.name == 'doctest':
+                # skip the builtin doctest plugin
                 continue
 
             plugins.append(plug)
 
+        return argv, plugins
+
+    def test(self, label='fast', verbose=1, extra_argv=None, doctests=False,
+             coverage=False):
+        ''' Run tests for module using nose
+
+        %(test_header)s
+        doctests : boolean
+            If True, run doctests in module, default False
+        coverage : boolean
+            If True, report coverage of NumPy code, default False
+            (Requires the coverage module:
+             http://nedbatchelder.com/code/modules/coverage.html)
+        '''
+
+        # cap verbosity at 3 because nose becomes *very* verbose beyond that
+        verbose = min(verbose, 3)
+
+        import utils
+        utils.verbose = verbose
+
+        if doctests:
+            print "Running unit tests and doctests for %s" % self.package_name
+        else:
+            print "Running unit tests for %s" % self.package_name
+
+        self._show_system_info()
+
+        # reset doctest state on every run
+        import doctest
+        doctest.master = None
+
+        argv, plugins = self.prepare_test_args(label, verbose, extra_argv,
+                                               doctests, coverage)
+        from noseclasses import NumpyTestProgram
         t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
         return t.result
 
@@ -286,9 +259,10 @@
         print "Running benchmarks for %s" % self.package_name
         self._show_system_info()
 
-        nose = import_nose()
         argv = self._test_argv(label, verbose, extra_argv)
         argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+        nose = import_nose()
         return nose.run(argv=argv)
 
     # generate method docstrings

Modified: branches/dynamic_cpu_configuration/numpy/testing/numpytest.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/testing/numpytest.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/testing/numpytest.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,91 +1,16 @@
 import os
-import re
 import sys
-import imp
-import types
-import unittest
 import traceback
-import warnings
 
-__all__ = ['set_package_path', 'set_local_path', 'restore_path',
-           'IgnoreException', 'NumpyTestCase', 'NumpyTest', 'importall',]
+__all__ = ['IgnoreException', 'importall',]
 
 DEBUG=0
-from numpy.testing.utils import jiffies
 get_frame = sys._getframe
 
 class IgnoreException(Exception):
     "Ignoring this exception due to disabled feature"
 
 
-def set_package_path(level=1):
-    """ Prepend package directory to sys.path.
-
-    set_package_path should be called from a test_file.py that
-    satisfies the following tree structure:
-
-      <somepath>/<somedir>/test_file.py
-
-    Then the first existing path name from the following list
-
-      <somepath>/build/lib.<platform>-<version>
-      <somepath>/..
-
-    is prepended to sys.path.
-    The caller is responsible for removing this path by using
-
-      restore_path()
-    """
-    warnings.warn("set_package_path will be removed in NumPy 1.3; please "
-                  "update your code", DeprecationWarning, stacklevel=2)
-
-    from distutils.util import get_platform
-    f = get_frame(level)
-    if f.f_locals['__name__']=='__main__':
-        testfile = sys.argv[0]
-    else:
-        testfile = f.f_locals['__file__']
-    d = os.path.dirname(os.path.dirname(os.path.abspath(testfile)))
-    d1 = os.path.join(d,'build','lib.%s-%s'%(get_platform(),sys.version[:3]))
-    if not os.path.isdir(d1):
-        d1 = os.path.dirname(d)
-    if DEBUG:
-        print 'Inserting %r to sys.path for test_file %r' % (d1, testfile)
-    sys.path.insert(0,d1)
-    return
-
-
-def set_local_path(reldir='', level=1):
-    """ Prepend local directory to sys.path.
-
-    The caller is responsible for removing this path by using
-
-      restore_path()
-    """
-    warnings.warn("set_local_path will be removed in NumPy 1.3; please "
-                  "update your code", DeprecationWarning, stacklevel=2)
-
-    f = get_frame(level)
-    if f.f_locals['__name__']=='__main__':
-        testfile = sys.argv[0]
-    else:
-        testfile = f.f_locals['__file__']
-    local_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(testfile)),reldir))
-    if DEBUG:
-        print 'Inserting %r to sys.path' % (local_path)
-    sys.path.insert(0,local_path)
-    return
-
-def restore_path():
-    warnings.warn("restore_path will be removed in NumPy 1.3; please "
-                  "update your code", DeprecationWarning, stacklevel=2)
-
-    if DEBUG:
-        print 'Removing %r from sys.path' % (sys.path[0])
-    del sys.path[0]
-    return
-
-
 def output_exception(printstream = sys.stdout):
     try:
         type, value, tb = sys.exc_info()
@@ -99,576 +24,6 @@
         type = value = tb = None # clean up
     return
 
-
-class _dummy_stream:
-    def __init__(self,stream):
-        self.data = []
-        self.stream = stream
-    def write(self,message):
-        if not self.data and not message.startswith('E'):
-            self.stream.write(message)
-            self.stream.flush()
-            message = ''
-        self.data.append(message)
-    def writeln(self,message):
-        self.write(message+'\n')
-    def flush(self):
-        self.stream.flush()
-
-
-class NumpyTestCase (unittest.TestCase):
-    def __init__(self, *args, **kwds):
-        warnings.warn("NumpyTestCase will be removed in the next release; please update your code to use nose or unittest",
-                         DeprecationWarning, stacklevel=2)
-        unittest.TestCase.__init__(self, *args, **kwds)
-
-    def measure(self,code_str,times=1):
-        """ Return elapsed time for executing code_str in the
-        namespace of the caller for given times.
-        """
-        frame = get_frame(1)
-        locs,globs = frame.f_locals,frame.f_globals
-        code = compile(code_str,
-                       'NumpyTestCase runner for '+self.__class__.__name__,
-                       'exec')
-        i = 0
-        elapsed = jiffies()
-        while i<times:
-            i += 1
-            exec code in globs,locs
-        elapsed = jiffies() - elapsed
-        return 0.01*elapsed
-
-    def __call__(self, result=None):
-        if result is None or not hasattr(result, 'errors') \
-                or not hasattr(result, 'stream'):
-            return unittest.TestCase.__call__(self, result)
-
-        nof_errors = len(result.errors)
-        save_stream = result.stream
-        result.stream = _dummy_stream(save_stream)
-        unittest.TestCase.__call__(self, result)
-        if nof_errors != len(result.errors):
-            test, errstr = result.errors[-1][:2]
-            if isinstance(errstr, tuple):
-                errstr = str(errstr[0])
-            elif isinstance(errstr, str):
-                errstr = errstr.split('\n')[-2]
-            else:
-                # allow for proxy classes
-                errstr = str(errstr).split('\n')[-2]
-            l = len(result.stream.data)
-            if errstr.startswith('IgnoreException:'):
-                if l==1:
-                    assert result.stream.data[-1]=='E', \
-                            repr(result.stream.data)
-                    result.stream.data[-1] = 'i'
-                else:
-                    assert result.stream.data[-1]=='ERROR\n', \
-                            repr(result.stream.data)
-                    result.stream.data[-1] = 'ignoring\n'
-                del result.errors[-1]
-        map(save_stream.write, result.stream.data)
-        save_stream.flush()
-        result.stream = save_stream
-
-    def warn(self, message):
-        from numpy.distutils.misc_util import yellow_text
-        print>>sys.stderr,yellow_text('Warning: %s' % (message))
-        sys.stderr.flush()
-    def info(self, message):
-        print>>sys.stdout, message
-        sys.stdout.flush()
-
-    def rundocs(self, filename=None):
-        """ Run doc string tests found in filename.
-        """
-        import doctest
-        if filename is None:
-            f = get_frame(1)
-            filename = f.f_globals['__file__']
-        name = os.path.splitext(os.path.basename(filename))[0]
-        path = [os.path.dirname(filename)]
-        file, pathname, description = imp.find_module(name, path)
-        try:
-            m = imp.load_module(name, file, pathname, description)
-        finally:
-            file.close()
-        if sys.version[:3]<'2.4':
-            doctest.testmod(m, verbose=False)
-        else:
-            tests = doctest.DocTestFinder().find(m)
-            runner = doctest.DocTestRunner(verbose=False)
-            for test in tests:
-                runner.run(test)
-        return
-
-
-def _get_all_method_names(cls):
-    names = dir(cls)
-    if sys.version[:3]<='2.1':
-        for b in cls.__bases__:
-            for n in dir(b)+_get_all_method_names(b):
-                if n not in names:
-                    names.append(n)
-    return names
-
-
-# for debug build--check for memory leaks during the test.
-class _NumPyTextTestResult(unittest._TextTestResult):
-    def startTest(self, test):
-        unittest._TextTestResult.startTest(self, test)
-        if self.showAll:
-            N = len(sys.getobjects(0))
-            self._totnumobj = N
-            self._totrefcnt = sys.gettotalrefcount()
-        return
-
-    def stopTest(self, test):
-        if self.showAll:
-            N = len(sys.getobjects(0))
-            self.stream.write("objects: %d ===> %d;   " % (self._totnumobj, N))
-            self.stream.write("refcnts: %d ===> %d\n" % (self._totrefcnt,
-                              sys.gettotalrefcount()))
-        return
-
-class NumPyTextTestRunner(unittest.TextTestRunner):
-    def _makeResult(self):
-        return _NumPyTextTestResult(self.stream, self.descriptions, self.verbosity)
-
-
-class NumpyTest:
-    """ Numpy tests site manager.
-
-    Usage: NumpyTest(<package>).test(level=1,verbosity=1)
-
-    <package> is package name or its module object.
-
-    Package is supposed to contain a directory tests/ with test_*.py
-    files where * refers to the names of submodules.  See .rename()
-    method to redefine name mapping between test_*.py files and names of
-    submodules. Pattern test_*.py can be overwritten by redefining
-    .get_testfile() method.
-
-    test_*.py files are supposed to define a classes, derived from
-    NumpyTestCase or unittest.TestCase, with methods having names
-    starting with test or bench or check. The names of TestCase classes
-    must have a prefix test. This can be overwritten by redefining
-    .check_testcase_name() method.
-
-    And that is it! No need to implement test or test_suite functions
-    in each .py file.
-
-    Old-style test_suite(level=1) hooks are also supported.
-    """
-    _check_testcase_name = re.compile(r'test.*|Test.*').match
-    def check_testcase_name(self, name):
-        """ Return True if name matches TestCase class.
-        """
-        return not not self._check_testcase_name(name)
-
-    testfile_patterns = ['test_%(modulename)s.py']
-    def get_testfile(self, module, verbosity = 0):
-        """ Return path to module test file.
-        """
-        mstr = self._module_str
-        short_module_name = self._get_short_module_name(module)
-        d = os.path.split(module.__file__)[0]
-        test_dir = os.path.join(d,'tests')
-        local_test_dir = os.path.join(os.getcwd(),'tests')
-        if os.path.basename(os.path.dirname(local_test_dir)) \
-               == os.path.basename(os.path.dirname(test_dir)):
-            test_dir = local_test_dir
-        for pat in self.testfile_patterns:
-            fn = os.path.join(test_dir, pat % {'modulename':short_module_name})
-            if os.path.isfile(fn):
-                return fn
-        if verbosity>1:
-            self.warn('No test file found in %s for module %s' \
-                      % (test_dir, mstr(module)))
-        return
-
-    def __init__(self, package=None):
-        warnings.warn("NumpyTest will be removed in the next release; please update your code to use nose or unittest",
-                         DeprecationWarning, stacklevel=2)
-        if package is None:
-            from numpy.distutils.misc_util import get_frame
-            f = get_frame(1)
-            package = f.f_locals.get('__name__',f.f_globals.get('__name__',None))
-            assert package is not None
-        self.package = package
-        self._rename_map = {}
-
-    def rename(self, **kws):
-        """Apply renaming submodule test file test_<name>.py to
-        test_<newname>.py.
-
-        Usage: self.rename(name='newname') before calling the
-        self.test() method.
-
-        If 'newname' is None, then no tests will be executed for a given
-        module.
-        """
-        for k,v in kws.items():
-            self._rename_map[k] = v
-        return
-
-    def _module_str(self, module):
-        filename = module.__file__[-30:]
-        if filename!=module.__file__:
-            filename = '...'+filename
-        return '<module %r from %r>' % (module.__name__, filename)
-
-    def _get_method_names(self,clsobj,level):
-        names = []
-        for mthname in _get_all_method_names(clsobj):
-            if mthname[:5] not in ['bench','check'] \
-               and mthname[:4] not in ['test']:
-                continue
-            mth = getattr(clsobj, mthname)
-            if type(mth) is not types.MethodType:
-                continue
-            d = mth.im_func.func_defaults
-            if d is not None:
-                mthlevel = d[0]
-            else:
-                mthlevel = 1
-            if level>=mthlevel:
-                if mthname not in names:
-                    names.append(mthname)
-            for base in clsobj.__bases__:
-                for n in self._get_method_names(base,level):
-                    if n not in names:
-                        names.append(n)
-        return names
-
-    def _get_short_module_name(self, module):
-        d,f = os.path.split(module.__file__)
-        short_module_name = os.path.splitext(os.path.basename(f))[0]
-        if short_module_name=='__init__':
-            short_module_name = module.__name__.split('.')[-1]
-        short_module_name = self._rename_map.get(short_module_name,short_module_name)
-        return short_module_name
-
-    def _get_module_tests(self, module, level, verbosity):
-        mstr = self._module_str
-
-        short_module_name = self._get_short_module_name(module)
-        if short_module_name is None:
-            return []
-
-        test_file = self.get_testfile(module, verbosity)
-
-        if test_file is None:
-            return []
-
-        if not os.path.isfile(test_file):
-            if short_module_name[:5]=='info_' \
-               and short_module_name[5:]==module.__name__.split('.')[-2]:
-                return []
-            if short_module_name in ['__cvs_version__','__svn_version__']:
-                return []
-            if short_module_name[-8:]=='_version' \
-               and short_module_name[:-8]==module.__name__.split('.')[-2]:
-                return []
-            if verbosity>1:
-                self.warn(test_file)
-                self.warn('   !! No test file %r found for %s' \
-                          % (os.path.basename(test_file), mstr(module)))
-            return []
-
-        if test_file in self.test_files:
-            return []
-
-        parent_module_name = '.'.join(module.__name__.split('.')[:-1])
-        test_module_name,ext = os.path.splitext(os.path.basename(test_file))
-        test_dir_module = parent_module_name+'.tests'
-        test_module_name = test_dir_module+'.'+test_module_name
-
-        if test_dir_module not in sys.modules:
-            sys.modules[test_dir_module] = imp.new_module(test_dir_module)
-
-        old_sys_path = sys.path[:]
-        try:
-            f = open(test_file,'r')
-            test_module = imp.load_module(test_module_name, f,
-                                          test_file, ('.py', 'r', 1))
-            f.close()
-        except:
-            sys.path[:] = old_sys_path
-            self.warn('FAILURE importing tests for %s' % (mstr(module)))
-            output_exception(sys.stderr)
-            return []
-        sys.path[:] = old_sys_path
-
-        self.test_files.append(test_file)
-
-        return self._get_suite_list(test_module, level, module.__name__)
-
-    def _get_suite_list(self, test_module, level, module_name='__main__',
-                        verbosity=1):
-        suite_list = []
-        if hasattr(test_module, 'test_suite'):
-            suite_list.extend(test_module.test_suite(level)._tests)
-        for name in dir(test_module):
-            obj = getattr(test_module, name)
-            if type(obj) is not type(unittest.TestCase) \
-               or not issubclass(obj, unittest.TestCase) \
-               or not self.check_testcase_name(obj.__name__):
-                continue
-            for mthname in self._get_method_names(obj,level):
-                suite = obj(mthname)
-                if getattr(suite,'isrunnable',lambda mthname:1)(mthname):
-                    suite_list.append(suite)
-        matched_suite_list = [suite for suite in suite_list \
-                              if self.testcase_match(suite.id()\
-                                                     .replace('__main__.',''))]
-        if verbosity>=0:
-            self.info('  Found %s/%s tests for %s' \
-                      % (len(matched_suite_list), len(suite_list), module_name))
-        return matched_suite_list
-
-    def _test_suite_from_modules(self, this_package, level, verbosity):
-        package_name = this_package.__name__
-        modules = []
-        for name, module in sys.modules.items():
-            if not name.startswith(package_name) or module is None:
-                continue
-            if not hasattr(module,'__file__'):
-                continue
-            if os.path.basename(os.path.dirname(module.__file__))=='tests':
-                continue
-            modules.append((name, module))
-
-        modules.sort()
-        modules = [m[1] for m in modules]
-
-        self.test_files = []
-        suites = []
-        for module in modules:
-            suites.extend(self._get_module_tests(module, abs(level), verbosity))
-
-        suites.extend(self._get_suite_list(sys.modules[package_name],
-                                           abs(level), verbosity=verbosity))
-        return unittest.TestSuite(suites)
-
-    def _test_suite_from_all_tests(self, this_package, level, verbosity):
-        importall(this_package)
-        package_name = this_package.__name__
-
-        # Find all tests/ directories under the package
-        test_dirs_names = {}
-        for name, module in sys.modules.items():
-            if not name.startswith(package_name) or module is None:
-                continue
-            if not hasattr(module, '__file__'):
-                continue
-            d = os.path.dirname(module.__file__)
-            if os.path.basename(d)=='tests':
-                continue
-            d = os.path.join(d, 'tests')
-            if not os.path.isdir(d):
-                continue
-            if d in test_dirs_names:
-                continue
-            test_dir_module = '.'.join(name.split('.')[:-1]+['tests'])
-            test_dirs_names[d] = test_dir_module
-
-        test_dirs = test_dirs_names.keys()
-        test_dirs.sort()
-
-        # For each file in each tests/ directory with a test case in it,
-        # import the file, and add the test cases to our list
-        suite_list = []
-        testcase_match = re.compile(r'\s*class\s+\w+\s*\(.*TestCase').match
-        for test_dir in test_dirs:
-            test_dir_module = test_dirs_names[test_dir]
-
-            if test_dir_module not in sys.modules:
-                sys.modules[test_dir_module] = imp.new_module(test_dir_module)
-
-            for fn in os.listdir(test_dir):
-                base, ext = os.path.splitext(fn)
-                if ext != '.py':
-                    continue
-                f = os.path.join(test_dir, fn)
-
-                # check that file contains TestCase class definitions:
-                fid = open(f, 'r')
-                skip = True
-                for line in fid:
-                    if testcase_match(line):
-                        skip = False
-                        break
-                fid.close()
-                if skip:
-                    continue
-
-                # import the test file
-                n = test_dir_module + '.' + base
-                # in case test files import local modules
-                sys.path.insert(0, test_dir)
-                fo = None
-                try:
-                    try:
-                        fo = open(f)
-                        test_module = imp.load_module(n, fo, f,
-                                                      ('.py', 'U', 1))
-                    except Exception, msg:
-                        print 'Failed importing %s: %s' % (f,msg)
-                        continue
-                finally:
-                    if fo:
-                        fo.close()
-                    del sys.path[0]
-
-                suites = self._get_suite_list(test_module, level,
-                                              module_name=n,
-                                              verbosity=verbosity)
-                suite_list.extend(suites)
-
-        all_tests = unittest.TestSuite(suite_list)
-        return all_tests
-
-    def test(self, level=1, verbosity=1, all=True, sys_argv=[],
-             testcase_pattern='.*'):
-        """Run Numpy module test suite with level and verbosity.
-
-        level:
-          None           --- do nothing, return None
-          < 0            --- scan for tests of level=abs(level),
-                             don't run them, return TestSuite-list
-          > 0            --- scan for tests of level, run them,
-                             return TestRunner
-          > 10           --- run all tests (same as specifying all=True).
-                             (backward compatibility).
-
-        verbosity:
-          >= 0           --- show information messages
-          > 1            --- show warnings on missing tests
-
-        all:
-          True            --- run all test files (like self.testall())
-          False (default) --- only run test files associated with a module
-
-        sys_argv          --- replacement of sys.argv[1:] during running
-                              tests.
-
-        testcase_pattern  --- run only tests that match given pattern.
-
-        It is assumed (when all=False) that package tests suite follows
-        the following convention: for each package module, there exists
-        file <packagepath>/tests/test_<modulename>.py that defines
-        TestCase classes (with names having prefix 'test_') with methods
-        (with names having prefixes 'check_' or 'bench_'); each of these
-        methods are called when running unit tests.
-        """
-        if level is None: # Do nothing.
-            return
-
-        if isinstance(self.package, str):
-            exec 'import %s as this_package' % (self.package)
-        else:
-            this_package = self.package
-
-        self.testcase_match = re.compile(testcase_pattern).match
-
-        if all:
-            all_tests = self._test_suite_from_all_tests(this_package,
-                                                        level, verbosity)
-        else:
-            all_tests = self._test_suite_from_modules(this_package,
-                                                      level, verbosity)
-
-        if level < 0:
-            return all_tests
-
-        runner = unittest.TextTestRunner(verbosity=verbosity)
-        old_sys_argv = sys.argv[1:]
-        sys.argv[1:] = sys_argv
-        # Use the builtin displayhook. If the tests are being run
-        # under IPython (for instance), any doctest test suites will
-        # fail otherwise.
-        old_displayhook = sys.displayhook
-        sys.displayhook = sys.__displayhook__
-        try:
-            r = runner.run(all_tests)
-        finally:
-            sys.displayhook = old_displayhook
-        sys.argv[1:] = old_sys_argv
-        return r
-
-    def testall(self, level=1,verbosity=1):
-        """ Run Numpy module test suite with level and verbosity.
-
-        level:
-          None           --- do nothing, return None
-          < 0            --- scan for tests of level=abs(level),
-                             don't run them, return TestSuite-list
-          > 0            --- scan for tests of level, run them,
-                             return TestRunner
-
-        verbosity:
-          >= 0           --- show information messages
-          > 1            --- show warnings on missing tests
-
-        Different from .test(..) method, this method looks for
-        TestCase classes from all files in <packagedir>/tests/
-        directory and no assumptions are made for naming the
-        TestCase classes or their methods.
-        """
-        return self.test(level=level, verbosity=verbosity, all=True)
-
-    def run(self):
-        """ Run Numpy module test suite with level and verbosity
-        taken from sys.argv. Requires optparse module.
-        """
-
-        # delayed import of shlex to reduce startup time
-        import shlex
-
-        try:
-            from optparse import OptionParser
-        except ImportError:
-            self.warn('Failed to import optparse module, ignoring.')
-            return self.test()
-        usage = r'usage: %prog [-v <verbosity>] [-l <level>]'\
-                r' [-s "<replacement of sys.argv[1:]>"]'\
-                r' [-t "<testcase pattern>"]'
-        parser = OptionParser(usage)
-        parser.add_option("-v", "--verbosity",
-                          action="store",
-                          dest="verbosity",
-                          default=1,
-                          type='int')
-        parser.add_option("-l", "--level",
-                          action="store",
-                          dest="level",
-                          default=1,
-                          type='int')
-        parser.add_option("-s", "--sys-argv",
-                          action="store",
-                          dest="sys_argv",
-                          default='',
-                          type='string')
-        parser.add_option("-t", "--testcase-pattern",
-                          action="store",
-                          dest="testcase_pattern",
-                          default=r'.*',
-                          type='string')
-        (options, args) = parser.parse_args()
-        return self.test(options.level,options.verbosity,
-                         sys_argv=shlex.split(options.sys_argv or ''),
-                         testcase_pattern=options.testcase_pattern)
-
-    def warn(self, message):
-        from numpy.distutils.misc_util import yellow_text
-        print>>sys.stderr,yellow_text('Warning: %s' % (message))
-        sys.stderr.flush()
-    def info(self, message):
-        print>>sys.stdout, message
-        sys.stdout.flush()
-
 def importall(package):
     """
     Try recursively to import all subpackages under package.

Deleted: branches/dynamic_cpu_configuration/numpy/testing/parametric.py
===================================================================
--- branches/dynamic_cpu_configuration/numpy/testing/parametric.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/numpy/testing/parametric.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -1,311 +0,0 @@
-"""Support for parametric tests in unittest.
-
-:Author: Fernando Perez
-
-Purpose
-=======
-
-Briefly, the main class in this module allows you to easily and cleanly
-(without the gross name-mangling hacks that are normally needed) to write
-unittest TestCase classes that have parametrized tests.  That is, tests which
-consist of multiple sub-tests that scan for example a parameter range, but
-where you want each sub-test to:
-
-* count as a separate test in the statistics.
-
-* be run even if others in the group error out or fail.
-
-
-The class offers a simple name-based convention to create such tests (see
-simple example at the end), in one of two ways:
-
-* Each sub-test in a group can be run fully independently, with the
-  setUp/tearDown methods being called each time.
-
-* The whole group can be run with setUp/tearDown being called only once for the
-  group.  This lets you conveniently reuse state that may be very expensive to
-  compute for multiple tests.  Be careful not to corrupt it!!!
-
-
-Caveats
-=======
-
-This code relies on implementation details of the unittest module (some key
-methods are heavily modified versions of those, after copying them in).  So it
-may well break either if you make sophisticated use of the unittest APIs, or if
-unittest itself changes in the future.  I have only tested this with Python
-2.5.
-
-"""
-__docformat__ = "restructuredtext en"
-
-import unittest
-import warnings
-
-class _ParametricTestCase(unittest.TestCase):
-    """TestCase subclass with support for parametric tests.
-
-    Subclasses of this class can implement test methods that return a list of
-    tests and arguments to call those with, to do parametric testing (often
-    also called 'data driven' testing."""
-
-    #: Prefix for tests with independent state.  These methods will be run with
-    #: a separate setUp/tearDown call for each test in the group.
-    _indepParTestPrefix = 'testip'
-
-    #: Prefix for tests with shared state.  These methods will be run with
-    #: a single setUp/tearDown call for the whole group.  This is useful when
-    #: writing a group of tests for which the setup is expensive and one wants
-    #: to actually share that state.  Use with care (especially be careful not
-    #: to mutate the state you are using, which will alter later tests).
-    _shareParTestPrefix = 'testsp'
-
-    def __init__(self, methodName = 'runTest'):
-        warnings.warn("ParametricTestCase will be removed in the next NumPy "
-                      "release", DeprecationWarning)
-        unittest.TestCase.__init__(self, methodName)
-
-    def exec_test(self,test,args,result):
-        """Execute a single test.  Returns a success boolean"""
-
-        ok = False
-        try:
-            test(*args)
-            ok = True
-        except self.failureException:
-            result.addFailure(self, self._exc_info())
-        except KeyboardInterrupt:
-            raise
-        except:
-            result.addError(self, self._exc_info())
-
-        return ok
-
-    def set_testMethodDoc(self,doc):
-        self._testMethodDoc = doc
-        self._TestCase__testMethodDoc = doc
-
-    def get_testMethodDoc(self):
-        return self._testMethodDoc
-
-    testMethodDoc = property(fset=set_testMethodDoc, fget=get_testMethodDoc)
-
-    def get_testMethodName(self):
-        try:
-            return getattr(self,"_testMethodName")
-        except:
-            return getattr(self,"_TestCase__testMethodName")
-
-    testMethodName = property(fget=get_testMethodName)
-
-    def run_test(self, testInfo,result):
-        """Run one test with arguments"""
-
-        test,args = testInfo[0],testInfo[1:]
-
-        # Reset the doc attribute to be the docstring of this particular test,
-        # so that in error messages it prints the actual test's docstring and
-        # not that of the test factory.
-        self.testMethodDoc = test.__doc__
-        result.startTest(self)
-        try:
-            try:
-                self.setUp()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                return
-
-            ok = self.exec_test(test,args,result)
-
-            try:
-                self.tearDown()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                ok = False
-            if ok: result.addSuccess(self)
-        finally:
-            result.stopTest(self)
-
-    def run_tests(self, tests,result):
-        """Run many tests with a common setUp/tearDown.
-
-        The entire set of tests is run with a single setUp/tearDown call."""
-
-        try:
-            self.setUp()
-        except KeyboardInterrupt:
-            raise
-        except:
-            result.testsRun += 1
-            result.addError(self, self._exc_info())
-            return
-
-        saved_doc = self.testMethodDoc
-
-        try:
-            # Run all the tests specified
-            for testInfo in tests:
-                test,args = testInfo[0],testInfo[1:]
-
-                # Set the doc argument for this test.  Note that even if we do
-                # this, the fail/error tracebacks still print the docstring for
-                # the parent factory, because they only generate the message at
-                # the end of the run, AFTER we've restored it.  There is no way
-                # to tell the unittest system (without overriding a lot of
-                # stuff) to extract this information right away, the logic is
-                # hardcoded to pull it later, since unittest assumes it doesn't
-                # change.
-                self.testMethodDoc = test.__doc__
-                result.startTest(self)
-                ok = self.exec_test(test,args,result)
-                if ok: result.addSuccess(self)
-
-        finally:
-            # Restore docstring info and run tearDown once only.
-            self.testMethodDoc = saved_doc
-            try:
-                self.tearDown()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-
-    def run(self, result=None):
-        """Test runner."""
-
-        #print
-        #print '*** run for method:',self._testMethodName  # dbg
-        #print '***            doc:',self._testMethodDoc  # dbg
-
-        if result is None: result = self.defaultTestResult()
-
-        # Independent tests: each gets its own setup/teardown
-        if self.testMethodName.startswith(self._indepParTestPrefix):
-            for t in getattr(self,self.testMethodName)():
-                self.run_test(t,result)
-        # Shared-state test: single setup/teardown for all
-        elif self.testMethodName.startswith(self._shareParTestPrefix):
-            tests = getattr(self,self.testMethodName,'runTest')()
-            self.run_tests(tests,result)
-        # Normal unittest Test methods
-        else:
-            unittest.TestCase.run(self,result)
-
-# The underscore was added to the class name to keep nose from trying
-# to run the test class (nose ignores class names that begin with an
-# underscore by default).
-ParametricTestCase = _ParametricTestCase
-
-#############################################################################
-# Quick and dirty interactive example/test
-if __name__ == '__main__':
-
-    class ExampleTestCase(ParametricTestCase):
-
-        #-------------------------------------------------------------------
-        # An instrumented setUp method so we can see when it gets called and
-        # how many times per instance
-        counter = 0
-
-        def setUp(self):
-            self.counter += 1
-            print 'setUp count: %2s for: %s' % (self.counter,
-                                                self.testMethodDoc)
-
-        #-------------------------------------------------------------------
-        # A standard test method, just like in the unittest docs.
-        def test_foo(self):
-            """Normal test for feature foo."""
-            pass
-
-        #-------------------------------------------------------------------
-        # Testing methods that need parameters.  These can NOT be named test*,
-        # since they would be picked up by unittest and called without
-        # arguments.  Instead, call them anything else (I use tst*) and then
-        # load them via the factories below.
-        def tstX(self,i):
-            "Test feature X with parameters."
-            print 'tstX, i=',i
-            if i==1 or i==3:
-                # Test fails
-                self.fail('i is bad, bad: %s' % i)
-
-        def tstY(self,i):
-            "Test feature Y with parameters."
-            print 'tstY, i=',i
-            if i==1:
-                # Force an error
-                1/0
-
-        def tstXX(self,i,j):
-            "Test feature XX with parameters."
-            print 'tstXX, i=',i,'j=',j
-            if i==1:
-                # Test fails
-                self.fail('i is bad, bad: %s' % i)
-
-        def tstYY(self,i):
-            "Test feature YY with parameters."
-            print 'tstYY, i=',i
-            if i==2:
-                # Force an error
-                1/0
-
-        def tstZZ(self):
-            """Test feature ZZ without parameters, needs multiple runs.
-
-            This could be a random test that you want to run multiple times."""
-            pass
-
-        #-------------------------------------------------------------------
-        # Parametric test factories that create the test groups to call the
-        # above tst* methods with their required arguments.
-        def testip(self):
-            """Independent parametric test factory.
-
-            A separate setUp() call is made for each test returned by this
-            method.
-
-            You must return an iterable (list or generator is fine) containing
-            tuples with the actual method to be called as the first argument,
-            and the arguments for that call later."""
-            return [(self.tstX,i) for i in range(5)]
-
-        def testip2(self):
-            """Another independent parametric test factory"""
-            return [(self.tstY,i) for i in range(5)]
-
-        def testip3(self):
-            """Test factory combining different subtests.
-
-            This one shows how to assemble calls to different tests."""
-            return [(self.tstX,3),(self.tstX,9),(self.tstXX,4,10),
-                    (self.tstZZ,),(self.tstZZ,)]
-
-        def testsp(self):
-            """Shared parametric test factory
-
-            A single setUp() call is made for all the tests returned by this
-            method.
-            """
-            return [(self.tstXX,i,i+1) for i in range(5)]
-
-        def testsp2(self):
-            """Another shared parametric test factory"""
-            return [(self.tstYY,i) for i in range(5)]
-
-        def testsp3(self):
-            """Another shared parametric test factory.
-
-            This one simply calls the same test multiple times, without any
-            arguments.  Note that you must still return tuples, even if there
-            are no arguments."""
-            return [(self.tstZZ,) for i in range(10)]
-
-
-    # This test class runs normally under unittest's default runner
-    unittest.main()

Copied: branches/dynamic_cpu_configuration/numpy/testing/tests/test_decorators.py (from rev 6368, trunk/numpy/testing/tests/test_decorators.py)

Modified: branches/dynamic_cpu_configuration/setup.py
===================================================================
--- branches/dynamic_cpu_configuration/setup.py	2009-02-14 22:42:29 UTC (rev 6368)
+++ branches/dynamic_cpu_configuration/setup.py	2009-02-15 12:03:15 UTC (rev 6369)
@@ -44,14 +44,6 @@
 # a lot more robust than what was previously being used.
 __builtin__.__NUMPY_SETUP__ = True
 
-def setup_doc_files(configuration):
-    # Add doc sources
-    configuration.add_data_dir("doc/release")
-    configuration.add_data_dir("doc/source")
-    configuration.add_data_dir("doc/sphinxext")
-    configuration.add_data_files(("doc/Makefile"), ("doc/postprocess.py"))
-
-
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
 
@@ -69,8 +61,6 @@
 
     config.get_version('numpy/version.py') # sets config.version
 
-    setup_doc_files(config)
-
     return config
 
 def setup_package():



More information about the Numpy-svn mailing list