[Numpy-svn] r6125 - in trunk/doc: . source sphinxext sphinxext/tests

Sun Nov 30 08:45:00 CST 2008


Author: ptvirtan
Date: 2008-11-30 08:44:38 -0600 (Sun, 30 Nov 2008)
New Revision: 6125

Added:
   trunk/doc/sphinxext/
   trunk/doc/sphinxext/LICENSE.txt
   trunk/doc/sphinxext/__init__.py
   trunk/doc/sphinxext/autosummary.py
   trunk/doc/sphinxext/autosummary_generate.py
   trunk/doc/sphinxext/comment_eater.py
   trunk/doc/sphinxext/compiler_unparse.py
   trunk/doc/sphinxext/docscrape.py
   trunk/doc/sphinxext/docscrape_sphinx.py
   trunk/doc/sphinxext/numpydoc.py
   trunk/doc/sphinxext/only_directives.py
   trunk/doc/sphinxext/phantom_import.py
   trunk/doc/sphinxext/plot_directive.py
   trunk/doc/sphinxext/tests/
   trunk/doc/sphinxext/tests/test_docscrape.py
   trunk/doc/sphinxext/traitsdoc.py
Modified:
   trunk/doc/Makefile
   trunk/doc/source/conf.py
Log:
Move Sphinx extensions under Numpy's SVN trunk

Modified: trunk/doc/Makefile
===================================================================
--- trunk/doc/Makefile	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/Makefile	2008-11-30 14:44:38 UTC (rev 6125)
@@ -37,15 +37,12 @@
 	cd build/dist && tar czf ../dist.tar.gz *
 
 generate: build/generate-stamp
-build/generate-stamp: $(wildcard source/reference/*.rst) ext
+build/generate-stamp: $(wildcard source/reference/*.rst)
 	mkdir -p build
-	./ext/autosummary_generate.py source/reference/*.rst \
+	./sphinxext/autosummary_generate.py source/reference/*.rst \
 		-p dump.xml -o source/reference/generated 
 	touch build/generate-stamp
 
-ext:
-	svn co http://sphinx.googlecode.com/svn/contrib/trunk/numpyext ext
-
 html: generate
 	mkdir -p build/html build/doctrees
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html

Modified: trunk/doc/source/conf.py
===================================================================
--- trunk/doc/source/conf.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/source/conf.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -5,7 +5,7 @@
 # If your extensions are in another directory, add it here. If the directory
 # is relative to the documentation root, use os.path.abspath to make it
 # absolute, like shown here.
-sys.path.append(os.path.abspath('../ext'))
+sys.path.append(os.path.abspath('../sphinxext'))
 
 # Check Sphinx version
 import sphinx

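With ``../sphinxext`` on ``sys.path``, conf.py can refer to the new extension
modules by plain module name.  The exact extension list is not part of this
changeset; the sketch below only illustrates the pattern, using names taken
from the files added under sphinxext/ ::

    # Illustrative only -- not from this revision.
    import os, sys
    sys.path.append(os.path.abspath('../sphinxext'))

    extensions = ['sphinx.ext.autodoc', 'numpydoc', 'autosummary',
                  'only_directives', 'plot_directive', 'phantom_import']
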
Added: trunk/doc/sphinxext/LICENSE.txt
===================================================================
--- trunk/doc/sphinxext/LICENSE.txt	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/LICENSE.txt	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,97 @@
+-------------------------------------------------------------------------------
+    The files
+    - numpydoc.py
+    - autosummary.py
+    - autosummary_generate.py
+    - docscrape.py
+    - docscrape_sphinx.py
+    - phantom_import.py
+    have the following license:
+
+Copyright (C) 2008 Stefan van der Walt <stefan@mentat.za.net>, Pauli Virtanen <pav@iki.fi>
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+-------------------------------------------------------------------------------
+    The files
+    - compiler_unparse.py
+    - comment_eater.py
+    - traitsdoc.py
+    have the following license:
+
+This software is OSI Certified Open Source Software.
+OSI Certified is a certification mark of the Open Source Initiative.
+
+Copyright (c) 2006, Enthought, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+ * Neither the name of Enthought, Inc. nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-------------------------------------------------------------------------------
+    The files
+    - only_directives.py
+    - plot_directive.py
+    originate from Matplotlib (http://matplotlib.sf.net/) which has
+    the following license:
+
+Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
+
+1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
+
+4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
+

Added: trunk/doc/sphinxext/__init__.py
===================================================================

Added: trunk/doc/sphinxext/autosummary.py
===================================================================
--- trunk/doc/sphinxext/autosummary.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/autosummary.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,334 @@
+"""
+===========
+autosummary
+===========
+
+Sphinx extension that adds an autosummary:: directive, which can be
+used to generate function/method/attribute/etc. summary lists, similar
+to those output, e.g., by Epydoc and other API doc generation tools.
+
+An :autolink: role is also provided.
+
+autosummary directive
+---------------------
+
+The autosummary directive has the form::
+
+    .. autosummary::
+       :nosignatures:
+       :toctree: generated/
+       
+       module.function_1
+       module.function_2
+       ...
+
+and it generates an output table (optionally containing signatures)
+
+    ========================  =============================================
+    module.function_1(args)   Summary line from the docstring of function_1
+    module.function_2(args)   Summary line from the docstring
+    ...
+    ========================  =============================================
+
+If the :toctree: option is specified, files matching the function names
+are inserted into the toctree with the given prefix:
+
+    generated/module.function_1
+    generated/module.function_2
+    ...
+
+Note: The file names contain the module:: or currentmodule:: prefixes.
+
+.. seealso:: autosummary_generate.py
+
+
+autolink role
+-------------
+
+The autolink role functions like ``:obj:`` when the referenced name can be
+resolved to a Python object; otherwise it falls back to simple emphasis.
+This can be used as the default role to make links 'smart'.
+
+"""
+import sys, os, posixpath, re
+
+from docutils.parsers.rst import directives
+from docutils.statemachine import ViewList
+from docutils import nodes
+
+import sphinx.addnodes, sphinx.roles, sphinx.builder
+from sphinx.util import patfilter
+
+from docscrape_sphinx import get_doc_object
+
+
+def setup(app):
+    app.add_directive('autosummary', autosummary_directive, True, (0, 0, False),
+                      toctree=directives.unchanged,
+                      nosignatures=directives.flag)
+    app.add_role('autolink', autolink_role)
+    
+    app.add_node(autosummary_toc,
+                 html=(autosummary_toc_visit_html, autosummary_toc_depart_noop),
+                 latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop))
+    app.connect('doctree-read', process_autosummary_toc)
+
+#------------------------------------------------------------------------------
+# autosummary_toc node
+#------------------------------------------------------------------------------
+
+class autosummary_toc(nodes.comment):
+    pass
+
+def process_autosummary_toc(app, doctree):
+    """
+    Insert items described in autosummary:: into the TOC tree, but do
+    not generate the toctree:: list.
+
+    """
+    env = app.builder.env
+    crawled = {}
+    def crawl_toc(node, depth=1):
+        crawled[node] = True
+        for j, subnode in enumerate(node):
+            try:
+                if (isinstance(subnode, autosummary_toc)
+                    and isinstance(subnode[0], sphinx.addnodes.toctree)):
+                    env.note_toctree(env.docname, subnode[0])
+                    continue
+            except IndexError:
+                continue
+            if not isinstance(subnode, nodes.section):
+                continue
+            if subnode not in crawled:
+                crawl_toc(subnode, depth+1)
+    crawl_toc(doctree)
+
+def autosummary_toc_visit_html(self, node):
+    """Hide autosummary toctree list in HTML output"""
+    raise nodes.SkipNode
+
+def autosummary_toc_visit_latex(self, node):
+    """Show autosummary toctree (= put the referenced pages here) in Latex"""
+    pass
+
+def autosummary_toc_depart_noop(self, node):
+    pass
+
+#------------------------------------------------------------------------------
+# .. autosummary::
+#------------------------------------------------------------------------------
+
+def autosummary_directive(dirname, arguments, options, content, lineno,
+                          content_offset, block_text, state, state_machine):
+    """
+    Pretty table containing short signatures and summaries of functions etc.
+
+    autosummary also generates a (hidden) toctree:: node.
+
+    """
+
+    names = []
+    names += [x.strip() for x in content if x.strip()]
+
+    table, warnings, real_names = get_autosummary(names, state,
+                                                  'nosignatures' in options)
+    node = table
+
+    env = state.document.settings.env
+    suffix = env.config.source_suffix
+    all_docnames = env.found_docs.copy()
+    dirname = posixpath.dirname(env.docname)
+
+    if 'toctree' in options:
+        tree_prefix = options['toctree'].strip()
+        docnames = []
+        for name in names:
+            name = real_names.get(name, name)
+
+            docname = tree_prefix + name
+            if docname.endswith(suffix):
+                docname = docname[:-len(suffix)]
+            docname = posixpath.normpath(posixpath.join(dirname, docname))
+            if docname not in env.found_docs:
+                warnings.append(state.document.reporter.warning(
+                    'toctree references unknown document %r' % docname,
+                    line=lineno))
+            docnames.append(docname)
+
+        tocnode = sphinx.addnodes.toctree()
+        tocnode['includefiles'] = docnames
+        tocnode['maxdepth'] = -1
+        tocnode['glob'] = None
+
+        tocnode = autosummary_toc('', '', tocnode)
+        return warnings + [node] + [tocnode]
+    else:
+        return warnings + [node]
+
+def get_autosummary(names, state, no_signatures=False):
+    """
+    Generate a proper table node for the autosummary:: directive.
+
+    Parameters
+    ----------
+    names : list of str
+        Names of Python objects to be imported and added to the table.
+    state : docutils RST state
+        State of the RST parser; used for nested parsing of the table cells.
+    
+    """
+    document = state.document
+    
+    real_names = {}
+    warnings = []
+
+    prefixes = ['']
+    prefixes.insert(0, document.settings.env.currmodule)
+
+    table = nodes.table('')
+    group = nodes.tgroup('', cols=2)
+    table.append(group)
+    group.append(nodes.colspec('', colwidth=30))
+    group.append(nodes.colspec('', colwidth=70))
+    body = nodes.tbody('')
+    group.append(body)
+
+    def append_row(*column_texts):
+        row = nodes.row('')
+        for text in column_texts:
+            node = nodes.paragraph('')
+            vl = ViewList()
+            vl.append(text, '<autosummary>')
+            state.nested_parse(vl, 0, node)
+            row.append(nodes.entry('', node))
+        body.append(row)
+
+    for name in names:
+        try:
+            obj, real_name = import_by_name(name, prefixes=prefixes)
+        except ImportError:
+            warnings.append(document.reporter.warning(
+                'failed to import %s' % name))
+            append_row(":obj:`%s`" % name, "")
+            continue
+
+        real_names[name] = real_name
+
+        doc = get_doc_object(obj)
+
+        if doc['Summary']:
+            title = " ".join(doc['Summary'])
+        else:
+            title = ""
+        
+        col1 = ":obj:`%s <%s>`" % (name, real_name)
+        if doc['Signature']:
+            sig = re.sub('^[a-zA-Z_0-9.-]*', '', doc['Signature'])
+            if '=' in sig:
+                # abbreviate optional arguments
+                sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1)
+                sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1)
+                sig = re.sub(r'=[^,)]+,', ',', sig)
+                sig = re.sub(r'=[^,)]+\)$', '])', sig)
+                # shorten long strings
+                sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig)
+            else:
+                sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig)
+            col1 += " " + sig
+        col2 = title
+        append_row(col1, col2)
+
+    return table, warnings, real_names
+
+def import_by_name(name, prefixes=[None]):
+    """
+    Import a Python object that has the given name, under one of the prefixes.
+
+    Parameters
+    ----------
+    name : str
+        Name of a Python object, e.g. 'numpy.ndarray.view'
+    prefixes : list of (str or None), optional
+        Prefixes to prepend to the name (None implies no prefix).
+        The first prefixed name that results in a successful import is used.
+
+    Returns
+    -------
+    obj
+        The imported object
+    name
+        Name of the imported object (useful if `prefixes` was used)
+    
+    """
+    for prefix in prefixes:
+        try:
+            if prefix:
+                prefixed_name = '.'.join([prefix, name])
+            else:
+                prefixed_name = name
+            return _import_by_name(prefixed_name), prefixed_name
+        except ImportError:
+            pass
+    raise ImportError
+
+def _import_by_name(name):
+    """Import a Python object given its full name"""
+    try:
+        # first try to interpret `name` as MODNAME.OBJ
+        name_parts = name.split('.')
+        try:
+            modname = '.'.join(name_parts[:-1])
+            __import__(modname)
+            return getattr(sys.modules[modname], name_parts[-1])
+        except (ImportError, IndexError, AttributeError):
+            pass
+       
+        # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
+        last_j = 0
+        modname = None
+        for j in reversed(range(1, len(name_parts)+1)):
+            last_j = j
+            modname = '.'.join(name_parts[:j])
+            try:
+                __import__(modname)
+            except ImportError:
+                continue
+            if modname in sys.modules:
+                break
+
+        if last_j < len(name_parts):
+            obj = sys.modules[modname]
+            for obj_name in name_parts[last_j:]:
+                obj = getattr(obj, obj_name)
+            return obj
+        else:
+            return sys.modules[modname]
+    except (ValueError, ImportError, AttributeError, KeyError), e:
+        raise ImportError(e)
+
+#------------------------------------------------------------------------------
+# :autolink: (smart default role)
+#------------------------------------------------------------------------------
+
+def autolink_role(typ, rawtext, etext, lineno, inliner,
+                  options={}, content=[]):
+    """
+    Smart linking role.
+
+    Expands to ":obj:`text`" if `text` is an object that can be imported;
+    otherwise expands to "*text*".
+    """
+    r = sphinx.roles.xfileref_role('obj', rawtext, etext, lineno, inliner,
+                                   options, content)
+    pnode = r[0][0]
+
+    prefixes = [None]
+    #prefixes.insert(0, inliner.document.settings.env.currmodule)
+    try:
+        obj, name = import_by_name(pnode['reftarget'], prefixes)
+    except ImportError:
+        content = pnode[0]
+        r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
+                                 classes=content['classes'])
+    return r

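``import_by_name`` is the helper that both the autosummary directive and the
:autolink: role use to resolve a short name against a list of module
prefixes.  A minimal usage sketch (illustrative, not part of this changeset;
assumes NumPy, Sphinx and docutils are importable and sphinxext/ is on the
path)::

    from autosummary import import_by_name

    # Try 'numpy.dot' first (prefix + name); get back the object and the
    # fully qualified name that actually imported.
    obj, full_name = import_by_name('dot', prefixes=['numpy'])
    print full_name                          # numpy.dot
    print obj is __import__('numpy').dot     # True
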
Added: trunk/doc/sphinxext/autosummary_generate.py
===================================================================
--- trunk/doc/sphinxext/autosummary_generate.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/autosummary_generate.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+r"""
+autosummary_generate.py OPTIONS FILES
+
+Generate automatic RST source files for items referred to in
+autosummary:: directives.
+
+Each generated RST file contains a single auto*:: directive which
+extracts the docstring of the referenced item.
+
+Example Makefile rule::
+
+    generate:
+            ./sphinxext/autosummary_generate.py -o source/generated source/*.rst
+
+"""
+import glob, re, inspect, os, optparse
+from autosummary import import_by_name
+
+try:
+    from phantom_import import import_phantom_module
+except ImportError:
+    import_phantom_module = lambda x: x
+
+def main():
+    p = optparse.OptionParser(__doc__.strip())
+    p.add_option("-p", "--phantom", action="store", type="string",
+                 dest="phantom", default=None,
+                 help="Phantom import modules from a file")
+    p.add_option("-o", "--output-dir", action="store", type="string",
+                 dest="output_dir", default=None,
+                 help=("Write all output files to the given directory (instead "
+                       "of writing them as specified in the autosummary:: "
+                       "directives)"))
+    options, args = p.parse_args()
+
+    if len(args) == 0:
+        p.error("wrong number of arguments")
+
+    if options.phantom and os.path.isfile(options.phantom):
+        import_phantom_module(options.phantom)
+
+    # read
+    names = {}
+    for name, loc in get_documented(args).items():
+        for (filename, sec_title, keyword, toctree) in loc:
+            if toctree is not None:
+                path = os.path.join(os.path.dirname(filename), toctree)
+                names[name] = os.path.abspath(path)
+
+    # write
+    for name, path in sorted(names.items()):
+        if options.output_dir is not None:
+            path = options.output_dir
+        
+        if not os.path.isdir(path):
+            os.makedirs(path)
+
+        try:
+            obj, name = import_by_name(name)
+        except ImportError, e:
+            print "Failed to import '%s': %s" % (name, e)
+            continue
+
+        fn = os.path.join(path, '%s.rst' % name)
+
+        if os.path.exists(fn):
+            # skip
+            continue
+
+        f = open(fn, 'w')
+
+        try:
+            f.write('%s\n%s\n\n' % (name, '='*len(name)))
+
+            if inspect.isclass(obj):
+                if issubclass(obj, Exception):
+                    f.write(format_modulemember(name, 'autoexception'))
+                else:
+                    f.write(format_modulemember(name, 'autoclass'))
+            elif inspect.ismodule(obj):
+                f.write(format_modulemember(name, 'automodule'))
+            elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
+                f.write(format_classmember(name, 'automethod'))
+            elif callable(obj):
+                f.write(format_modulemember(name, 'autofunction'))
+            elif hasattr(obj, '__get__'):
+                f.write(format_classmember(name, 'autoattribute'))
+            else:
+                f.write(format_modulemember(name, 'autofunction'))
+        finally:
+            f.close()
+
+def format_modulemember(name, directive):
+    parts = name.split('.')
+    mod, name = '.'.join(parts[:-1]), parts[-1]
+    return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
+
+def format_classmember(name, directive):
+    parts = name.split('.')
+    mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:])
+    return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name)
+
+def get_documented(filenames):
+    """
+    Find out what items are documented in source/*.rst
+    
+    Returns
+    -------
+    documented : dict of list of (filename, title, keyword, toctree)
+        Dictionary whose keys are documented names of objects.
+        The value is a list of locations where the object was documented.
+        Each location is a tuple of filename, the current section title,
+        the name of the directive, and the value of the :toctree: argument
+        (if present) of the directive.
+
+    """
+    title_underline_re = re.compile("^[-=*_^#]{3,}\s*$")
+    autodoc_re = re.compile(".. auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$")
+    autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*')
+    module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
+    autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*')
+    toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
+    
+    documented = {}
+    
+    for filename in filenames:
+        current_title = []
+        last_line = None
+        toctree = None
+        current_module = None
+        in_autosummary = False
+
+        f = open(filename, 'r')
+        for line in f:
+            try:
+                if in_autosummary:
+                    m = toctree_arg_re.match(line)
+                    if m:
+                        toctree = m.group(1)
+                        continue
+
+                    if line.strip().startswith(':'):
+                        continue # skip options
+
+                    m = autosummary_item_re.match(line)
+                    if m:
+                        name = m.group(1).strip()
+                        if current_module and not name.startswith(current_module + '.'):
+                            name = "%s.%s" % (current_module, name)
+                        documented.setdefault(name, []).append(
+                            (filename, current_title, 'autosummary', toctree))
+                        continue
+                    if line.strip() == '':
+                        continue
+                    in_autosummary = False
+                
+                m = autosummary_re.match(line)
+                if m:
+                    in_autosummary = True
+                    continue
+                
+                m = autodoc_re.search(line)
+                if m:
+                    name = m.group(2).strip()
+                    if current_module and not name.startswith(current_module + '.'):
+                        name = "%s.%s" % (current_module, name)
+                    if m.group(1) == "module":
+                        current_module = name
+                    documented.setdefault(name, []).append(
+                        (filename, current_title, "auto" + m.group(1), None))
+                    continue
+
+                m = title_underline_re.match(line)
+                if m and last_line:
+                    current_title = last_line.strip()
+                    continue
+
+                m = module_re.match(line)
+                if m:
+                    current_module = m.group(2)
+                    continue
+            finally:
+                last_line = line
+    
+    return documented
+
+if __name__ == "__main__":
+    main()


Property changes on: trunk/doc/sphinxext/autosummary_generate.py
___________________________________________________________________
Name: svn:executable
   + *

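The RST stubs written by autosummary_generate.py are assembled from the two
small formatter functions above.  Their output is easy to preview (the names
below are illustrative; assumes sphinxext/ is on the path with Sphinx and
docutils installed)::

    from autosummary_generate import format_modulemember, format_classmember

    print format_modulemember('numpy.dot', 'autofunction')
    # .. currentmodule:: numpy
    #
    # .. autofunction:: dot

    print format_classmember('numpy.ndarray.view', 'automethod')
    # .. currentmodule:: numpy
    #
    # .. automethod:: ndarray.view
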
Added: trunk/doc/sphinxext/comment_eater.py
===================================================================
--- trunk/doc/sphinxext/comment_eater.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/comment_eater.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,158 @@
+from cStringIO import StringIO
+import compiler
+import inspect
+import textwrap
+import tokenize
+
+from compiler_unparse import unparse
+
+
+class Comment(object):
+    """ A comment block.
+    """
+    is_comment = True
+    def __init__(self, start_lineno, end_lineno, text):
+        # int : The first line number in the block. 1-indexed.
+        self.start_lineno = start_lineno
+        # int : The last line number. Inclusive!
+        self.end_lineno = end_lineno
+        # str : The text block, including the '#' character but not any leading spaces.
+        self.text = text
+
+    def add(self, string, start, end, line):
+        """ Add a new comment line.
+        """
+        self.start_lineno = min(self.start_lineno, start[0])
+        self.end_lineno = max(self.end_lineno, end[0])
+        self.text += string
+
+    def __repr__(self):
+        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno, self.text)
+
+
+class NonComment(object):
+    """ A non-comment block of code.
+    """
+    is_comment = False
+    def __init__(self, start_lineno, end_lineno):
+        self.start_lineno = start_lineno
+        self.end_lineno = end_lineno
+
+    def add(self, string, start, end, line):
+        """ Add lines to the block.
+        """
+        if string.strip():
+            # Only add if not entirely whitespace.
+            self.start_lineno = min(self.start_lineno, start[0])
+            self.end_lineno = max(self.end_lineno, end[0])
+
+    def __repr__(self):
+        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
+            self.end_lineno)
+
+
+class CommentBlocker(object):
+    """ Pull out contiguous comment blocks.
+    """
+    def __init__(self):
+        # Start with a dummy.
+        self.current_block = NonComment(0, 0)
+
+        # All of the blocks seen so far.
+        self.blocks = []
+
+        # The index mapping lines of code to their associated comment blocks.
+        self.index = {}
+
+    def process_file(self, file):
+        """ Process a file object.
+        """
+        for token in tokenize.generate_tokens(file.next):
+            self.process_token(*token)
+        self.make_index()
+
+    def process_token(self, kind, string, start, end, line):
+        """ Process a single token.
+        """
+        if self.current_block.is_comment:
+            if kind == tokenize.COMMENT:
+                self.current_block.add(string, start, end, line)
+            else:
+                self.new_noncomment(start[0], end[0])
+        else:
+            if kind == tokenize.COMMENT:
+                self.new_comment(string, start, end, line)
+            else:
+                self.current_block.add(string, start, end, line)
+
+    def new_noncomment(self, start_lineno, end_lineno):
+        """ We are transitioning from a noncomment to a comment.
+        """
+        block = NonComment(start_lineno, end_lineno)
+        self.blocks.append(block)
+        self.current_block = block
+
+    def new_comment(self, string, start, end, line):
+        """ Possibly add a new comment.
+        
+        Only adds a new comment if this comment is the only thing on the line.
+        Otherwise, it extends the noncomment block.
+        """
+        prefix = line[:start[1]]
+        if prefix.strip():
+            # Oops! Trailing comment, not a comment block.
+            self.current_block.add(string, start, end, line)
+        else:
+            # A comment block.
+            block = Comment(start[0], end[0], string)
+            self.blocks.append(block)
+            self.current_block = block
+
+    def make_index(self):
+        """ Make the index mapping lines of actual code to their associated
+        prefix comments.
+        """
+        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
+            if not block.is_comment:
+                self.index[block.start_lineno] = prev
+
+    def search_for_comment(self, lineno, default=None):
+        """ Find the comment block just before the given line number.
+
+        Returns None (or the specified default) if there is no such block.
+        """
+        if not self.index:
+            self.make_index()
+        block = self.index.get(lineno, None)
+        text = getattr(block, 'text', default)
+        return text
+
+
+def strip_comment_marker(text):
+    """ Strip # markers at the front of a block of comment text.
+    """
+    lines = []
+    for line in text.splitlines():
+        lines.append(line.lstrip('#'))
+    text = textwrap.dedent('\n'.join(lines))
+    return text
+
+
+def get_class_traits(klass):
+    """ Yield all of the documentation for trait definitions on a class object.
+    """
+    # FIXME: gracefully handle errors here or in the caller?
+    source = inspect.getsource(klass)
+    cb = CommentBlocker()
+    cb.process_file(StringIO(source))
+    mod_ast = compiler.parse(source)
+    class_ast = mod_ast.node.nodes[0]
+    for node in class_ast.code.nodes:
+        # FIXME: handle other kinds of assignments?
+        if isinstance(node, compiler.ast.Assign):
+            name = node.nodes[0].name
+            rhs = unparse(node.expr).strip()
+            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
+            yield name, rhs, doc
+

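comment_eater.py is used by traitsdoc.py to turn ``#`` comments preceding
trait assignments into attribute documentation.  A small sketch of the final
clean-up step, ``strip_comment_marker`` (the input string is illustrative)::

    from comment_eater import strip_comment_marker

    text = "# Number of samples.\n# Defaults to 10.\n"
    print strip_comment_marker(text)
    # prints:
    # Number of samples.
    # Defaults to 10.
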
Added: trunk/doc/sphinxext/compiler_unparse.py
===================================================================
--- trunk/doc/sphinxext/compiler_unparse.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/compiler_unparse.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,860 @@
+""" Turn compiler.ast structures back into executable python code.
+
+    The unparse method takes a compiler.ast tree and transforms it back into
+    valid python code.  It is incomplete and currently only works for
+    import statements, function calls, function definitions, assignments, and
+    basic expressions.
+
+    Inspired by python-2.5-svn/Demo/parser/unparse.py
+
+    fixme: We may want to move to using _ast trees because the compiler for
+           them is about 6 times faster than compiler.compile.
+"""
+
+import sys
+import cStringIO
+from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
+
+def unparse(ast, single_line_functions=False):
+    s = cStringIO.StringIO()
+    UnparseCompilerAst(ast, s, single_line_functions)
+    return s.getvalue().lstrip()
+
+op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
+                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
+
+class UnparseCompilerAst:
+    """ Methods in this class recursively traverse an AST and
+        output source code for the abstract syntax; original formatting
+        is disregarded.
+    """
+
+    #########################################################################
+    # object interface.
+    #########################################################################
+
+    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
+        """ Unparser(tree, file=sys.stdout) -> None.
+
+            Print the source for tree to file.
+        """
+        self.f = file
+        self._single_func = single_line_functions
+        self._do_indent = True
+        self._indent = 0
+        self._dispatch(tree)
+        self._write("\n")
+        self.f.flush()
+
+    #########################################################################
+    # Unparser private interface.
+    #########################################################################
+
+    ### format, output, and dispatch methods ################################
+
+    def _fill(self, text = ""):
+        "Indent a piece of text, according to the current indentation level"
+        if self._do_indent:
+            self._write("\n"+"    "*self._indent + text)
+        else:
+            self._write(text)
+
+    def _write(self, text):
+        "Append a piece of text to the current line."
+        self.f.write(text)
+
+    def _enter(self):
+        "Print ':', and increase the indentation."
+        self._write(": ")
+        self._indent += 1
+
+    def _leave(self):
+        "Decrease the indentation level."
+        self._indent -= 1
+
+    def _dispatch(self, tree):
+        "_dispatcher function, _dispatching tree type T to method _T."
+        if isinstance(tree, list):
+            for t in tree:
+                self._dispatch(t)
+            return
+        meth = getattr(self, "_"+tree.__class__.__name__)
+        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
+            return
+        meth(tree)
+
+
+    #########################################################################
+    # compiler.ast unparsing methods.
+    #
+    # There should be one method per concrete grammar type. They are
+    # organized in alphabetical order.
+    #########################################################################
+
+    def _Add(self, t):
+        self.__binary_op(t, '+')
+
+    def _And(self, t):
+        self._write(" (")
+        for i, node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i != len(t.nodes)-1:
+                self._write(") and (")
+        self._write(")")
+               
+    def _AssAttr(self, t):
+        """ Handle assigning an attribute of an object
+        """
+        self._dispatch(t.expr)
+        self._write('.'+t.attrname)
+ 
+    def _Assign(self, t):
+        """ Expression Assignment such as "a = 1".
+
+            This only handles assignment in expressions.  Keyword assignment
+            is handled separately.
+        """
+        self._fill()
+        for target in t.nodes:
+            self._dispatch(target)
+            self._write(" = ")
+        self._dispatch(t.expr)
+        if not self._do_indent:
+            self._write('; ')
+
+    def _AssName(self, t):
+        """ Name on left hand side of expression.
+
+            Treat just like a name on the right side of an expression.
+        """
+        self._Name(t)
+
+    def _AssTuple(self, t):
+        """ Tuple on left hand side of an expression.
+        """
+
+        # _write each element, separated by a comma.
+        for element in t.nodes[:-1]:
+            self._dispatch(element)
+            self._write(", ")
+
+        # Handle the last one without writing comma
+        last_element = t.nodes[-1]
+        self._dispatch(last_element)
+
+    def _AugAssign(self, t):
+        """ +=,-=,*=,/=,**=, etc. operations
+        """
+        
+        self._fill()
+        self._dispatch(t.node)
+        self._write(' '+t.op+' ')
+        self._dispatch(t.expr)
+        if not self._do_indent:
+            self._write(';')
+            
+    def _Bitand(self, t):
+        """ Bit and operation.
+        """
+        
+        for i, node in enumerate(t.nodes):
+            self._write("(")
+            self._dispatch(node)
+            self._write(")")
+            if i != len(t.nodes)-1:
+                self._write(" & ")
+                
+    def _Bitor(self, t):
+        """ Bit or operation
+        """
+        
+        for i, node in enumerate(t.nodes):
+            self._write("(")
+            self._dispatch(node)
+            self._write(")")
+            if i != len(t.nodes)-1:
+                self._write(" | ")
+                
+    def _CallFunc(self, t):
+        """ Function call.
+        """
+        self._dispatch(t.node)
+        self._write("(")
+        comma = False
+        for e in t.args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._dispatch(e)
+        if t.star_args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._write("*")
+            self._dispatch(t.star_args)
+        if t.dstar_args:
+            if comma: self._write(", ")
+            else: comma = True
+            self._write("**")
+            self._dispatch(t.dstar_args)
+        self._write(")")
+
+    def _Compare(self, t):
+        self._dispatch(t.expr)
+        for op, expr in t.ops:
+            self._write(" " + op + " ")
+            self._dispatch(expr)
+
+    def _Const(self, t):
+        """ A constant value such as an integer value, 3, or a string, "hello".
+        """
+        self._dispatch(t.value)
+
+    def _Decorators(self, t):
+        """ Handle function decorators (eg. @has_units)
+        """
+        for node in t.nodes:
+            self._dispatch(node)
+
+    def _Dict(self, t):
+        self._write("{")
+        for  i, (k, v) in enumerate(t.items):
+            self._dispatch(k)
+            self._write(": ")
+            self._dispatch(v)
+            if i < len(t.items)-1:
+                self._write(", ")
+        self._write("}")
+
+    def _Discard(self, t):
+        """ Node for when return value is ignored such as in "foo(a)".
+        """
+        self._fill()
+        self._dispatch(t.expr)
+
+    def _Div(self, t):
+        self.__binary_op(t, '/')
+
+    def _Ellipsis(self, t):
+        self._write("...")
+
+    def _From(self, t):
+        """ Handle "from xyz import foo, bar as baz".
+        """
+        # fixme: Are From and ImportFrom handled differently?
+        self._fill("from ")
+        self._write(t.modname)
+        self._write(" import ")
+        for i, (name,asname) in enumerate(t.names):
+            if i != 0:
+                self._write(", ")
+            self._write(name)
+            if asname is not None:
+                self._write(" as "+asname)
+                
+    def _Function(self, t):
+        """ Handle function definitions
+        """
+        if t.decorators is not None:
+            self._fill("@")
+            self._dispatch(t.decorators)
+        self._fill("def "+t.name + "(")
+        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
+        for i, arg in enumerate(zip(t.argnames, defaults)):
+            self._write(arg[0])
+            if arg[1] is not None:
+                self._write('=')
+                self._dispatch(arg[1])
+            if i < len(t.argnames)-1:
+                self._write(', ')
+        self._write(")")
+        if self._single_func:
+            self._do_indent = False
+        self._enter()
+        self._dispatch(t.code)
+        self._leave()
+        self._do_indent = True
+
+    def _Getattr(self, t):
+        """ Handle getting an attribute of an object
+        """
+        if isinstance(t.expr, (Div, Mul, Sub, Add)):
+            self._write('(')
+            self._dispatch(t.expr)
+            self._write(')')
+        else:
+            self._dispatch(t.expr)
+            
+        self._write('.'+t.attrname)
+        
+    def _If(self, t):
+        self._fill()
+        
+        for i, (compare,code) in enumerate(t.tests):
+            if i == 0:
+                self._write("if ")
+            else:
+                self._write("elif ")
+            self._dispatch(compare)
+            self._enter()
+            self._fill()
+            self._dispatch(code)
+            self._leave()
+            self._write("\n")
+
+        if t.else_ is not None:
+            self._write("else")
+            self._enter()
+            self._fill()
+            self._dispatch(t.else_)
+            self._leave()
+            self._write("\n")
+            
+    def _IfExp(self, t):
+        self._dispatch(t.then)
+        self._write(" if ")
+        self._dispatch(t.test)
+
+        if t.else_ is not None:
+            self._write(" else (")
+            self._dispatch(t.else_)
+            self._write(")")
+
+    def _Import(self, t):
+        """ Handle "import xyz.foo".
+        """
+        self._fill("import ")
+        
+        for i, (name,asname) in enumerate(t.names):
+            if i != 0:
+                self._write(", ")
+            self._write(name)
+            if asname is not None:
+                self._write(" as "+asname)
+
+    def _Keyword(self, t):
+        """ Keyword value assignment within function calls and definitions.
+        """
+        self._write(t.name)
+        self._write("=")
+        self._dispatch(t.expr)
+        
+    def _List(self, t):
+        self._write("[")
+        for  i,node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i < len(t.nodes)-1:
+                self._write(", ")
+        self._write("]")
+
+    def _Module(self, t):
+        if t.doc is not None:
+            self._dispatch(t.doc)
+        self._dispatch(t.node)
+
+    def _Mul(self, t):
+        self.__binary_op(t, '*')
+
+    def _Name(self, t):
+        self._write(t.name)
+
+    def _NoneType(self, t):
+        self._write("None")
+        
+    def _Not(self, t):
+        self._write('not (')
+        self._dispatch(t.expr)
+        self._write(')')
+        
+    def _Or(self, t):
+        self._write(" (")
+        for i, node in enumerate(t.nodes):
+            self._dispatch(node)
+            if i != len(t.nodes)-1:
+                self._write(") or (")
+        self._write(")")
+                
+    def _Pass(self, t):
+        self._write("pass\n")
+
+    def _Printnl(self, t):
+        self._fill("print ")
+        if t.dest:
+            self._write(">> ")
+            self._dispatch(t.dest)
+            self._write(", ")
+        comma = False
+        for node in t.nodes:
+            if comma: self._write(', ')
+            else: comma = True
+            self._dispatch(node)
+
+    def _Power(self, t):
+        self.__binary_op(t, '**')
+
+    def _Return(self, t):
+        self._fill("return ")
+        if t.value:
+            if isinstance(t.value, Tuple):
+                text = ', '.join([ name.name for name in t.value.asList() ])
+                self._write(text)
+            else:
+                self._dispatch(t.value)
+            if not self._do_indent:
+                self._write('; ')
+
+    def _Slice(self, t):
+        self._dispatch(t.expr)
+        self._write("[")
+        if t.lower:
+            self._dispatch(t.lower)
+        self._write(":")
+        if t.upper:
+            self._dispatch(t.upper)
+        #if t.step:
+        #    self._write(":")
+        #    self._dispatch(t.step)
+        self._write("]")
+
+    def _Sliceobj(self, t):
+        for i, node in enumerate(t.nodes):
+            if i != 0:
+                self._write(":")
+            if not (isinstance(node, Const) and node.value is None):
+                self._dispatch(node)
+
+    def _Stmt(self, tree):
+        for node in tree.nodes:
+            self._dispatch(node)
+
+    def _Sub(self, t):
+        self.__binary_op(t, '-')
+
+    def _Subscript(self, t):
+        self._dispatch(t.expr)
+        self._write("[")
+        for i, value in enumerate(t.subs):
+            if i != 0:
+                self._write(",")
+            self._dispatch(value)
+        self._write("]")
+
+    def _TryExcept(self, t):
+        self._fill("try")
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+
+        for handler in t.handlers:
+            self._fill('except ')
+            self._dispatch(handler[0])
+            if handler[1] is not None:
+                self._write(', ')
+                self._dispatch(handler[1])
+            self._enter()
+            self._dispatch(handler[2])
+            self._leave()
+            
+        if t.else_:
+            self._fill("else")
+            self._enter()
+            self._dispatch(t.else_)
+            self._leave()
+
+    def _Tuple(self, t):
+
+        if not t.nodes:
+            # Empty tuple.
+            self._write("()")
+        else:
+            self._write("(")
+
+            # _write each element, separated by a comma.
+            for element in t.nodes[:-1]:
+                self._dispatch(element)
+                self._write(", ")
+
+            # Handle the last one without writing comma
+            last_element = t.nodes[-1]
+            self._dispatch(last_element)
+
+            self._write(")")
+            
+    def _UnaryAdd(self, t):
+        self._write("+")
+        self._dispatch(t.expr)
+        
+    def _UnarySub(self, t):
+        self._write("-")
+        self._dispatch(t.expr)        
+
+    def _With(self, t):
+        self._fill('with ')
+        self._dispatch(t.expr)
+        if t.vars:
+            self._write(' as ')
+            self._dispatch(t.vars.name)
+        self._enter()
+        self._dispatch(t.body)
+        self._leave()
+        self._write('\n')
+        
+    def _int(self, t):
+        self._write(repr(t))
+
+    def __binary_op(self, t, symbol):
+        # Check if parentheses are needed on the left side and then dispatch
+        has_paren = False
+        left_class = str(t.left.__class__)
+        if (left_class in op_precedence.keys() and
+            op_precedence[left_class] < op_precedence[str(t.__class__)]):
+            has_paren = True
+        if has_paren:
+            self._write('(')
+        self._dispatch(t.left)
+        if has_paren:
+            self._write(')')
+        # Write the appropriate symbol for operator
+        self._write(symbol)
+        # Check if parentheses are needed on the right side and then dispatch
+        has_paren = False
+        right_class = str(t.right.__class__)
+        if (right_class in op_precedence.keys() and
+            op_precedence[right_class] < op_precedence[str(t.__class__)]):
+            has_paren = True
+        if has_paren:
+            self._write('(')
+        self._dispatch(t.right)
+        if has_paren:
+            self._write(')')
+
+    def _float(self, t):
+        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
+        # We prefer str here.
+        self._write(str(t))
+
+    def _str(self, t):
+        self._write(repr(t))
+        
+    def _tuple(self, t):
+        self._write(str(t))
+
+    #########################################################################
+    # These are the methods from the _ast module's unparse.
+    #
+    # As our needs to handle more advanced code increase, we may want to
+    # modify some of the methods below so that they work for compiler.ast.
+    #########################################################################
+
+#    # stmt
+#    def _Expr(self, tree):
+#        self._fill()
+#        self._dispatch(tree.value)
+#
+#    def _Import(self, t):
+#        self._fill("import ")
+#        first = True
+#        for a in t.names:
+#            if first:
+#                first = False
+#            else:
+#                self._write(", ")
+#            self._write(a.name)
+#            if a.asname:
+#                self._write(" as "+a.asname)
+#
+##    def _ImportFrom(self, t):
+##        self._fill("from ")
+##        self._write(t.module)
+##        self._write(" import ")
+##        for i, a in enumerate(t.names):
+##            if i == 0:
+##                self._write(", ")
+##            self._write(a.name)
+##            if a.asname:
+##                self._write(" as "+a.asname)
+##        # XXX(jpe) what is level for?
+##
+#
+#    def _Break(self, t):
+#        self._fill("break")
+#
+#    def _Continue(self, t):
+#        self._fill("continue")
+#
+#    def _Delete(self, t):
+#        self._fill("del ")
+#        self._dispatch(t.targets)
+#
+#    def _Assert(self, t):
+#        self._fill("assert ")
+#        self._dispatch(t.test)
+#        if t.msg:
+#            self._write(", ")
+#            self._dispatch(t.msg)
+#
+#    def _Exec(self, t):
+#        self._fill("exec ")
+#        self._dispatch(t.body)
+#        if t.globals:
+#            self._write(" in ")
+#            self._dispatch(t.globals)
+#        if t.locals:
+#            self._write(", ")
+#            self._dispatch(t.locals)
+#
+#    def _Print(self, t):
+#        self._fill("print ")
+#        do_comma = False
+#        if t.dest:
+#            self._write(">>")
+#            self._dispatch(t.dest)
+#            do_comma = True
+#        for e in t.values:
+#            if do_comma:self._write(", ")
+#            else:do_comma=True
+#            self._dispatch(e)
+#        if not t.nl:
+#            self._write(",")
+#
+#    def _Global(self, t):
+#        self._fill("global")
+#        for i, n in enumerate(t.names):
+#            if i != 0:
+#                self._write(",")
+#            self._write(" " + n)
+#
+#    def _Yield(self, t):
+#        self._fill("yield")
+#        if t.value:
+#            self._write(" (")
+#            self._dispatch(t.value)
+#            self._write(")")
+#
+#    def _Raise(self, t):
+#        self._fill('raise ')
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.inst:
+#            self._write(", ")
+#            self._dispatch(t.inst)
+#        if t.tback:
+#            self._write(", ")
+#            self._dispatch(t.tback)
+#
+#
+#    def _TryFinally(self, t):
+#        self._fill("try")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#        self._fill("finally")
+#        self._enter()
+#        self._dispatch(t.finalbody)
+#        self._leave()
+#
+#    def _excepthandler(self, t):
+#        self._fill("except ")
+#        if t.type:
+#            self._dispatch(t.type)
+#        if t.name:
+#            self._write(", ")
+#            self._dispatch(t.name)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _ClassDef(self, t):
+#        self._write("\n")
+#        self._fill("class "+t.name)
+#        if t.bases:
+#            self._write("(")
+#            for a in t.bases:
+#                self._dispatch(a)
+#                self._write(", ")
+#            self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _FunctionDef(self, t):
+#        self._write("\n")
+#        for deco in t.decorators:
+#            self._fill("@")
+#            self._dispatch(deco)
+#        self._fill("def "+t.name + "(")
+#        self._dispatch(t.args)
+#        self._write(")")
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#
+#    def _For(self, t):
+#        self._fill("for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    def _While(self, t):
+#        self._fill("while ")
+#        self._dispatch(t.test)
+#        self._enter()
+#        self._dispatch(t.body)
+#        self._leave()
+#        if t.orelse:
+#            self._fill("else")
+#            self._enter()
+#            self._dispatch(t.orelse)
+#            self._leave
+#
+#    # expr
+#    def _Str(self, tree):
+#        self._write(repr(tree.s))
+##
+#    def _Repr(self, t):
+#        self._write("`")
+#        self._dispatch(t.value)
+#        self._write("`")
+#
+#    def _Num(self, t):
+#        self._write(repr(t.n))
+#
+#    def _ListComp(self, t):
+#        self._write("[")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write("]")
+#
+#    def _GeneratorExp(self, t):
+#        self._write("(")
+#        self._dispatch(t.elt)
+#        for gen in t.generators:
+#            self._dispatch(gen)
+#        self._write(")")
+#
+#    def _comprehension(self, t):
+#        self._write(" for ")
+#        self._dispatch(t.target)
+#        self._write(" in ")
+#        self._dispatch(t.iter)
+#        for if_clause in t.ifs:
+#            self._write(" if ")
+#            self._dispatch(if_clause)
+#
+#    def _IfExp(self, t):
+#        self._dispatch(t.body)
+#        self._write(" if ")
+#        self._dispatch(t.test)
+#        if t.orelse:
+#            self._write(" else ")
+#            self._dispatch(t.orelse)
+#
+#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
+#    def _UnaryOp(self, t):
+#        self._write(self.unop[t.op.__class__.__name__])
+#        self._write("(")
+#        self._dispatch(t.operand)
+#        self._write(")")
+#
+#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
+#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
+#                    "FloorDiv":"//", "Pow": "**"}
+#    def _BinOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.left)
+#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
+#        self._dispatch(t.right)
+#        self._write(")")
+#
+#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
+#    def _BoolOp(self, t):
+#        self._write("(")
+#        self._dispatch(t.values[0])
+#        for v in t.values[1:]:
+#            self._write(" %s " % self.boolops[t.op.__class__])
+#            self._dispatch(v)
+#        self._write(")")
+#
+#    def _Attribute(self,t):
+#        self._dispatch(t.value)
+#        self._write(".")
+#        self._write(t.attr)
+#
+##    def _Call(self, t):
+##        self._dispatch(t.func)
+##        self._write("(")
+##        comma = False
+##        for e in t.args:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        for e in t.keywords:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._dispatch(e)
+##        if t.starargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("*")
+##            self._dispatch(t.starargs)
+##        if t.kwargs:
+##            if comma: self._write(", ")
+##            else: comma = True
+##            self._write("**")
+##            self._dispatch(t.kwargs)
+##        self._write(")")
+#
+#    # slice
+#    def _Index(self, t):
+#        self._dispatch(t.value)
+#
+#    def _ExtSlice(self, t):
+#        for i, d in enumerate(t.dims):
+#            if i != 0:
+#                self._write(': ')
+#            self._dispatch(d)
+#
+#    # others
+#    def _arguments(self, t):
+#        first = True
+#        nonDef = len(t.args)-len(t.defaults)
+#        for a in t.args[0:nonDef]:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a)
+#        for a,d in zip(t.args[nonDef:], t.defaults):
+#            if first:first = False
+#            else: self._write(", ")
+#            self._dispatch(a),
+#            self._write("=")
+#            self._dispatch(d)
+#        if t.vararg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("*"+t.vararg)
+#        if t.kwarg:
+#            if first:first = False
+#            else: self._write(", ")
+#            self._write("**"+t.kwarg)
+#
+##    def _keyword(self, t):
+##        self._write(t.arg)
+##        self._write("=")
+##        self._dispatch(t.value)
+#
+#    def _Lambda(self, t):
+#        self._write("lambda ")
+#        self._dispatch(t.args)
+#        self._write(": ")
+#        self._dispatch(t.body)
+
+
+

Added: trunk/doc/sphinxext/docscrape.py
===================================================================
--- trunk/doc/sphinxext/docscrape.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/docscrape.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,492 @@
+"""Extract reference documentation from the NumPy source tree.
+
+"""
+
+import inspect
+import textwrap
+import re
+import pydoc
+from StringIO import StringIO
+from warnings import warn
+
+class Reader(object):
+    """A line-based string reader.
+
+    """
+    def __init__(self, data):
+        """
+        Parameters
+        ----------
+        data : str
+           String with lines separated by '\n'.
+
+        """
+        if isinstance(data,list):
+            self._str = data
+        else:
+            self._str = data.split('\n') # store string as list of lines
+
+        self.reset()
+
+    def __getitem__(self, n):
+        return self._str[n]
+
+    def reset(self):
+        self._l = 0 # current line nr
+
+    def read(self):
+        if not self.eof():
+            out = self[self._l]
+            self._l += 1
+            return out
+        else:
+            return ''
+
+    def seek_next_non_empty_line(self):
+        for l in self[self._l:]:
+            if l.strip():
+                break
+            else:
+                self._l += 1
+
+    def eof(self):
+        return self._l >= len(self._str)
+
+    def read_to_condition(self, condition_func):
+        start = self._l
+        for line in self[start:]:
+            if condition_func(line):
+                return self[start:self._l]
+            self._l += 1
+            if self.eof():
+                return self[start:self._l+1]
+        return []
+
+    def read_to_next_empty_line(self):
+        self.seek_next_non_empty_line()
+        def is_empty(line):
+            return not line.strip()
+        return self.read_to_condition(is_empty)
+
+    def read_to_next_unindented_line(self):
+        def is_unindented(line):
+            return (line.strip() and (len(line.lstrip()) == len(line)))
+        return self.read_to_condition(is_unindented)
+
+    def peek(self,n=0):
+        if self._l + n < len(self._str):
+            return self[self._l + n]
+        else:
+            return ''
+
+    def is_empty(self):
+        return not ''.join(self._str).strip()
+
+
+class NumpyDocString(object):
+    def __init__(self,docstring):
+        docstring = textwrap.dedent(docstring).split('\n')
+
+        self._doc = Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': [''],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Attributes': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'Warnings': [],
+            'References': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def __getitem__(self,key):
+        return self._parsed_data[key]
+
+    def __setitem__(self,key,val):
+        if not self._parsed_data.has_key(key):
+            warn("Unknown section %s" % key)
+        else:
+            self._parsed_data[key] = val
+
+    def _is_at_section(self):
+        self._doc.seek_next_non_empty_line()
+
+        if self._doc.eof():
+            return False
+
+        l1 = self._doc.peek().strip()  # e.g. Parameters
+
+        if l1.startswith('.. index::'):
+            return True
+
+        l2 = self._doc.peek(1).strip() #    ---------- or ==========
+        return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
+
+    def _strip(self,doc):
+        i = 0
+        j = 0
+        for i,line in enumerate(doc):
+            if line.strip(): break
+
+        for j,line in enumerate(doc[::-1]):
+            if line.strip(): break
+
+        return doc[i:len(doc)-j]
+
+    def _read_to_next_section(self):
+        section = self._doc.read_to_next_empty_line()
+
+        while not self._is_at_section() and not self._doc.eof():
+            if not self._doc.peek(-1).strip(): # previous line was empty
+                section += ['']
+
+            section += self._doc.read_to_next_empty_line()
+
+        return section
+
+    def _read_sections(self):
+        while not self._doc.eof():
+            data = self._read_to_next_section()
+            name = data[0].strip()
+
+            if name.startswith('..'): # index section
+                yield name, data[1:]
+            elif len(data) < 2:
+                return
+            else:
+                yield name, self._strip(data[2:])
+
+    def _parse_param_list(self,content):
+        r = Reader(content)
+        params = []
+        while not r.eof():
+            header = r.read().strip()
+            if ' : ' in header:
+                arg_name, arg_type = header.split(' : ')[:2]
+            else:
+                arg_name, arg_type = header, ''
+
+            desc = r.read_to_next_unindented_line()
+            desc = dedent_lines(desc)
+
+            params.append((arg_name,arg_type,desc))
+
+        return params
+
+    
+    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+    def _parse_see_also(self, content):
+        """
+        func_name : Descriptive text
+            continued text
+        another_func_name : Descriptive text
+        func_name1, func_name2, :meth:`func_name`, func_name3
+
+        """
+        items = []
+
+        def parse_item_name(text):
+            """Match ':role:`name`' or 'name'"""
+            m = self._name_rgx.match(text)
+            if m:
+                g = m.groups()
+                if g[1] is None:
+                    return g[3], None
+                else:
+                    return g[2], g[1]
+            raise ValueError("%s is not an item name" % text)
+
+        def push_item(name, rest):
+            if not name:
+                return
+            name, role = parse_item_name(name)
+            items.append((name, list(rest), role))
+            del rest[:]
+
+        current_func = None
+        rest = []
+        
+        for line in content:
+            if not line.strip(): continue
+
+            m = self._name_rgx.match(line)
+            if m and line[m.end():].strip().startswith(':'):
+                push_item(current_func, rest)
+                current_func, line = line[:m.end()], line[m.end():]
+                rest = [line.split(':', 1)[1].strip()]
+                if not rest[0]:
+                    rest = []
+            elif not line.startswith(' '):
+                push_item(current_func, rest)
+                current_func = None
+                if ',' in line:
+                    for func in line.split(','):
+                        push_item(func, [])
+                elif line.strip():
+                    current_func = line
+            elif current_func is not None:
+                rest.append(line.strip())
+        push_item(current_func, rest)
+        return items
+
+    def _parse_index(self, section, content):
+        """
+        .. index:: default
+           :refguide: something, else, and more
+
+        """
+        def strip_each_in(lst):
+            return [s.strip() for s in lst]
+
+        out = {}
+        section = section.split('::')
+        if len(section) > 1:
+            out['default'] = strip_each_in(section[1].split(','))[0]
+        for line in content:
+            line = line.split(':')
+            if len(line) > 2:
+                out[line[1]] = strip_each_in(line[2].split(','))
+        return out
+    
+    def _parse_summary(self):
+        """Grab signature (if given) and summary"""
+        if self._is_at_section():
+            return
+
+        summary = self._doc.read_to_next_empty_line()
+        summary_str = " ".join([s.strip() for s in summary]).strip()
+        if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
+            self['Signature'] = summary_str
+            if not self._is_at_section():
+                self['Summary'] = self._doc.read_to_next_empty_line()
+        else:
+            self['Summary'] = summary
+
+        if not self._is_at_section():
+            self['Extended Summary'] = self._read_to_next_section()
+    
+    def _parse(self):
+        self._doc.reset()
+        self._parse_summary()
+
+        for (section,content) in self._read_sections():
+            if not section.startswith('..'):
+                section = ' '.join([s.capitalize() for s in section.split(' ')])
+            if section in ('Parameters', 'Attributes', 'Methods',
+                           'Returns', 'Raises', 'Warns'):
+                self[section] = self._parse_param_list(content)
+            elif section.startswith('.. index::'):
+                self['index'] = self._parse_index(section, content)
+            elif section == 'See Also':
+                self['See Also'] = self._parse_see_also(content)
+            else:
+                self[section] = content
+
+    # string conversion routines
+
+    def _str_header(self, name, symbol='-'):
+        return [name, len(name)*symbol]
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        if self['Signature']:
+            return [self['Signature'].replace('*','\*')] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        if self['Summary']:
+            return self['Summary'] + ['']
+        else:
+            return []
+
+    def _str_extended_summary(self):
+        if self['Extended Summary']:
+            return self['Extended Summary'] + ['']
+        else:
+            return []
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            for param,param_type,desc in self[name]:
+                out += ['%s : %s' % (param, param_type)]
+                out += self._str_indent(desc)
+            out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += self[name]
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        if not self['See Also']: return []
+        out = []
+        out += self._str_header("See Also")
+        last_had_desc = True
+        for func, desc, role in self['See Also']:
+            if role:
+                link = ':%s:`%s`' % (role, func)
+            elif func_role:
+                link = ':%s:`%s`' % (func_role, func)
+            else:
+                link = "`%s`_" % func
+            if desc or last_had_desc:
+                out += ['']
+                out += [link]
+            else:
+                out[-1] += ", %s" % link
+            if desc:
+                out += self._str_indent([' '.join(desc)])
+                last_had_desc = True
+            else:
+                last_had_desc = False
+        out += ['']
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            out += ['   :%s: %s' % (section, ', '.join(references))]
+        return out
+
+    def __str__(self, func_role=''):
+        out = []
+        out += self._str_signature()
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters','Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_section('Warnings')
+        out += self._str_see_also(func_role)
+        for s in ('Notes','References','Examples'):
+            out += self._str_section(s)
+        out += self._str_index()
+        return '\n'.join(out)
+
+
+def indent(str,indent=4):
+    indent_str = ' '*indent
+    if str is None:
+        return indent_str
+    lines = str.split('\n')
+    return '\n'.join(indent_str + l for l in lines)
+
+def dedent_lines(lines):
+    """Deindent a list of lines maximally"""
+    return textwrap.dedent("\n".join(lines)).split("\n")
+
+def header(text, style='-'):
+    return text + '\n' + style*len(text) + '\n'
+
+
+class FunctionDoc(NumpyDocString):
+    def __init__(self, func, role='func'):
+        self._f = func
+        self._role = role # e.g. "func" or "meth"
+        try:
+            NumpyDocString.__init__(self,inspect.getdoc(func) or '')
+        except ValueError, e:
+            print '*'*78
+            print "ERROR: '%s' while parsing `%s`" % (e, self._f)
+            print '*'*78
+            #print "Docstring follows:"
+            #print doclines
+            #print '='*78
+
+        if not self['Signature']:
+            func, func_name = self.get_func()
+            try:
+                # try to read signature
+                argspec = inspect.getargspec(func)
+                argspec = inspect.formatargspec(*argspec)
+                argspec = argspec.replace('*','\*')
+                signature = '%s%s' % (func_name, argspec)
+            except TypeError, e:
+                signature = '%s()' % func_name
+            self['Signature'] = signature
+
+    def get_func(self):
+        func_name = getattr(self._f, '__name__', self.__class__.__name__)
+        if inspect.isclass(self._f):
+            func = getattr(self._f, '__call__', self._f.__init__)
+        else:
+            func = self._f
+        return func, func_name
+            
+    def __str__(self):
+        out = ''
+
+        func, func_name = self.get_func()
+        signature = self['Signature'].replace('*', '\*')
+
+        roles = {'func': 'function',
+                 'meth': 'method'}
+
+        if self._role:
+            if not roles.has_key(self._role):
+                print "Warning: invalid role %s" % self._role
+            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role,''),
+                                             func_name)
+
+        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        return out
+
+
+class ClassDoc(NumpyDocString):
+    def __init__(self,cls,modulename='',func_doc=FunctionDoc):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        NumpyDocString.__init__(self, pydoc.getdoc(cls))
+
+    @property
+    def methods(self):
+        return [name for name,func in inspect.getmembers(self._cls)
+                if not name.startswith('_') and callable(func)]
+
+    def __str__(self):
+        out = ''
+        out += super(ClassDoc, self).__str__()
+        out += "\n\n"
+
+        #for m in self.methods:
+        #    print "Parsing `%s`" % m
+        #    out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n'
+        #    out += '.. index::\n   single: %s; %s\n\n' % (self._name, m)
+
+        return out
+
+
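
The NumpyDocString class above can also be exercised by hand; a minimal
sketch, assuming docscrape.py is importable and using a made-up ``foo``
docstring::

    from docscrape import NumpyDocString

    text = '\n'.join([
        'foo(a, b)',
        '',
        'Compute foo.',
        '',
        'Parameters',
        '----------',
        'a : int',
        '    First operand.',
    ])
    doc = NumpyDocString(text)
    print doc['Signature']     # foo(a, b)
    print doc['Parameters']    # [('a', 'int', ['First operand.'])]
    print str(doc)             # the docstring re-rendered section by section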

Added: trunk/doc/sphinxext/docscrape_sphinx.py
===================================================================
--- trunk/doc/sphinxext/docscrape_sphinx.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/docscrape_sphinx.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,133 @@
+import re, inspect, textwrap, pydoc
+from docscrape import NumpyDocString, FunctionDoc, ClassDoc
+
+class SphinxDocString(NumpyDocString):
+    # string conversion routines
+    def _str_header(self, name, symbol='`'):
+        return ['.. rubric:: ' + name, '']
+
+    def _str_field_list(self, name):
+        return [':' + name + ':']
+
+    def _str_indent(self, doc, indent=4):
+        out = []
+        for line in doc:
+            out += [' '*indent + line]
+        return out
+
+    def _str_signature(self):
+        return ['']
+        if self['Signature']:
+            return ['``%s``' % self['Signature']] + ['']
+        else:
+            return ['']
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Extended Summary'] + ['']
+
+    def _str_param_list(self, name):
+        out = []
+        if self[name]:
+            out += self._str_field_list(name)
+            out += ['']
+            for param,param_type,desc in self[name]:
+                out += self._str_indent(['**%s** : %s' % (param.strip(),
+                                                          param_type)])
+                out += ['']
+                out += self._str_indent(desc,8)
+                out += ['']
+        return out
+
+    def _str_section(self, name):
+        out = []
+        if self[name]:
+            out += self._str_header(name)
+            out += ['']
+            content = textwrap.dedent("\n".join(self[name])).split("\n")
+            out += content
+            out += ['']
+        return out
+
+    def _str_see_also(self, func_role):
+        out = []
+        if self['See Also']:
+            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            out = ['.. seealso::', '']
+            out += self._str_indent(see_also[2:])
+        return out
+
+    def _str_warnings(self):
+        out = []
+        if self['Warnings']:
+            out = ['.. warning::', '']
+            out += self._str_indent(self['Warnings'])
+        return out
+
+    def _str_index(self):
+        idx = self['index']
+        out = []
+        if len(idx) == 0:
+            return out
+
+        out += ['.. index:: %s' % idx.get('default','')]
+        for section, references in idx.iteritems():
+            if section == 'default':
+                continue
+            elif section == 'refguide':
+                out += ['   single: %s' % (', '.join(references))]
+            else:
+                out += ['   %s: %s' % (section, ','.join(references))]
+        return out
+
+    def _str_references(self):
+        out = []
+        if self['References']:
+            out += self._str_header('References')
+            if isinstance(self['References'], str):
+                self['References'] = [self['References']]
+            out.extend(self['References'])
+            out += ['']
+        return out
+
+    def __str__(self, indent=0, func_role="obj"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Attributes', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_warnings()
+        out += self._str_see_also(func_role)
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
+    pass
+
+class SphinxClassDoc(SphinxDocString, ClassDoc):
+    pass
+
+def get_doc_object(obj, what=None):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc)
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '')
+    else:
+        return SphinxDocString(pydoc.getdoc(obj))
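
get_doc_object above just picks the matching Sphinx-flavoured wrapper for
whatever it is handed; a small sketch, using a made-up function ``f``::

    from docscrape_sphinx import get_doc_object

    def f(x, y=0):
        """
        Add two numbers.

        Parameters
        ----------
        x : float
            First term.
        y : float, optional
            Second term.
        """
        return x + y

    doc = get_doc_object(f)   # a callable, so SphinxFunctionDoc is used
    print str(doc)            # reST output with a :Parameters: field list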

Added: trunk/doc/sphinxext/numpydoc.py
===================================================================
--- trunk/doc/sphinxext/numpydoc.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/numpydoc.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,111 @@
+"""
+========
+numpydoc
+========
+
+Sphinx extension that handles docstrings in the Numpy standard format [1].
+
+It will:
+
+- Convert Parameters etc. sections to field lists.
+- Convert See Also section to a See also entry.
+- Renumber references.
+- Extract the signature from the docstring, if it can't be determined otherwise.
+
+.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
+
+"""
+
+import os, re, pydoc
+from docscrape_sphinx import get_doc_object, SphinxDocString
+import inspect
+
+def mangle_docstrings(app, what, name, obj, options, lines,
+                      reference_offset=[0]):
+    if what == 'module':
+        # Strip top title
+        title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
+                              re.I|re.S)
+        lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
+    else:
+        doc = get_doc_object(obj, what)
+        lines[:] = str(doc).split("\n")
+
+    if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
+           obj.__name__:
+        v = dict(full_name=obj.__name__)
+        lines += [''] + (app.config.numpydoc_edit_link % v).split("\n")
+
+    # replace reference numbers so that there are no duplicates
+    references = []
+    for l in lines:
+        l = l.strip()
+        if l.startswith('.. ['):
+            try:
+                references.append(int(l[len('.. ['):l.index(']')]))
+            except ValueError:
+                print "WARNING: invalid reference in %s docstring" % name
+
+    # Start renaming from the biggest number, otherwise we may
+    # overwrite references.
+    references.sort(reverse=True)
+    if references:
+        for i, line in enumerate(lines):
+            for r in references:
+                new_r = reference_offset[0] + r
+                lines[i] = lines[i].replace('[%d]_' % r,
+                                            '[%d]_' % new_r)
+                lines[i] = lines[i].replace('.. [%d]' % r,
+                                            '.. [%d]' % new_r)
+
+    reference_offset[0] += len(references)
+
+def mangle_signature(app, what, name, obj, options, sig, retann):
+    # Do not try to inspect classes that don't define `__init__`
+    if (inspect.isclass(obj) and
+        'initializes x; see ' in pydoc.getdoc(obj.__init__)):
+        return '', ''
+
+    if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
+    if not hasattr(obj, '__doc__'): return
+
+    doc = SphinxDocString(pydoc.getdoc(obj))
+    if doc['Signature']:
+        sig = re.sub("^[^(]*", "", doc['Signature'])
+        return sig, ''
+
+def initialize(app):
+    try:
+        app.connect('autodoc-process-signature', mangle_signature)
+    except:
+        monkeypatch_sphinx_ext_autodoc()
+
+def setup(app, get_doc_object_=get_doc_object):
+    global get_doc_object
+    get_doc_object = get_doc_object_
+    
+    app.connect('autodoc-process-docstring', mangle_docstrings)
+    app.connect('builder-inited', initialize)
+    app.add_config_value('numpydoc_edit_link', None, True)
+
+#------------------------------------------------------------------------------
+# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
+#------------------------------------------------------------------------------
+
+def monkeypatch_sphinx_ext_autodoc():
+    global _original_format_signature
+    import sphinx.ext.autodoc
+
+    if sphinx.ext.autodoc.format_signature is our_format_signature:
+        return
+
+    print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
+    _original_format_signature = sphinx.ext.autodoc.format_signature
+    sphinx.ext.autodoc.format_signature = our_format_signature
+
+def our_format_signature(what, obj):
+    r = mangle_signature(None, what, None, obj, None, None, None)
+    if r is not None:
+        return r[0]
+    else:
+        return _original_format_signature(what, obj)
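
Enabling the extension above only requires that it is importable and listed
in ``conf.py``; a sketch, where the path and the edit URL are assumptions::

    # conf.py
    import os, sys
    sys.path.append(os.path.abspath('../sphinxext'))

    extensions = ['sphinx.ext.autodoc', 'numpydoc']

    # Optional: text appended below every docstring; %(full_name)s is
    # substituted by mangle_docstrings() above. The URL is a placeholder.
    numpydoc_edit_link = '(edit: http://example.invalid/edit/%(full_name)s)'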

Added: trunk/doc/sphinxext/only_directives.py
===================================================================
--- trunk/doc/sphinxext/only_directives.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/only_directives.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,87 @@
+#
+# A pair of directives for inserting content that will only appear in
+# either html or latex.
+#
+
+from docutils.nodes import Body, Element
+from docutils.writers.html4css1 import HTMLTranslator
+from sphinx.latexwriter import LaTeXTranslator
+from docutils.parsers.rst import directives
+
+class html_only(Body, Element):
+    pass
+
+class latex_only(Body, Element):
+    pass
+
+def run(content, node_class, state, content_offset):
+    text = '\n'.join(content)
+    node = node_class(text)
+    state.nested_parse(content, content_offset, node)
+    return [node]
+
+try:
+    from docutils.parsers.rst import Directive
+except ImportError:
+    from docutils.parsers.rst.directives import _directives
+
+    def html_only_directive(name, arguments, options, content, lineno,
+                            content_offset, block_text, state, state_machine):
+        return run(content, html_only, state, content_offset)
+
+    def latex_only_directive(name, arguments, options, content, lineno,
+                             content_offset, block_text, state, state_machine):
+        return run(content, latex_only, state, content_offset)
+
+    for func in (html_only_directive, latex_only_directive):
+        func.content = 1
+        func.options = {}
+        func.arguments = None
+
+    _directives['htmlonly'] = html_only_directive
+    _directives['latexonly'] = latex_only_directive
+else:
+    class OnlyDirective(Directive):
+        has_content = True
+        required_arguments = 0
+        optional_arguments = 0
+        final_argument_whitespace = True
+        option_spec = {}
+
+        def run(self):
+            self.assert_has_content()
+            return run(self.content, self.node_class,
+                       self.state, self.content_offset)
+
+    class HtmlOnlyDirective(OnlyDirective):
+        node_class = html_only
+
+    class LatexOnlyDirective(OnlyDirective):
+        node_class = latex_only
+
+    directives.register_directive('htmlonly', HtmlOnlyDirective)
+    directives.register_directive('latexonly', LatexOnlyDirective)
+
+def setup(app):
+    app.add_node(html_only)
+    app.add_node(latex_only)
+
+    # Add visit/depart methods to HTML-Translator:
+    def visit_perform(self, node):
+        pass
+    def depart_perform(self, node):
+        pass
+    def visit_ignore(self, node):
+        node.children = []
+    def depart_ignore(self, node):
+        node.children = []
+
+    HTMLTranslator.visit_html_only = visit_perform
+    HTMLTranslator.depart_html_only = depart_perform
+    HTMLTranslator.visit_latex_only = visit_ignore
+    HTMLTranslator.depart_latex_only = depart_ignore
+
+    LaTeXTranslator.visit_html_only = visit_ignore
+    LaTeXTranslator.depart_html_only = depart_ignore
+    LaTeXTranslator.visit_latex_only = visit_perform
+    LaTeXTranslator.depart_latex_only = depart_perform
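
The two directives above become available once the module is loaded as a
Sphinx extension; a sketch of the wiring, with the reST usage shown in the
comments::

    # conf.py (assumes the sphinxext directory is already on sys.path)
    extensions = ['only_directives']

    # In the reST sources the directives are then used as, for example:
    #
    #   .. htmlonly::
    #
    #      This paragraph appears in the HTML build only.
    #
    #   .. latexonly::
    #
    #      This paragraph appears in the LaTeX/PDF build only.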

Added: trunk/doc/sphinxext/phantom_import.py
===================================================================
--- trunk/doc/sphinxext/phantom_import.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/phantom_import.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,162 @@
+"""
+==============
+phantom_import
+==============
+
+Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
+extensions use docstrings loaded from an XML file.
+
+This extension loads an XML file in the Pydocweb format [1] and
+creates a dummy module that contains the specified docstrings. This
+can be used to get the current docstrings from a Pydocweb instance
+without needing to rebuild the documented module.
+
+.. [1] http://code.google.com/p/pydocweb
+
+"""
+import imp, sys, compiler, types, os, inspect, re
+
+def setup(app):
+    app.connect('builder-inited', initialize)
+    app.add_config_value('phantom_import_file', None, True)
+
+def initialize(app):
+    fn = app.config.phantom_import_file
+    if (fn and os.path.isfile(fn)):
+        print "[numpydoc] Phantom importing modules from", fn, "..."
+        import_phantom_module(fn)
+
+#------------------------------------------------------------------------------
+# Creating 'phantom' modules from an XML description
+#------------------------------------------------------------------------------
+def import_phantom_module(xml_file):
+    """
+    Insert a fake Python module into sys.modules, based on an XML file.
+
+    The XML file is expected to conform to the Pydocweb DTD. The fake
+    module will contain dummy objects, which guarantee the following:
+
+    - Docstrings are correct.
+    - Class inheritance relationships are correct (if present in XML).
+    - Function argspec is *NOT* correct (even if present in XML).
+      Instead, the function signature is prepended to the function docstring.
+    - Class attributes are *NOT* correct; instead, they are dummy objects.
+
+    Parameters
+    ----------
+    xml_file : str
+        Name of an XML file to read
+    
+    """
+    import lxml.etree as etree
+
+    object_cache = {}
+
+    tree = etree.parse(xml_file)
+    root = tree.getroot()
+
+    # Sort items so that
+    # - Base classes come before classes inherited from them
+    # - Modules come before their contents
+    all_nodes = dict([(n.attrib['id'], n) for n in root])
+    
+    def _get_bases(node, recurse=False):
+        bases = [x.attrib['ref'] for x in node.findall('base')]
+        if recurse:
+            j = 0
+            while True:
+                try:
+                    b = bases[j]
+                except IndexError: break
+                if b in all_nodes:
+                    bases.extend(_get_bases(all_nodes[b]))
+                j += 1
+        return bases
+
+    type_index = ['module', 'class', 'callable', 'object']
+    
+    def base_cmp(a, b):
+        x = cmp(type_index.index(a.tag), type_index.index(b.tag))
+        if x != 0: return x
+
+        if a.tag == 'class' and b.tag == 'class':
+            a_bases = _get_bases(a, recurse=True)
+            b_bases = _get_bases(b, recurse=True)
+            x = cmp(len(a_bases), len(b_bases))
+            if x != 0: return x
+            if a.attrib['id'] in b_bases: return -1
+            if b.attrib['id'] in a_bases: return 1
+        
+        return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
+
+    nodes = root.getchildren()
+    nodes.sort(base_cmp)
+
+    # Create phantom items
+    for node in nodes:
+        name = node.attrib['id']
+        doc = (node.text or '').decode('string-escape') + "\n"
+        if doc == "\n": doc = ""
+
+        # create parent, if missing
+        parent = name
+        while True:
+            parent = '.'.join(parent.split('.')[:-1])
+            if not parent: break
+            if parent in object_cache: break
+            obj = imp.new_module(parent)
+            object_cache[parent] = obj
+            sys.modules[parent] = obj
+
+        # create object
+        if node.tag == 'module':
+            obj = imp.new_module(name)
+            obj.__doc__ = doc
+            sys.modules[name] = obj
+        elif node.tag == 'class':
+            bases = [object_cache[b] for b in _get_bases(node)
+                     if b in object_cache]
+            bases.append(object)
+            init = lambda self: None
+            init.__doc__ = doc
+            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
+            obj.__name__ = name.split('.')[-1]
+        elif node.tag == 'callable':
+            funcname = node.attrib['id'].split('.')[-1]
+            argspec = node.attrib.get('argspec')
+            if argspec:
+                argspec = re.sub('^[^(]*', '', argspec)
+                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
+            obj = lambda: 0
+            obj.__argspec_is_invalid_ = True
+            obj.func_name = funcname
+            obj.__name__ = name
+            obj.__doc__ = doc
+            if inspect.isclass(object_cache[parent]):
+                obj.__objclass__ = object_cache[parent]
+        else:
+            class Dummy(object): pass
+            obj = Dummy()
+            obj.__name__ = name
+            obj.__doc__ = doc
+            if inspect.isclass(object_cache[parent]):
+                obj.__get__ = lambda: None
+        object_cache[name] = obj
+
+        if parent:
+            if inspect.ismodule(object_cache[parent]):
+                obj.__module__ = parent
+                setattr(object_cache[parent], name.split('.')[-1], obj)
+
+    # Populate items
+    for node in root:
+        obj = object_cache.get(node.attrib['id'])
+        if obj is None: continue
+        for ref in node.findall('ref'):
+            if node.tag == 'class':
+                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
+                    setattr(obj, ref.attrib['name'],
+                            object_cache.get(ref.attrib['ref']))
+            else:
+                setattr(obj, ref.attrib['name'],
+                        object_cache.get(ref.attrib['ref']))
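
Hooking the extension above into a build amounts to pointing it at an XML
dump; import_phantom_module() then inserts the dummy modules into
sys.modules so that ordinary autodoc directives pick up the dumped
docstrings. A sketch, where the file name is an assumption::

    # conf.py (assumes the sphinxext directory is already on sys.path)
    extensions = ['phantom_import', 'sphinx.ext.autodoc', 'numpydoc']
    phantom_import_file = 'dump.xml'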

Added: trunk/doc/sphinxext/plot_directive.py
===================================================================
--- trunk/doc/sphinxext/plot_directive.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/plot_directive.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,295 @@
+# plot_directive.py from matplotlib.sf.net
+"""A special directive for including a matplotlib plot.
+
+Given a path to a .py file, it includes the source code inline, then:
+
+- On HTML, will include a .png with a link to a high-res .png.
+
+- On LaTeX, will include a .pdf
+
+This directive supports all of the options of the `image` directive,
+except for `target` (since plot will add its own target).
+
+Additionally, if the :include-source: option is provided, the literal
+source will be included inline, as well as a link to the source.
+
+.. warning::
+
+   This is a hacked version of plot_directive.py from Matplotlib.
+   It's very much subject to change!
+
+"""
+
+import sys, os, glob, shutil, imp, warnings, cStringIO, re
+from docutils.parsers.rst import directives
+try:
+    # docutils 0.4
+    from docutils.parsers.rst.directives.images import align
+except ImportError:
+    # docutils 0.5
+    from docutils.parsers.rst.directives.images import Image
+    align = Image.align
+
+import matplotlib
+import matplotlib.cbook as cbook
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+import matplotlib.image as image
+from matplotlib import _pylab_helpers
+
+def runfile(fullpath, is_doctest=False):
+    # Change the working directory to the directory of the example, so
+    # it can get at its data files, if any.
+    pwd = os.getcwd()
+    path, fname = os.path.split(fullpath)
+    os.chdir(path)
+    stdout = sys.stdout
+    sys.stdout = cStringIO.StringIO()
+    try:
+        code = ""
+        if is_doctest:
+            fd = cStringIO.StringIO()
+            for line in open(fname):
+                m = re.match(r'^\s*(>>>|...) (.*)$', line)
+                if m:
+                    code += m.group(2) + "\n"
+        else:
+            code = open(fname).read()
+
+        ns = {}
+        exec setup.config.plot_pre_code in ns
+        exec code in ns
+    finally:
+        os.chdir(pwd)
+        sys.stdout = stdout
+    return ns
+
+options = {'alt': directives.unchanged,
+           'height': directives.length_or_unitless,
+           'width': directives.length_or_percentage_or_unitless,
+           'scale': directives.nonnegative_int,
+           'align': align,
+           'class': directives.class_option,
+           'include-source': directives.flag,
+           'doctest-format': directives.flag
+           }
+
+template = """
+.. htmlonly::
+
+   [`source code <%(linkdir)s/%(sourcename)s>`__,
+   `png <%(linkdir)s/%(outname)s.hires.png>`__,
+   `pdf <%(linkdir)s/%(outname)s.pdf>`__]
+
+   .. image:: %(linkdir)s/%(outname)s.png
+%(options)s
+
+.. latexonly::
+   .. image:: %(linkdir)s/%(outname)s.pdf
+%(options)s
+
+"""
+
+exception_template = """
+.. htmlonly::
+
+   [`source code <%(linkdir)s/%(sourcename)s>`__]
+
+Exception occurred rendering plot.
+
+"""
+
+
+def out_of_date(original, derived):
+    """
+    Returns True if derived is out of date with respect to original,
+    both of which are full file paths.
+    """
+    return (not os.path.exists(derived)
+            or os.stat(derived).st_mtime < os.stat(original).st_mtime)
+
+def makefig(fullpath, outdir, is_doctest=False):
+    """
+    Run a pyplot script and save the low- and high-resolution PNGs and a PDF in _static.
+
+    """
+
+    fullpath = str(fullpath)  # todo, why is unicode breaking this
+
+    print '    makefig: fullpath=%s, outdir=%s'%( fullpath, outdir)
+    formats = [('png', 80),
+               ('hires.png', 200),
+               ('pdf', 50),
+               ]
+
+    basedir, fname = os.path.split(fullpath)
+    basename, ext = os.path.splitext(fname)
+    if ext != '.py':
+        basename = fname
+    sourcename = fname
+    all_exists = True
+
+    if basedir != outdir:
+        shutil.copyfile(fullpath, os.path.join(outdir, fname))
+
+    # Look for single-figure output files first
+    for format, dpi in formats:
+        outname = os.path.join(outdir, '%s.%s' % (basename, format))
+        if out_of_date(fullpath, outname):
+            all_exists = False
+            break
+
+    if all_exists:
+        print '    already have %s'%fullpath
+        return 1
+
+    # Then look for multi-figure output files, assuming
+    # if we have some we have all...
+    i = 0
+    while True:
+        all_exists = True
+        for format, dpi in formats:
+            outname = os.path.join(outdir, '%s_%02d.%s' % (basename, i, format))
+            if out_of_date(fullpath, outname):
+                all_exists = False
+                break
+        if all_exists:
+            i += 1
+        else:
+            break
+
+    if i != 0:
+        print '    already have %d figures for %s' % (i, fullpath)
+        return i
+
+    # We didn't find the files, so build them
+
+    print '    building %s'%fullpath
+    plt.close('all')    # we need to clear between runs
+    matplotlib.rcdefaults()
+    # Set a figure size that doesn't overflow typical browser windows
+    matplotlib.rcParams['figure.figsize'] = (5.5, 4.5)
+
+    try:
+        runfile(fullpath, is_doctest=is_doctest)
+    except:
+        s = cbook.exception_to_str("Exception running plot %s" % fullpath)
+        warnings.warn(s)
+        return 0
+
+    fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
+    for i, figman in enumerate(fig_managers):
+        for format, dpi in formats:
+            if len(fig_managers) == 1:
+                outname = basename
+            else:
+                outname = "%s_%02d" % (basename, i)
+            outpath = os.path.join(outdir, '%s.%s' % (outname, format))
+            try:
+                figman.canvas.figure.savefig(outpath, dpi=dpi)
+            except:
+                s = cbook.exception_to_str("Exception running plot %s" % fullpath)
+                warnings.warn(s)
+                return 0
+
+    return len(fig_managers)
+
+def run(arguments, options, state_machine, lineno):
+    reference = directives.uri(arguments[0])
+    basedir, fname = os.path.split(reference)
+    basename, ext = os.path.splitext(fname)
+    if ext != '.py':
+        basename = fname
+    sourcename = fname
+    #print 'plotdir', reference, basename, ext
+
+    # get the directory of the rst file
+    rstdir, rstfile = os.path.split(state_machine.document.attributes['source'])
+    reldir = rstdir[len(setup.confdir)+1:]
+    relparts = [p for p in os.path.split(reldir) if p.strip()]
+    nparts = len(relparts)
+    #print '    rstdir=%s, reldir=%s, relparts=%s, nparts=%d'%(rstdir, reldir, relparts, nparts)
+    #print 'RUN', rstdir, reldir
+    outdir = os.path.join(setup.confdir, setup.config.plot_output_dir, basedir)
+    if not os.path.exists(outdir):
+        cbook.mkdirs(outdir)
+
+    linkdir = ('../' * nparts) + setup.config.plot_output_dir.replace(os.path.sep, '/') + '/' + basedir
+    #linkdir = os.path.join('..', outdir)
+    num_figs = makefig(reference, outdir,
+                       is_doctest=('doctest-format' in options))
+    #print '    reference="%s", basedir="%s", linkdir="%s", outdir="%s"'%(reference, basedir, linkdir, outdir)
+
+    if options.has_key('include-source'):
+        contents = open(reference, 'r').read()
+        if 'doctest-format' in options:
+            lines = ['']
+        else:
+            lines = ['.. code-block:: python', '']
+        lines += ['    %s'%row.rstrip() for row in contents.split('\n')]
+        del options['include-source']
+    else:
+        lines = []
+
+    if 'doctest-format' in options:
+        del options['doctest-format']
+    
+    if num_figs > 0:
+        options = ['      :%s: %s' % (key, val) for key, val in
+                   options.items()]
+        options = "\n".join(options)
+
+        for i in range(num_figs):
+            if num_figs == 1:
+                outname = basename
+            else:
+                outname = "%s_%02d" % (basename, i)
+            lines.extend((template % locals()).split('\n'))
+    else:
+        lines.extend((exception_template % locals()).split('\n'))
+
+    if len(lines):
+        state_machine.insert_input(
+            lines, state_machine.input_lines.source(0))
+    return []
+
+
+
+try:
+    from docutils.parsers.rst import Directive
+except ImportError:
+    from docutils.parsers.rst.directives import _directives
+
+    def plot_directive(name, arguments, options, content, lineno,
+                       content_offset, block_text, state, state_machine):
+        return run(arguments, options, state_machine, lineno)
+    plot_directive.__doc__ = __doc__
+    plot_directive.arguments = (1, 0, 1)
+    plot_directive.options = options
+
+    _directives['plot'] = plot_directive
+else:
+    class plot_directive(Directive):
+        required_arguments = 1
+        optional_arguments = 0
+        final_argument_whitespace = True
+        option_spec = options
+        def run(self):
+            return run(self.arguments, self.options,
+                       self.state_machine, self.lineno)
+    plot_directive.__doc__ = __doc__
+
+    directives.register_directive('plot', plot_directive)
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_config_value('plot_output_dir', '_static', True)
+    app.add_config_value('plot_pre_code', '', True)
+
+plot_directive.__doc__ = __doc__
+
+directives.register_directive('plot', plot_directive)
+
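
The directive above reads its settings from ``conf.py``; a sketch of a
typical configuration, where the pre-code snippet and the script path in
the comment are assumptions::

    # conf.py (assumes the sphinxext directory is already on sys.path)
    extensions = ['plot_directive']
    plot_output_dir = '_static'   # where the .png/.hires.png/.pdf files land
    plot_pre_code = 'import numpy as np\nimport matplotlib.pyplot as plt\n'

    # In the reST sources the directive is then used as, for example:
    #
    #   .. plot:: pyplots/example_plot.py
    #      :include-source: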

Added: trunk/doc/sphinxext/tests/test_docscrape.py
===================================================================
--- trunk/doc/sphinxext/tests/test_docscrape.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/tests/test_docscrape.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,490 @@
+# -*- encoding:utf-8 -*-
+
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+
+from docscrape import NumpyDocString, FunctionDoc
+from docscrape_sphinx import SphinxDocString
+from nose.tools import *
+
+doc_txt = '''\
+  numpy.multivariate_normal(mean, cov, shape=None)
+
+  Draw values from a multivariate normal distribution with specified
+  mean and covariance.
+
+  The multivariate normal or Gaussian distribution is a generalisation
+  of the one-dimensional normal distribution to higher dimensions.
+
+  Parameters
+  ----------
+  mean : (N,) ndarray
+      Mean of the N-dimensional distribution.
+
+      .. math::
+
+         (1+2+3)/3
+
+  cov : (N,N) ndarray
+      Covariance matrix of the distribution.
+  shape : tuple of ints
+      Given a shape of, for example, (m,n,k), m*n*k samples are
+      generated, and packed in an m-by-n-by-k arrangement.  Because
+      each sample is N-dimensional, the output shape is (m,n,k,N).
+
+  Returns
+  -------
+  out : ndarray
+      The drawn samples, arranged according to `shape`.  If the
+      shape given is (m,n,...), then the shape of `out` is
+      (m,n,...,N).
+
+      In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+      value drawn from the distribution.
+
+  Warnings
+  --------
+  Certain warnings apply.
+
+  Notes
+  -----
+
+  Instead of specifying the full covariance matrix, popular
+  approximations include:
+
+    - Spherical covariance (`cov` is a multiple of the identity matrix)
+    - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+  This geometrical property can be seen in two dimensions by plotting
+  generated data-points:
+
+  >>> mean = [0,0]
+  >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+  >>> x,y = multivariate_normal(mean,cov,5000).T
+  >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+  Note that the covariance matrix must be symmetric and non-negative
+  definite.
+
+  References
+  ----------
+  .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+         Processes," 3rd ed., McGraw-Hill Companies, 1991
+  .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+         2nd ed., Wiley, 2001.
+
+  See Also
+  --------
+  some, other, funcs
+  otherfunc : relationship
+
+  Examples
+  --------
+  >>> mean = (1,2)
+  >>> cov = [[1,0],[1,0]]
+  >>> x = multivariate_normal(mean,cov,(3,3))
+  >>> print x.shape
+  (3, 3, 2)
+
+  The following is probably true, given that 0.6 is roughly twice the
+  standard deviation:
+
+  >>> print list( (x[0,0,:] - mean) < 0.6 )
+  [True, True]
+
+  .. index:: random
+     :refguide: random;distributions, random;gauss
+
+  '''
+doc = NumpyDocString(doc_txt)
+
+
+def test_signature():
+    assert doc['Signature'].startswith('numpy.multivariate_normal(')
+    assert doc['Signature'].endswith('shape=None)')
+
+def test_summary():
+    assert doc['Summary'][0].startswith('Draw values')
+    assert doc['Summary'][-1].endswith('covariance.')
+
+def test_extended_summary():
+    assert doc['Extended Summary'][0].startswith('The multivariate normal')
+
+def test_parameters():
+    assert_equal(len(doc['Parameters']), 3)
+    assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
+
+    arg, arg_type, desc = doc['Parameters'][1]
+    assert_equal(arg_type, '(N,N) ndarray')
+    assert desc[0].startswith('Covariance matrix')
+    assert doc['Parameters'][0][-1][-2] == '   (1+2+3)/3'
+
+def test_returns():
+    assert_equal(len(doc['Returns']), 1)
+    arg, arg_type, desc = doc['Returns'][0]
+    assert_equal(arg, 'out')
+    assert_equal(arg_type, 'ndarray')
+    assert desc[0].startswith('The drawn samples')
+    assert desc[-1].endswith('distribution.')
+
+def test_notes():
+    assert doc['Notes'][0].startswith('Instead')
+    assert doc['Notes'][-1].endswith('definite.')
+    assert_equal(len(doc['Notes']), 17)
+
+def test_references():
+    assert doc['References'][0].startswith('..')
+    assert doc['References'][-1].endswith('2001.')
+
+def test_examples():
+    assert doc['Examples'][0].startswith('>>>')
+    assert doc['Examples'][-1].endswith('True]')
+
+def test_index():
+    assert_equal(doc['index']['default'], 'random')
+    print doc['index']
+    assert_equal(len(doc['index']), 2)
+    assert_equal(len(doc['index']['refguide']), 2)
+
+def non_blank_line_by_line_compare(a,b):
+    a = [l for l in a.split('\n') if l.strip()]
+    b = [l for l in b.split('\n') if l.strip()]
+    for n,line in enumerate(a):
+        if not line == b[n]:
+            raise AssertionError("Lines %s of a and b differ: "
+                                 "\n>>> %s\n<<< %s\n" %
+                                 (n,line,b[n]))
+def test_str():
+    non_blank_line_by_line_compare(str(doc),
+"""numpy.multivariate_normal(mean, cov, shape=None)
+
+Draw values from a multivariate normal distribution with specified
+mean and covariance.
+
+The multivariate normal or Gaussian distribution is a generalisation
+of the one-dimensional normal distribution to higher dimensions.
+
+Parameters
+----------
+mean : (N,) ndarray
+    Mean of the N-dimensional distribution.
+
+    .. math::
+
+       (1+2+3)/3
+
+cov : (N,N) ndarray
+    Covariance matrix of the distribution.
+shape : tuple of ints
+    Given a shape of, for example, (m,n,k), m*n*k samples are
+    generated, and packed in an m-by-n-by-k arrangement.  Because
+    each sample is N-dimensional, the output shape is (m,n,k,N).
+
+Returns
+-------
+out : ndarray
+    The drawn samples, arranged according to `shape`.  If the
+    shape given is (m,n,...), then the shape of `out` is
+    (m,n,...,N).
+
+    In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+    value drawn from the distribution.
+
+Warnings
+--------
+Certain warnings apply.
+
+See Also
+--------
+`some`_, `other`_, `funcs`_
+
+`otherfunc`_
+    relationship
+
+Notes
+-----
+Instead of specifying the full covariance matrix, popular
+approximations include:
+
+  - Spherical covariance (`cov` is a multiple of the identity matrix)
+  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+This geometrical property can be seen in two dimensions by plotting
+generated data-points:
+
+>>> mean = [0,0]
+>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+>>> x,y = multivariate_normal(mean,cov,5000).T
+>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+Note that the covariance matrix must be symmetric and non-negative
+definite.
+
+References
+----------
+.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+       Processes," 3rd ed., McGraw-Hill Companies, 1991
+.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+       2nd ed., Wiley, 2001.
+
+Examples
+--------
+>>> mean = (1,2)
+>>> cov = [[1,0],[1,0]]
+>>> x = multivariate_normal(mean,cov,(3,3))
+>>> print x.shape
+(3, 3, 2)
+
+The following is probably true, given that 0.6 is roughly twice the
+standard deviation:
+
+>>> print list( (x[0,0,:] - mean) < 0.6 )
+[True, True]
+
+.. index:: random
+   :refguide: random;distributions, random;gauss""")
+
+
+def test_sphinx_str():
+    sphinx_doc = SphinxDocString(doc_txt)
+    non_blank_line_by_line_compare(str(sphinx_doc),
+"""
+.. index:: random
+   single: random;distributions, random;gauss
+
+Draw values from a multivariate normal distribution with specified
+mean and covariance.
+
+The multivariate normal or Gaussian distribution is a generalisation
+of the one-dimensional normal distribution to higher dimensions.
+
+:Parameters:
+
+    **mean** : (N,) ndarray
+
+        Mean of the N-dimensional distribution.
+
+        .. math::
+
+           (1+2+3)/3
+
+    **cov** : (N,N) ndarray
+
+        Covariance matrix of the distribution.
+
+    **shape** : tuple of ints
+
+        Given a shape of, for example, (m,n,k), m*n*k samples are
+        generated, and packed in an m-by-n-by-k arrangement.  Because
+        each sample is N-dimensional, the output shape is (m,n,k,N).
+
+:Returns:
+
+    **out** : ndarray
+
+        The drawn samples, arranged according to `shape`.  If the
+        shape given is (m,n,...), then the shape of `out` is
+        (m,n,...,N).
+        
+        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+        value drawn from the distribution.
+
+.. warning::
+
+    Certain warnings apply.
+
+.. seealso::
+    
+    :obj:`some`, :obj:`other`, :obj:`funcs`
+    
+    :obj:`otherfunc`
+        relationship
+    
+.. rubric:: Notes
+
+Instead of specifying the full covariance matrix, popular
+approximations include:
+
+  - Spherical covariance (`cov` is a multiple of the identity matrix)
+  - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
+
+This geometrical property can be seen in two dimensions by plotting
+generated data-points:
+
+>>> mean = [0,0]
+>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
+
+>>> x,y = multivariate_normal(mean,cov,5000).T
+>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
+
+Note that the covariance matrix must be symmetric and non-negative
+definite.
+
+.. rubric:: References
+
+.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
+       Processes," 3rd ed., McGraw-Hill Companies, 1991
+.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
+       2nd ed., Wiley, 2001.
+
+.. rubric:: Examples
+
+>>> mean = (1,2)
+>>> cov = [[1,0],[1,0]]
+>>> x = multivariate_normal(mean,cov,(3,3))
+>>> print x.shape
+(3, 3, 2)
+
+The following is probably true, given that 0.6 is roughly twice the
+standard deviation:
+
+>>> print list( (x[0,0,:] - mean) < 0.6 )
+[True, True]
+""")
+
+       
+doc2 = NumpyDocString("""
+    Returns array of indices of the maximum values along the given axis.
+
+    Parameters
+    ----------
+    a : {array_like}
+        Array to look in.
+    axis : {None, integer}
+        If None, the index is into the flattened array, otherwise along
+        the specified axis""")
+
+def test_parameters_without_extended_description():
+    assert_equal(len(doc2['Parameters']), 2)
+
+doc3 = NumpyDocString("""
+    my_signature(*params, **kwds)
+
+    Return this and that.
+    """)
+
+def test_escape_stars():
+    signature = str(doc3).split('\n')[0]
+    assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
+
+doc4 = NumpyDocString(
+    """a.conj()
+
+    Return an array with all complex-valued elements conjugated.""")
+
+def test_empty_extended_summary():
+    assert_equal(doc4['Extended Summary'], [])
+
+doc5 = NumpyDocString(
+    """
+    a.something()
+
+    Raises
+    ------
+    LinAlgException
+        If array is singular.
+
+    """)
+
+def test_raises():
+    assert_equal(len(doc5['Raises']), 1)
+    name,_,desc = doc5['Raises'][0]
+    assert_equal(name,'LinAlgException')
+    assert_equal(desc,['If array is singular.'])
+
+def test_see_also():
+    doc6 = NumpyDocString(
+    """
+    z(x,theta)
+
+    See Also
+    --------
+    func_a, func_b, func_c
+    func_d : some equivalent func
+    foo.func_e : some other func over
+             multiple lines
+    func_f, func_g, :meth:`func_h`, func_j,
+    func_k
+    :obj:`baz.obj_q`
+    :class:`class_j`: fubar
+        foobar
+    """)
+
+    assert len(doc6['See Also']) == 12
+    for func, desc, role in doc6['See Also']:
+        if func in ('func_a', 'func_b', 'func_c', 'func_f',
+                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
+            assert(not desc)
+        else:
+            assert(desc)
+
+        if func == 'func_h':
+            assert role == 'meth'
+        elif func == 'baz.obj_q':
+            assert role == 'obj'
+        elif func == 'class_j':
+            assert role == 'class'
+        else:
+            assert role is None
+
+        if func == 'func_d':
+            assert desc == ['some equivalent func']
+        elif func == 'foo.func_e':
+            assert desc == ['some other func over', 'multiple lines']
+        elif func == 'class_j':
+            assert desc == ['fubar', 'foobar']
+
+def test_see_also_print():
+    class Dummy(object):
+        """
+        See Also
+        --------
+        func_a, func_b
+        func_c : some relationship
+                 goes here
+        func_d
+        """
+        pass
+
+    obj = Dummy()
+    s = str(FunctionDoc(obj, role='func'))
+    assert(':func:`func_a`, :func:`func_b`' in s)
+    assert('    some relationship' in s)
+    assert(':func:`func_d`' in s)
+
+doc7 = NumpyDocString("""
+
+        Doc starts on second line.
+
+        """)
+
+def test_empty_first_line():
+    assert doc7['Summary'][0].startswith('Doc starts')
+
+
+def test_no_summary():
+    str(SphinxDocString("""
+    Parameters
+    ----------"""))
+
+
+def test_unicode():
+    doc = SphinxDocString("""
+    öäöäöäöäöåååå
+
+    öäöäöäööäååå
+
+    Parameters
+    ----------
+    ååå : äää
+        ööö
+
+    Returns
+    -------
+    ååå : ööö
+        äää
+
+    """)
+    assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')

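For orientation, the tests in this hunk exercise the docstring parser added in
docscrape.py and its Sphinx renderer in docscrape_sphinx.py: NumpyDocString
takes a raw docstring and exposes the parsed sections through a mapping
interface, with each Parameters/Raises entry reduced to a (name, type,
description-lines) tuple. A minimal sketch of that usage, assuming
docscrape.py is importable and using a made-up docstring:

    from docscrape import NumpyDocString

    doc = NumpyDocString("""
        Compute something.

        Parameters
        ----------
        a : array_like
            Input values.
        """)

    # Each entry is a (name, type, description) tuple, as the tests above assert.
    name, ptype, desc = doc['Parameters'][0]
    print name, ptype, desc        # e.g. -> a array_like ['Input values.']
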
Added: trunk/doc/sphinxext/traitsdoc.py
===================================================================
--- trunk/doc/sphinxext/traitsdoc.py	2008-11-29 14:54:29 UTC (rev 6124)
+++ trunk/doc/sphinxext/traitsdoc.py	2008-11-30 14:44:38 UTC (rev 6125)
@@ -0,0 +1,140 @@
+"""
+=========
+traitsdoc
+=========
+
+Sphinx extension that handles docstrings in the Numpy standard format [1]
+and supports Traits [2].
+
+This extension can be used as a replacement for ``numpydoc`` when support
+for Traits is required.
+
+.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
+.. [2] http://code.enthought.com/projects/traits/
+
+"""
+
+import inspect
+import os
+import pydoc
+
+import docscrape
+import docscrape_sphinx
+from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
+
+import numpydoc
+
+import comment_eater
+
+class SphinxTraitsDoc(SphinxClassDoc):
+    def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
+        if not inspect.isclass(cls):
+            raise ValueError("Initialise using a class. Got %r" % cls)
+        self._cls = cls
+
+        if modulename and not modulename.endswith('.'):
+            modulename += '.'
+        self._mod = modulename
+        self._name = cls.__name__
+        self._func_doc = func_doc
+
+        docstring = pydoc.getdoc(cls)
+        docstring = docstring.split('\n')
+
+        # De-indent paragraph
+        try:
+            indent = min(len(s) - len(s.lstrip()) for s in docstring
+                         if s.strip())
+        except ValueError:
+            indent = 0
+
+        for n, line in enumerate(docstring):
+            docstring[n] = line[indent:]
+
+        self._doc = docscrape.Reader(docstring)
+        self._parsed_data = {
+            'Signature': '',
+            'Summary': '',
+            'Description': [],
+            'Extended Summary': [],
+            'Parameters': [],
+            'Returns': [],
+            'Raises': [],
+            'Warns': [],
+            'Other Parameters': [],
+            'Traits': [],
+            'Methods': [],
+            'See Also': [],
+            'Notes': [],
+            'References': '',
+            'Example': '',
+            'Examples': '',
+            'index': {}
+            }
+
+        self._parse()
+
+    def _str_summary(self):
+        return self['Summary'] + ['']
+
+    def _str_extended_summary(self):
+        return self['Description'] + self['Extended Summary'] + ['']
+
+    def __str__(self, indent=0, func_role="func"):
+        out = []
+        out += self._str_signature()
+        out += self._str_index() + ['']
+        out += self._str_summary()
+        out += self._str_extended_summary()
+        for param_list in ('Parameters', 'Traits', 'Methods',
+                           'Returns','Raises'):
+            out += self._str_param_list(param_list)
+        out += self._str_see_also("obj")
+        out += self._str_section('Notes')
+        out += self._str_references()
+        out += self._str_section('Example')
+        out += self._str_section('Examples')
+        out = self._str_indent(out,indent)
+        return '\n'.join(out)
+
+def looks_like_issubclass(obj, classname):
+    """ Return True if the object has a class or superclass with the given class
+    name.
+
+    Ignores old-style classes.
+    """
+    t = obj
+    if t.__name__ == classname:
+        return True
+    for klass in t.__mro__:
+        if klass.__name__ == classname:
+            return True
+    return False
+
+def get_doc_object(obj, what=None):
+    if what is None:
+        if inspect.isclass(obj):
+            what = 'class'
+        elif inspect.ismodule(obj):
+            what = 'module'
+        elif callable(obj):
+            what = 'function'
+        else:
+            what = 'object'
+    if what == 'class':
+        doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc)
+        if looks_like_issubclass(obj, 'HasTraits'):
+            for name, trait, comment in comment_eater.get_class_traits(obj):
+                # Exclude private traits.
+                if not name.startswith('_'):
+                    doc['Traits'].append((name, trait, comment.splitlines()))
+        return doc
+    elif what in ('function', 'method'):
+        return SphinxFunctionDoc(obj, '')
+    else:
+        return SphinxDocString(pydoc.getdoc(obj))
+
+def setup(app):
+    # init numpydoc
+    numpydoc.setup(app, get_doc_object)
+
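Since traitsdoc is meant as a drop-in replacement for numpydoc, enabling it
from a project's Sphinx conf.py only requires making the sphinxext directory
importable and listing the extension; its setup() then initialises numpydoc
with the Traits-aware get_doc_object defined above. A minimal sketch, where
the relative '../sphinxext' path is only an assumption about the project
layout:

    import os, sys

    # Make the bundled extensions importable (the path is an assumption).
    sys.path.append(os.path.abspath('../sphinxext'))

    # Load traitsdoc instead of numpydoc; traitsdoc.setup() calls
    # numpydoc.setup(app, get_doc_object) itself.
    extensions = ['traitsdoc']

With this in place, classes deriving from HasTraits additionally get a Traits
section, collected from the class source by comment_eater.get_class_traits as
shown in get_doc_object above.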


