#@+leo-ver=4
#@+node:@file leoScripts.txt
# We use this file to make it easier to use cvs.

#@+all
#@+node:Clear all uA's, tnodeLists, etc.
# Use these with caution.
#@nonl
#@+node:Clean unused tnodeLists
count = 0
for p in c.allNodes_iter():
    count += 1
    # Empty tnodeLists are not errors because they never get written to the .leo file.
    v = p.v
    if hasattr(v,"tnodeList") and len(v.tnodeList) > 0 and not v.isAnyAtFileNode():
        g.es("deleting tnodeList for " + `v`,color="blue")
        delattr(v,"tnodeList")
        c.setChanged(True)

s = "%d nodes" % count
print s ; g.es(s)
#@nonl
#@-node:Clean unused tnodeLists
#@+node:Clean unused tnodeLists
# Leo 4.2 variant: the tnodeList now lives on v.t (the tnode), not on v itself.
v = c.rootVnode()
while v:
    # Empty tnodeLists are not errors because they never get written to the .leo file.
    # New in 4.2: tnode list is in tnode.
    if hasattr(v.t,"tnodeList") and len(v.t.tnodeList) > 0 and not v.isAnyAtFileNode():
        g.es("deleting tnodeList for ",v,color="blue")
        delattr(v.t,"tnodeList")
        c.setChanged(True)
    v = v.threadNext()

g.es("tnodeList script complete")
#@nonl
#@-node:Clean unused tnodeLists
#@+node:Clear all timestamps
# About the only time you should run this script is when:
# - changing the format of timestamps in nodeIndices.setTimestamp or
# - when making a retroactive change to leoID.txt.

if 0: # This is usually a very bad idea.

    # Clearing fileIndex on every tnode forces Leo to regenerate the indices.
    v = c.rootVnode()
    while v:
        v.t.fileIndex = None
        v = v.threadNext()

    g.es("all timestamps cleared")
#@nonl
#@-node:Clear all timestamps
#@+node:Clear all uAs (unknown attributes)
# Delete unknownAttributes (uA's) from both vnodes and tnodes, then redraw.
put = g.es_print
for p in c.allNodes_iter():
    if hasattr(p.v,"unknownAttributes"):
        put("deleting v.unknownAttributes:",
            p.headString(),
            g.listToString(p.v.unknownAttributes.keys()))
        delattr(p.v,"unknownAttributes")
    if hasattr(p.v.t,"unknownAttributes"):
        put("deleting t.unknownAttributes:",
            p.headString(),
            g.listToString(p.v.t.unknownAttributes.keys()))
        delattr(p.v.t,"unknownAttributes")
put('done') 
c.redraw()
#@nonl
#@-node:Clear all uAs (unknown attributes)
#@-node:Clear all uA's, tnodeLists, etc.
#@+node:Contributed @button nodes
#@+node:@@button Show other clones
@
Ever have a clone that is difficult to understand outside the context of its
original parent? Here's some code to help. It displays the headline of the
current node plus the headlines of all the parents of all the clones of the
current node. Selecting a displayed parent headline moves the current node to
the corresponding clone in the outline.

The idea is to be able to quickly see the context of all the clones of the
current node and to be able to easily navigate from one clone instance to the
next.
@c

@others
c.cn = cloneNavigator(c)
c.cn.displayClones(c)
#@+node:class cloneNavigator
class cloneNavigator:
    '''
       Displays the headline of the current node plus the headlines of
       all the parents of all the clones of the current node.  Selecting
       a displayed parent headline moves the current node to the
       corresponding clone in the outline.

       The idea is to be able to quickly see the context of all the clones
       of the current node and to be able to easily navigate from one clone
       instance to the next.
    '''
    @others
#@nonl
#@+node:init
def __init__ (self,c):
    '''Create the clone-navigator UI (a label plus a listbox) in the
    'Clones' tab of the log pane.'''
    self.c = c
    import Tkinter as Tk
    if 0:
        f = Tk.Toplevel()
    else:
        log = c.frame.log
        log.selectTab('Clones')
        f = log.tabFrame
        # Clear out any widgets left over from a previous run of this script.
        for w in f.winfo_children():
            w.destroy()

    # Create and pack empty label and listbox
    self.title = Tk.Label(f)
    self.title.pack(anchor="nw")
    self.lb = Tk.Listbox(f)
    self.lb.pack(expand=1,fill="both")
#@nonl
#@-node:init
#@+node:getAllClones
def getAllClones(self,p):
    '''Return the list of all positions (copies) sharing p's tnode,
    i.e. p itself and all of its clones, in outline order.'''
    c = self.c
    target = p.v.t
    return [q for q in c.allNodes_iter(copy=True) if q.v.t == target]
#@nonl
#@-node:getAllClones
#@+node:displayClones
def displayClones(self,c):
    '''Displays the parent headline for all the clones of the current position'''
    cp = c.currentPosition()

    # "Title" is the headline of the current node
    self.title.configure(text=cp.headString())

    # Initialize listbox and clone list
    clones = self.getAllClones(cp)
    self.lb.delete(0,self.lb.size()-1)

    # The section references below are expanded by Leo from sibling nodes.
    <<Fill listbox with clone parent headlines>>    
    <<Goto selected position when listbox selection changes>>
#@+node:<<Fill listbox with clone parent headlines>>
# Add the headlines of all the clone parents to the listbox
for p in clones:
    if p.parent():
        text = p.parent().headString()
    else:
        text = "<root>"
    self.lb.insert(self.lb.size(),text)

    # Initial listbox selection corresponds to current position
    if p.v == cp.v:
        self.lb.selection_set(self.lb.size()-1)
#@nonl
#@-node:<<Fill listbox with clone parent headlines>>
#@+node:<<Goto selected position when listbox selection changes>>
# Callback for when a listbox entry is selected            
def gotoSelectedPosition(event,lb=self.lb,c=c,positions=clones):
    idx = int(lb.curselection()[0])
    p = positions[idx]
    c.frame.tree.expandAllAncestors(p)
    c.selectPosition(p)
    return
self.lb.bind(g.angleBrackets("ListboxSelect"), gotoSelectedPosition)
#@nonl
#@-node:<<Goto selected position when listbox selection changes>>
#@-node:displayClones
#@-node:class cloneNavigator
#@-node:@@button Show other clones
#@+node:@@button LeoToHtml
# LeoToHTML by Dan Rahmel

<< docstring >>

# LeoToHtml: write the outline's headlines (and optionally bodies) to a .htm file.
flagIgnoreFiles = True
flagJustHeadlines = True
filePath = "c:\\"

myFileName = c.frame.shortFileName()    # Get current outline filename
myFileName = myFileName[:-4]            # Remove .leo suffix
# Open file for output
f=open(filePath + myFileName + ".htm", 'w')

# Write HTML header information
f.write("<HTML>")
f.write("<BODY>")

for p in c.allNodes_iter():
    myLevel = str(p.level() + 1)
    myHeadline = p.headString()
    # Skip the controlling LeoToHTML node itself.
    if not myHeadline.upper() == "LEOTOHTML":
        # Bug fix: the old test was `myHeadline[:5] != "@file" and not flagIgnoreFiles`,
        # which could never be True with the default flagIgnoreFiles=True, so
        # nothing was ever written.  Use the same (correct) condition as the
        # sibling LeoToRtf script: skip @file nodes only when flagIgnoreFiles.
        if not (myHeadline[:5] == "@file" and flagIgnoreFiles):
            # Write headline at current style level. indent level: 3
            f.write("<H" + myLevel + ">" + myHeadline + "</H" + myLevel + ">")
            # If including body text, convert it to HTML usable format
            if not flagJustHeadlines:
                myBody = p.bodyString().encode( "utf-8" )
                f.write("<P>" + myBody)

# Write closing HTML info
f.write("</BODY>")  
f.write("</HTML>")

# Close file
f.close()
g.es(" Leo -> HTML completed.",color="turquoise4")
#@nonl
#@+node:<< docstring >>
'''
This script takes an outline stored in LEO and outputs it to HTML code.
The HTML can be stuck into a web page, loaded into the Microsoft Word
outline view for printing, or a million other uses.

--- Instructions ---
In LEO, open your outline, insert a node, and set the headline to LEOTOHTML.
Paste this code into the text body. To generate a .htm file of the outline,
select the Execute Script option under the Edit menu while the node is selected.

The switches below allow you to customize some of the settings. Set the flag
properties to True or False.
'''
#@nonl
#@-node:<< docstring >>
#@-node:@@button LeoToHtml
#@+node:@@button LeoToRtf
# LeoToRTF by Dan Rahmel

<< docstring >>

# The switches below allow you to customize some of the settings.
# LeoToRtf: write the outline as a tab-indented RTF file in filePath.
flagIgnoreFiles = True
flagJustHeadlines = False
filePath = "c:\\"

myFileName = c.frame.shortFileName() # Get current outline filename
myFileName = myFileName[:-4] # Remove .leo suffix

g.es(" Leo -> RTF started...",color="turquoise4")

# Open file for output
f=open(filePath + myFileName + ".rtf", 'w')

# Write RTF header information
f.write("{\\rtf1\\ansi\\ansicpg1252\\deff0\\deflang1033{\\fonttbl{\\f0\\fswiss\\
fcharset0 Arial;}}")
f.write("{\\*\\generator LEOtoRTF;}\\viewkind4\\uc1\\pard\\f0\\fs20")

for p in c.allNodes_iter():
    myLevel = str(p.level() + 1)
    myHeadline = p.headString()
    # Check for the controlling LEOTORTF node and ignore it
    if not myHeadline.upper() == "LEOTORTF":
        # Skip @file nodes when flagIgnoreFiles is set.
        if not (myHeadline[:5] == "@file" and flagIgnoreFiles):
            # Write headline with correct # of tabs for indentation
            myOutput = ("\\tab"*int(myLevel)) + " " + myHeadline +"\\par"
            myOutput = myOutput.encode( "utf-8" )
            f.write(myOutput)
            # If including outline body text, convert it to RTF usable format
            if not flagJustHeadlines:
                myBody = p.bodyString().encode( "utf-8" ) 
                f.write("\\tab"*int(myLevel) + " " + myBody + "\\par")

# Write RTF close
f.write("}")  

# Close file
f.close()
g.es(" Leo -> RTF completed.",color="turquoise4")
#@nonl
#@+node:<< docstring >>
'''
This script takes an outline stored in LEO and outputs it to an RTF.
The tabbed RTF file can be loaded into Microsoft Word and formatted as a
proper outline with the Bullets and Numbering formatting options

--- Instructions ---
In LEO, open your outline, insert a node, and set the headline to LEOTORTF.
Paste this code into the text body. To generate a .rtf file of the outline,
select the Execute Script option under the Edit menu while the node is
selected.
'''
#@nonl
#@-node:<< docstring >>
#@-node:@@button LeoToRtf
#@+node:@@button shadow
"""
Look for @thin files in the current subtree.
Convert those thin files into a file with a shadow file,
if this shadow file does not exist already.

FIXME: the line end convention is currently changed:
      unix lineendings are converted to DOS lineendings,
      if files are converted on Windows.
      Not sure if that is a probem or not.
"""

import mod_shadow_core, os, shutil

def marker_from_extension(filename):
    marker = g.comment_delims_from_extension(filename)[0]
    return marker and marker + '@'

shadow_subdir = c.config.getString("shadow_subdir").strip()
if not shadow_subdir: assert False,'No shadow_subdir setting'
prefix = c.config.getString("shadow_prefix")

for p in p.self_and_subtree_iter():
   h = p.headString().strip()
   if h.startswith("@thin"):
       start = h.find("@thin") + len("@thin")
       leofiledir = os.path.split(c.mFileName)[0]
       filename = h[start:].strip()
       fullfilename = os.path.join(leofiledir, filename)
       theDir = os.path.split(fullfilename)[0]
       leoFolder = os.path.join(leofiledir, theDir, shadow_subdir)
       if not os.path.exists(leoFolder):
           os.mkdir(leoFolder)
           assert os.path.exists(leoFolder)
       else:
           assert os.path.isdir(leoFolder)
       junk, name = os.path.split(filename)
       newname = os.path.join(leoFolder, prefix + name)
       if os.path.exists(newname):
           continue
       g.es( "renaming %s to %s" % (filename, newname))
       shutil.copy2(fullfilename, newname)
       os.unlink(fullfilename)
       f = file(fullfilename, "w")
       f.close()
       mod_shadow_core.copy_file_removing_sentinels(
           sourcefilename=newname,
           targetfilename=fullfilename,
           marker_from_extension = marker_from_extension)
       g.es("File %s is now shadowed" % filename)
#@nonl
#@-node:@@button shadow
#@-node:Contributed @button nodes
#@+node:Find scripts
#@+node:Find and replace all functions in leoGlobals.py
# Driver: collect the names of all functions defined in leoGlobals.py, then
# (optionally) prepend 'g.' to calls of those names throughout the Code tree.
import string

@others

if 1:
    << set nameList to the list of functions in leoGlobals.py >>
else:
    p = g.findNodeAnywhere("@file leoGlobals.py")
    nameList = findFunctionsInTree(p)

    nameList.sort() ; g.enl()
    for name in nameList: g.es("'%s'," % name)

    s = "%d functions in leoGlobals.py" % len(nameList)
    g.es_print(s)

if 0:
    p = g.findTopLevelNode(c,"Code")
    g.enl() ; g.enl()
    count = prependNamesInTree(p,nameList,"g.",replace=True) # Just prints if replace==False.
    s = "%d --- done --- " % count
    g.es_print(s)
#@nonl
#@+node:findFunctionsInTree
def findFunctionsInTree(p):

    '''Return the unique function names found by findDefs anywhere in
    p's subtree (including p itself), in first-seen order.'''

    result = []
    for node in p.self_and_subtree_iter():
        for name in findDefs(node.bodyString()):
            if name not in result:
                result.append(name)
    return result
#@nonl
#@-node:findFunctionsInTree
#@+node:findDefs
def findDefs(body):

    '''Return the names defined by top-level "def" lines in body.

    Returns [] immediately when a "class" line or an __init__ def is
    seen, since classes are defined in a single node.'''

    names = []
    for line in body.split('\n'):
        i = g.skip_ws(line,0)
        if g.match(line,i,"class"):
            return [] # The classes are defined in a single node.
        if not g.match(line,i,"def"):
            continue
        start = g.skip_ws(line,i+3)
        end = g.skip_c_id(line,start)
        if end <= start:
            continue # No identifier after 'def'.
        name = line[start:end]
        if g.match(name,0,"__init__"):
            return [] # Disallow other class methods.
        names.append(name)
    return names
#@nonl
#@-node:findDefs
#@+node:prependNamesInTree
def prependNamesInTree(p,nameList,prefix,replace=False):

    '''Prepend prefix to calls of each name in nameList throughout p's
    subtree.  Returns the total number of replacements.  When replace is
    False the matches are only printed, not written back.'''

    c = p.c

    assert(len(prefix) > 0)
    ch1 = string.letters + '_'                   # chars that may precede an identifier
    ch2 = string.letters + string.digits + '_'   # chars that may follow one
    def_s = "def " ; def_n = len(def_s)
    prefix_n = len(prefix)
    total = 0
    c.beginUpdate()
    try:
        for p in p.self_and_subtree_iter():
            count = 0 ; s = p.bodyString()
            printFlag = False
            if s:
                for name in nameList:
                    i = 0 ; n = len(name)
                    while 1:
                        << look for name followed by '(' >>
                if count and replace:
                    if 0:
                        << print before and after >>
                    c.setBodyString(p,s)
                    p.setDirty()
            g.es("%3d %s" % (count,p.headString()))
            total += count
    finally:
        c.endUpdate()
    return total
#@nonl
#@+node:<< look for name followed by '(' >>
i = s.find(name,i)
if i == -1:
    break
elif g.match(s,i-1,'.'):
    i += n # Already an attribute.
elif g.match(s,i-prefix_n,prefix):
    i += n # Already preceded by the prefix.
elif g.match(s,i-def_n,def_s):
    i += n # preceded by "def"
elif i > 0 and s[i-1] in ch1:
    i += n # Not a word match.
elif i+n < len(s) and s[i+n] in ch2:
    i += n # Not a word match.
else:
    j = i + n
    j = g.skip_ws(s,j)
    if j >= len(s) or s[j] != '(':
        i += n
    else: # Replace name by prefix+name
        s = s[:i] + prefix + name + s[i+n:]
        i += n ; count += 1
        # g.es('.',newline=False)
        if 1:
            if not printFlag:
                printFlag = True
                # print p.headString()
            print g.get_line(s,i-n)
#@nonl
#@-node:<< look for name followed by '(' >>
#@+node:<< print before and after >>
print "-"*10,count,p.headString()
print "before..."
print p.bodyString()
print "-"*10,"after..."
print s
#@nonl
#@-node:<< print before and after >>
#@-node:prependNamesInTree
#@+node:<< set nameList to the list of functions in leoGlobals.py >>
nameList = (
'alert',
'angleBrackets',
'appendToList',
'callerName',
'CheckVersion',
'choose',
'clearAllIvars',
'clear_stats',
'collectGarbage',
'computeLeadingWhitespace',
'computeWidth',
'computeWindowTitle',
'createTopologyList',
'create_temp_name',
'disableIdleTimeHook',
'doHook',
'dump',
'ecnl',
'ecnls',
'enableIdleTimeHook',
'enl',
'ensure_extension',
'es',
'esDiffTime',
'es_error',
'es_event_exception',
'es_exception',
'escaped',
'executeScript',
'file_date',
'findNodeAnywhere',
'findTopLevelNode',
'findNodeInTree',
'findReference',
'find_line_start',
'find_on_line',
'flattenList',
'funcToMethod',
'getBaseDirectory',
'getOutputNewline',
'getTime',
'get_Sherlock_args',
'get_directives_dict',
'get_leading_ws',
'get_line',
'get_line_after',
'getpreferredencoding',
'idleTimeHookHandler',
'importFromPath',
'initScriptFind',
'init_sherlock',
'init_trace',
'isUnicode',
'isValidEncoding',
'is_c_id',
'is_nl',
'is_special',
'is_ws',
'is_ws_or_nl',
'joinLines',
'listToString',
'makeAllNonExistentDirectories',
'makeDict',
'match',
'match_c_word',
'match_ignoring_case',
'match_word',
'module_date',
'openWithFileName',
'optimizeLeadingWhitespace',
'os_path_abspath',
'os_path_basename',
'os_path_dirname',
'os_path_exists',
'os_path_getmtime',
'os_path_isabs',
'os_path_isdir',
'os_path_isfile',
'os_path_join',
'os_path_norm',
'os_path_normcase',
'os_path_normpath',
'os_path_split',
'os_path_splitext',
'pause',
'plugin_date',
'plugin_signon',
'printDiffTime',
'printGc',
'printGcRefs',
'printGlobals',
'printLeoModules',
'print_bindings',
'print_stats',
'readlineForceUnixNewline',
'redirectStderr',
'redirectStdout',
'removeLeadingWhitespace',
'removeTrailingWs',
'reportBadChars',
'restoreStderr',
'restoreStdout',
'sanitize_filename',
'scanAtEncodingDirective',
'scanAtFileOptions',
'scanAtLineendingDirective',
'scanAtPagewidthDirective',
'scanAtRootOptions',
'scanAtTabwidthDirective',
'scanDirectives',
'scanError',
'scanf',
'set_delims_from_language',
'set_delims_from_string',
'set_language',
'shortFileName',
'skip_blank_lines',
'skip_block_comment',
'skip_braces',
'skip_c_id',
'skip_heredoc_string',
'skip_leading_ws',
'skip_leading_ws_with_indent',
'skip_line',
'skip_long',
'skip_matching_delims',
'skip_nl',
'skip_non_ws',
'skip_parens',
'skip_pascal_begin_end',
'skip_pascal_block_comment',
'skip_pascal_braces',
'skip_pascal_string',
'skip_php_braces',
'skip_pp_directive',
'skip_pp_if',
'skip_pp_part',
'skip_python_string',
'skip_string',
'skip_to_char',
'skip_to_end_of_line',
'skip_to_semicolon',
'skip_typedef',
'skip_ws',
'skip_ws_and_nl',
'splitLines',
'stat',
'stdErrIsRedirected',
'stdOutIsRedirected',
'toEncodedString',
'toUnicode',
'toUnicodeFileEncoding',
'top',
'trace',
'trace_tag',
'update_file_if_changed',
'utils_rename',
'windows',
'wrap_lines')
#@nonl
#@-node:<< set nameList to the list of functions in leoGlobals.py >>
#@-node:Find and replace all functions in leoGlobals.py
#@+node:Find script to change OnX to x in body (didn't quite work :-)
# Script to change OnXXX to xxx in all body text.
if 0:

    v = c.currentVnode()
    v = v.threadNext()
    after = v.nodeAfterTree()
    count = 0
    while v and v != after:
        while 1:
            s = v.bodyString()
            if not s: break
            i = s.find("c.On")
            if i == -1: break
            c = s[i+4].lower()
            s = s[:i] + 'c.' + c + s[i+5:]
            c.setBodyString(v,s)
            count += 1
        v = v.threadNext()
    print count
#@nonl
#@-node:Find script to change OnX to x in body (didn't quite work :-)
#@+node:Find scripts to change OnX to x in headline
if 0: # Script to change OnXxx to xxx in all headlines
    v = c.currentVnode()
    after = v.nodeAfterTree()
    while v and v != after:
        h = v.headString()
        if g.match(h,0,"On") and len(h) > 2:
            h = h[2].lower() + h[3:]
            print h
            v.setHeadString(h)
        v = v.threadNext()

# Script to change OnXXX to xxx in all body text.
if 0:
    v = c.currentVnode()
    after = v.nodeAfterTree()
    while v and v != after:
        s = v.bodyString()
        if s:
            i = s.find("def ")
            if i > -1:
                c = s[i+6].lower()
                s = s[:i] + "def " + c + s[i+7:]
                print v.headString()
                c.setBodyString(v,s)
        v = v.threadNext()
#@nonl
#@-node:Find scripts to change OnX to x in headline
#@+node:findDosFile
import fnmatch, os

def findDosFile(pattern, dirname):

    """Check for crlf in files"""

    files = os.listdir(dirname)
    names = fnmatch.filter(files, pattern)
    for name in names:
        path = g.os_path_join(dirname, name)
        if g.os_path_isfile(path):
            bytes = open(path, 'rb').read()
            count = bytes.count('\r\n')
            if '\0' not in bytes and count:
                print "%4d %s" % (count,path)

dir = "c:\prog\leoCvs\leo"
print ; findDosFile("*",dir)
#@nonl
#@-node:findDosFile
#@+node:headfind
# Run this script from a scriptButton.
<< about this script >>
import re

def headfind():
    """Search with re and 
    - GO to found headline beginning with the selected text or clipboard buffer
    or also GO when line begins with @ and word or string in variable sMyOwnPrefix
    - EXCEPT when found search string is '-info' node
    (BOTH  1.followed by ' -info'
    AND  2.appears anywhere in headline(preceding space or start) 
    THEN  just SHOW found info node's body text in cleared Log pane.
    """
    s = c.frame.body.getSelectedText() or g.app.gui.getTextFromClipboard()
    if s:
        if len(s) == 1: s = "index -info" #if select is one char try to goto this named index node
        s = re.escape(s.lower())
        sUseLogTrigger = re.escape(" -info")
        sMyOwnPrefix = re.escape("FOLLOWING FILE IS: ").lower()
        # NOTE(review): '\@' and '\-' are non-standard escapes; Python keeps
        # them literally here, which is what the regex intends.
        sAllowablePrefixRe = "\@([A-Za-z][A-Za-z0-9\-]+) "
            # @ char, followed by alpha,some alphanum or dash chars, then space ...matches Leo special nodes
        for p in c.allNodes_iter():
            srch="(^%s%s|^%s%s|^%s| %s%s)" % (sMyOwnPrefix,s,sAllowablePrefixRe,s,s,s,sUseLogTrigger) #all re
            if re.findall(srch,p.headString().lower()):
                g.es("found " + s)
                sUseLogTrigger_srch="(^| )%s%s" % (s,sUseLogTrigger) #first just Log trigger re
                if re.findall(sUseLogTrigger_srch,p.headString().lower()):
                    body2=p.bodyString()
                    c.frame.log.logCtrl.delete("1.0","end"); # clear Log pane before message
                    # g.es(body2,color="orange")
                    return
                else:
                    # Goto: expand ancestors so the match is visible, then select it.
                    c.beginUpdate()
                    try:
                        c.frame.tree.expandAllAncestors(p)
                        c.selectVnode(p)
                    finally:
                        c.endUpdate()
                    return

        g.es("no headline matches '%s'" % (s),color="blue")
    else:
        g.es("no selected text & clipboard empty",color="blue")

headfind()
#@nonl
#@+node:<< about this script >>
@ PREFIXES: Now will jump to any headline where search is preceded by an @+chars+space
and alternatively a fixed prefix string+space.

CLEAR LOG: Now also clears Log for display of -info nodes. 

NOTE:I already had a file with a bunch of text files each preceded by "THE FOLLOWING
FILE IS: " and a list of these files at the top of everything. After global
changing these lines with "- " (and at first line), I imported flattened outline...
and "there you go" a index-driven Leo version. :)

bill p
#@nonl
#@-node:<< about this script >>
#@-node:headfind
#@+node:Find entries in k.guiBindNamesDict
# A script to find uses of the names defined in k.guiBindNamesDict in Leo's core.

k = c.k ; d = k.guiBindNamesDict
# Only consider names that are not already plain Tk key names.
keys = [z for z in d.values() if z not in k.tkNamesList]
keys.sort()
h = ' tkKeys.defineSpecialKeys'
for key in keys:
    for p in c.allNodes_iter():
        # Skip the defining node and any 'keywords' tables.
        if p.headString() != h and p.headString().find('keywords') == -1:
            s = p.bodyString()
            # Look for the key quoted either way.
            for z in ('"%s"' % (key), "'%s'" % (key)):
                if s.find(z) != -1:
                    print '%20s %s' % (z,p.headString())
#@nonl
#@-node:Find entries in k.guiBindNamesDict
#@+node:find w.xxx
# Count occurrences of 'w.<attr>' in the Code tree, optionally restricted to
# attributes not already defined on leoTkTextWidget.
import string
import leoTkinterFrame
words = {}
word_chars = string.ascii_letters + string.digits + '_'
p = g.findTopLevelNode(c,'Code')
p1 = p.copy()
baseClass = leoTkinterFrame.leoTkTextWidget
allMatches = True
seen = {}
for p in p.self_and_subtree_iter():
    if seen.get(p.v.t): continue # search clones once.
    seen[p.v.t] = True
    s = p.bodyString()
    i = 0
    while 1:
        j = s.find('w.',i)
        if j == -1: break
        ch = s[j-1]
        # Accept only matches where 'w' is a whole word.
        if j == 0 or ch not in word_chars:
            j += 2
            k = g.skip_c_id(s,j)
            word = s[j:k]
            if allMatches or not hasattr(baseClass,word):
                words[word] = 1 + words.get(word,0)
            i = k
        else:
            i += 2
keys = words.keys()
keys.sort()
aList = ['%3d %s' % (words.get(key),str(key)) for key in keys]
print g.listToString(aList)
g.es('searched %s' % p1.headString())
#@nonl
#@-node:find w.xxx
#@-node:Find scripts
#@+node:Fun...
#@+node:Drawing experiments
#@+node:Rectangles & ovals
# Draw 5000 random rectangles, ovals and lines on a Tk canvas.
import Tkinter as Tk
import random as r ; rand = r.randint

top = Tk.Toplevel(None) ; top.title("Drawing")
canvas = Tk.Canvas(top,height="5i",width="9i") # ,background="white")
canvas.pack() ; top.update()

mincolor,maxcolor=125,225

for n in xrange(5000):
    x,y = rand(0,900),rand(0,500)
    w = rand(1,10) ; h = w * r.uniform(0.5,1.5)
    color = "#%02x%02x%02x" % (rand(0,maxcolor/2),rand(mincolor,maxcolor),rand(mincolor,maxcolor))
    kind = rand(1,3)
    sign = rand(-1,1) # rand(0,2)-1
    if kind == 1:
        canvas.create_rectangle(x,y,x+w,y+h,fill=color,width=0)
    elif kind == 2:
        canvas.create_oval(x,y,x+w,y+h,fill=color,width=0)
    else:
        canvas.create_line(x,y,x+sign*5*w,y+5*h,fill=color)
    if 0: # Redrawing slows things down a lot.
        if (n % 1000) == 0: top.update()
#@nonl
#@-node:Rectangles & ovals
#@+node:Lines & arcs
# Draw a chain of 2000 random lines and arcs on a Tk canvas, each segment
# starting where the previous one ended.
import Tkinter as Tk
import random as r

top = Tk.Toplevel(None) ; top.title("Drawing")
canvas = Tk.Canvas(top, height = "5i", width = "9i")
canvas.pack() ; top.update()
rand = r.randint

x,y = 10,10
mincolor,maxcolor=125,225

for n in xrange(2000):
    x2,y2 = rand(0,900),rand(0,500)
    color = "#%02x%02x%02x" % (rand(mincolor,maxcolor),rand(mincolor,maxcolor),rand(mincolor,maxcolor))
    width = "%fm" % r.uniform(0.1,0.6)
    canvas.create_line(x,y,x2,y2,fill=color,width=width)
    extent = rand(180,270)
    canvas.create_arc(x,y,x2,y2,outline=color,width=width,style="arc",extent=extent)
    x,y = x2,y2
    # if (n % 1000) == 0: top.update()
#@nonl
#@-node:Lines & arcs
#@+node:Paul Klee
# Draw a grid of jittered, randomly colored squares ("Paul Klee" style).
import Tkinter as Tk
import random as r ; rand = r.randint

top = Tk.Toplevel(None) ; top.title("Paul Klee")
canvas = Tk.Canvas(top,height="5i",width="9i") # ,background="white")
canvas.pack() ; top.update()

# Paul Klee
mincolor,maxcolor=125,225
xmax,ymax = 800,400
stipples = [None,"gray75"] # "gray12","gray25","gray50",]
h=w=90
for x in xrange(10,xmax,w):
    for y in xrange(10,ymax,h):
        color = "#%02x%02x%02x" % (rand(mincolor,maxcolor),rand(mincolor,maxcolor),rand(mincolor,maxcolor))
        # NOTE(review): 'range' shadows the builtin; harmless in this script.
        range = h/6
        stipple = stipples[rand(0,len(stipples)-1)]
        dx = r.uniform(0.0,range) - range/2
        dy = r.uniform(0.0,range) - range/2
        canvas.create_rectangle(x+dx,y+dy,x+dx+w,y+dy+h,fill=color,width=0,stipple=stipple)
#@nonl
#@-node:Paul Klee
#@+node:Complex functions
# Plot a complex power function by coloring each pixel of a Tk canvas.
import Tkinter as Tk
import random as r ; rand = r.randint

mincolor,maxcolor=125,225
xmax,ymax = 300,300
h=w=1
i = r.uniform(2.0,4.0)
j = r.uniform(-5.0,5.0)
power = r.uniform(1.1,1.2)
    #(1.5,1.7)

top = Tk.Toplevel(None)
top.title("Complex function: (%f,%f)*(x,y)**%f" % (i,j,power))
canvas = Tk.Canvas(top,height="5i",width="9i") # ,background="white")
canvas.pack() ; top.update()

# '@others' is a Leo directive: child nodes (if any) are inserted here.
@others

for n in xrange(1):
    i += 0.5
    j += 0.5
    power += 0.01
    # print "i,j,power:",i,j,power
    # NOTE(review): this rebinds 'c' (normally the Leo commander); the rest
    # of this script does not use the commander, so it is harmless here.
    c = complex(i,j)
    for format,m in (
        #("#%02x%02x%02x",256),
        #("#%03x%03x%03x",256*8),
        ("#%04x%04x%04x",256*256),
    ):
        for x in xrange(0,xmax,1):
            for y in xrange(0,ymax,1):
                n = complex(x,y)
                z = pow(c*n,power)
                n1 = int(z.real*m) ; n2 = int(z.imag*m)
                color = format % (n1%m,n2%m,abs(n1-n2)%m)
                # canvas.create_rectangle(w*x,h*y,w*x+w,h*y+h,fill=color,width=0)
                canvas.create_line(x,y,x+1,y+1,fill=color,width=1)
        top.update()
print "done"
#@nonl
#@-node:Complex functions
#@-node:Drawing experiments
#@+node:elispToPy
#@+node:The project is doomed
@language lisp
@nocolor
@

This project is doomed.  It would take AI (lots of special cases) to
translate elisp to readable Python.

Furthermore, how are we to simulate the semantics of lisp constructs
such as interactive, fboundp, setcdr and nconc?

For example, the following is going to cause all kinds of semantic problems.
@c

@color

(defun dired-do-igrep (program expression &optional options arg)
  "*Run `grep` on the marked (or next prefix ARG) files.
See `\\[igrep]'."

  (interactive
   (let ((igrep-args
	  (let ((current-prefix-arg nil))
	    (igrep-read-args t))))
     ;; Delete FILES:
     (setcdr (nthcdr 1 igrep-args) (nthcdr 3 igrep-args))
     ;; Append ARG:
     (nconc igrep-args (list current-prefix-arg))))

  (igrep program
	 expression
	 (funcall (cond ((fboundp 'dired-get-marked-files) ; GNU Emacs
			 'dired-get-marked-files)
			((fboundp 'dired-mark-get-files) ; XEmacs
			 'dired-mark-get-files))
		  t arg)
	 options))

; The following is not going to be easy to translate!

(defun spam (prompt)
    (if cond
        then-part
        else-part1
        else-part2
    )
)

@nocolor
# There is no *simple* translation of the above to Python!

def spam (prompt):
    return  # need ternary operator!

# Need complex code generators!
def spam (prompt):
    if cond:
        return then-part
    else:
        else-part1
        return else-part2


#@-node:The project is doomed
#@+node:older e2pyScript
@language python
@tabwidth -4

import string

tabWidth = 4 # how many blanks in a tab.
printFlag = False
doLeoTranslations,dontDoLeoTranslations = True,False

gClassName = "" # The class name for the present function.  Used to modify ivars.
gIvars = [] # List of ivars to be converted to self.ivar

@others

run(c)
#@nonl
#@+node:run
def run (c):
    '''Translate each child of the -elisp2py-input- node with e2py and
    print the result.'''
    # NOTE(review): the output node is located but never written to.
    import leoTest
    u = leoTest.testUtils() 
    input  = u.findNodeAnywhere(c,'-elisp2py-input-')
    output = u.findNodeAnywhere(c,'-elisp2py-output-')
    assert input and output

    print ; print '*' * 60
    e = e2py(dumping=False)
    for p in input.children_iter():
        print ; print '-' * 10, p.headString()
        print p.bodyString()
        result = e.doOuterBlock(p.bodyString())
        print '-' * 20
        print result
#@nonl
#@-node:run
#@+node:class e2py
# NOTE: '@others' below is a Leo directive; the class's methods live in
# child nodes of this outline node.
class e2py:

    '''A text-based (not token-based) approach to parsing.'''

    @others
#@nonl
#@+node:ctor
def __init__ (self,dumping=False):

    # Debug flag: presumably enables self.dump output -- confirm.
    self.dumping = dumping
#@-node:ctor
#@+node:doBlock
def doBlock (self,s,strip=True):

    '''Handle an outer block or a formerly parenthesized block.

    Returns a list of output lines: text outside parens is passed through
    unchanged; each parenthesized group is translated by doParenBlock.
    NOTE(review): the 'strip' parameter is unused in this method.'''

    i = 0 ; result = []
    while i < len(s):
        j = s.find('(',i)
        if j == -1:
            tail = s[i:]
            if tail:
                # g.trace(repr(tail))
                result.extend(g.splitLines(tail))
            break
        else:
            prev = s[i:j]
            if prev: result.extend(g.splitLines(prev))
            i = j
            n,ok = self.findMatchingBracket(s[i:])
            if ok:
                block = s[i:i+n]
                block = self.stripLws(block)
                block_result = self.doParenBlock(block)
                if block_result:
                    result.extend(block_result)
            # NOTE(review): i advances by n even when no matching bracket was
            # found -- presumably findMatchingBracket still returns a positive
            # n in that case, otherwise this would loop forever.  Confirm.
            i += n

    result = self.removeBlankLines(result)
    self.dump(result)
    return result
#@nonl
#@-node:doBlock
#@+node:doParenBlock
def doParenBlock (self,s):

    ''' - Strip outer parens.
        - Call doBlock recursively for all inner parens.
        - Add one level of indentation to each line.

    s must be exactly one bracketed group, including both brackets.'''
    n,ok = self.findMatchingBracket(s)
    assert n == len(s) and ok
    s = s[1:-1] # Strip the brackets
    i = 0
    i = g.skip_ws(s,i)
    j,id = self.skip_id(s,i)
    if id:
        # Dispatch on the leading identifier of the form.
        s = s[j:].lstrip()
        if id == 'defun': result = self.doDef(s,id)
        elif id == 'let': result = self.doLet(s)
        elif id == 'if': result = self.doIf(s)
        elif id in ('prog1','progn'): result = self.doProg(s,id)
        else: result = self.doId(s,id)
    elif s[i] == '(':
        # A doubly-parenthesized form, e.g. a let binding list.
        s = s[i:].lstrip()
        result = self.doDoubleParen(s)
    else: result = self.doBlock(s)

    self.dump(result)
    return result
#@nonl
#@-node:doParenBlock
#@+node:doDef
def doDef(self,s,id):
    '''Translate an elisp defun into a Python-style def line plus an
    indented body.  Falls back to a generic block on any error.'''
    # g.trace(id)
    if id == 'defun':
        kind,i,j,name = self.getToken(s,0)
        if kind == 'id':
            kind,i,j,params = self.getToken(s,j)
            if kind == '()':
                # Found a name and a parenthesized parameter list.
                s = s[j:]
                result = ['def %s %s:' % (name,params)]
                result.extend(self.indent(self.doBlock(s)))
                self.dump(result)
                return result

    # Fall through if error.
    result = [id]
    result.extend(self.indent(self.doBlock(s)))
    self.dump(result)
    return result
#@nonl
#@-node:doDef
#@+node:doDoubleParen
def doDoubleParen (self,s):

    '''Translate a block that begins with a nested paren block:
    the balanced prefix becomes an indented "(:"/":)" section and
    the remainder is translated as an ordinary block.'''

    size, matched = self.findMatchingBracket(s)
    if not matched:
        result = self.doBlock(s)
    else:
        inner, tail = s[:size], s[size:]
        result = ['(:']
        result += self.indent(self.doParenBlock(inner))
        result.append(':)')
        result += self.doBlock(tail)

    self.dump(result)
    return result
#@-node:doDoubleParen
#@+node:doId
def doId(self,s,id):

    '''Translate a block led by an unrecognized identifier into
    "(id:" / indented body / ":id)" lines.

    The original had an unreachable `else` branch behind `if 1:`;
    that dead code is removed here.'''

    result = ['(%s:' % id]
    result.extend(self.indent(self.doBlock(s)))
    result.extend([':%s)' % id])

    self.dump(result)
    return result
#@nonl
#@-node:doId
#@+node:doIf
def doIf(self,s):

    '''Translate an elisp if block into "if:"/":if" lines around an
    indented body.  The disabled else branch is a planned alternative.'''

    # g.trace()

    if 1:
        result = ['if:']
        result.extend(self.indent(self.doBlock(s)))
        result.extend([':if'])

    else: # not yet.  Don't suck everything into the 'if' statement!
        block = self.doBlock(s)
        result = ['if (%s):' % ' '.join(block)]

    self.dump(result)
    return result
#@nonl
#@-node:doIf
#@+node:doLet
def doLet(self,s):

    '''Translate an elisp let block: "let:" followed by the indented
    body and a closing ":let".'''

    result = ['let:']
    result.extend(self.indent(self.doBlock(s)))
    result.append(':let')

    self.dump(result)
    return result
#@-node:doLet
#@+node:doOuterBlock
def doOuterBlock (self,s):

    '''Handle outermost code.  Return a string, not a list.'''

    stripped = self.stripLws(s)
    lines = self.removeBlankLines(self.doBlock(stripped))
    return '\n'.join(lines)
#@nonl
#@-node:doOuterBlock
#@+node:doProg
def doProg(self,s,id):

    '''Translate a prog1/progn block: the keyword line followed by the
    indented translation of the body.'''

    result = [id] + self.indent(self.doBlock(s))

    self.dump(result)
    return result
#@nonl
#@-node:doProg
#@+node:dump
def dump(self,lines):

    '''Print lines (a list) for debugging when self.dumping is True.'''

    if self.dumping:
        # Python 2 print statements; g.callerName names the calling method.
        print '%s returns...' % g.callerName(2)
        lines = [str(line) for line in lines]
        print g.listToString(lines)
#@nonl
#@-node:dump
#@+node:findMatchingBracket
def findMatchingBracket(self,s,i=0):

    '''Scan from the opening bracket at s[i] to its matching close bracket.

    Return (end,True) where end is one past the closing bracket, or
    (len(s),False) if the bracket is never closed.

    The error report now uses a parenthesized print (identical output in
    Python 2 for a single string, and valid Python 3 syntax).'''

    ch1 = s[i]
    assert ch1 in "({["
    delim = self.matchingBracket(ch1)
    level = 1
    for ch in s[i+1:]:
        i += 1
        if ch == ch1:
            level += 1
        elif ch == delim:
            level -= 1
            if level == 0: return i+1,True
    print("%s not matched by %s in %s" % (ch1,delim,s))
    return len(s),False
#@nonl
#@-node:findMatchingBracket
#@+node:getToken
def getToken (self,s,i=0):

    '''Return (kind,i,j,val) for the next token of s starting at i.

    kind is one of: '"' (string), 'id', '()' (balanced paren block),
    '(' (unmatched open paren), the character itself, or None at end.'''

    i = g.skip_ws(s,i)
    if i < len(s):
        ch = s[i]
        if ch == '"':
            j = self.skipString(s,i)
            val = '"',i,j,s[i:j]
        elif ch in string.ascii_letters or ch in string.digits or ch in '-_':
            j,name = self.skip_id(s,i)
            val = 'id',i,j,name
        elif ch == '(':
            j,ok = self.findMatchingBracket(s,i)
            if ok:
                val = '()',i,j,s[i:j]
            else:
                # Unbalanced: report a zero-width '(' token.
                val = '(',i,i,'('
        else:
            # Any other single character is its own token kind.
            val = ch,i,i,ch
    else:
        val = None,i,i,None

    # g.trace(repr(s[i]),val)
    return val
#@nonl
#@-node:getToken
#@+node:indent
def indent (self,lines,strip=True):

    '''Prepend four spaces to each line; when strip is True, drop
    whitespace-only lines first.'''

    result = []
    for line in lines:
        if strip and not line.strip():
            continue
        result.append('    ' + line)
    return result
#@nonl
#@-node:indent
#@+node:matchingBracket
def matchingBracket (self,ch):

    '''Return the closing bracket matching the open bracket ch.'''

    assert ch in "({["
    return {'(': ')', '{': '}', '[': ']'}[ch]
#@nonl
#@-node:matchingBracket
#@+node:skip_id
def skip_id(self,s,i=0):

    '''Skip an elisp identifier (hyphens allowed) starting at s[i].
    Return (end, spelling).'''

    end = g.skip_id(s,i,chars='-')
    return end, s[i:end]
#@nonl
#@-node:skip_id
#@+node:skipString
def skipString(self,s,i):

    '''Skip the elisp double-quoted string starting at s[i], honoring
    backslash escapes.  Return the index just past the closing quote.

    The run-on report now uses a parenthesized print (identical output
    in Python 2 for a single string, and valid Python 3 syntax).'''

    # Skip the opening double quote.
    i1 = i
    ch = s[i]
    i += 1
    assert(ch == '"')

    while i < len(s):
        ch = s[i]
        i += 1
        if ch == '"': return i
        elif ch == '\\': i += 1  # Skip the escaped character.

    print("run-on elisp string: %s" % g.get_line(s[i1:]))
    return i
#@nonl
#@-node:skipString
#@+node:removeBlankLines
def removeBlankLines (self,lines):

    '''Return lines with whitespace-only entries removed.'''

    result = []
    for line in lines:
        if line.strip():
            result.append(line)
    return result
#@nonl
#@-node:removeBlankLines
#@+node:stripLws
def stripLws(self,s):

    '''Remove leading whitespace from every line of s.'''

    return ''.join([line.lstrip() for line in g.splitLines(s)])
#@nonl
#@-node:stripLws
#@-node:class e2py
#@-node:older e2pyScript
#@+node:newer elispToPy
#@+node:new-elisp2py-input-
@language lisp
#@+node:if-test
@language lisp

; What would the translation of this be???

; def (spam):
;   '''docstring'''
;   return ?? # A python if statement can't be used here!

(defun spam (prompt)
    "docstring"
    (if igrep-insert-default-key
        (define-key minibuffer)
        (do-else2)
        'do-else1
    )
)

#@-node:if-test
#@-node:new-elisp2py-input-
#@+node:new-elisp2py-output-
#@-node:new-elisp2py-output-
#@+node:@@button elisp test @key = Alt-3
@first # -*- coding: utf-8 -*-

import leoTest
import leoImport

@others

# Leo script body (Alt-3 button): run the elisp scanner over every child
# of the 'new-elisp2py-input-' node, printing the parse tree and output.
u = leoTest.testUtils(c) 
input_node  = u.findNodeAnywhere('new-elisp2py-input-')
output_node = u.findNodeAnywhere('new-elisp2py-output-')
assert input_node and output_node

for p in input_node.children_iter():

    if 0: # No need for child node yet.
        root = output_node.insertAsLastChild()
        root.initHeadString(p.headString())

    s = p.bodyString()
    scanner = elispScanner()
    aList = scanner.parse(s)
    if 1:
        # Python 2 print statements.
        print 'parse tree...'
        print scanner.dumpList(aList)
        print 'end parse tree'
        print
    result = scanner.gen(aList,indent=0,init=True)
    print scanner.dumpList(result)
#@+node:class elispScanner
class elispScanner:

    '''Scan elisp source into a forest of tokens and generate output from it.'''

    # Leo's @others directive pulls in the methods defined in child nodes.
    @others
#@+node: __init__
def __init__ (self):

    '''Set debugging flags, keyword tables and the output indent level.'''

    # Debugging.
    self.debug = True   # When True, gen_* methods build text lists instead of printing code.
    self.trace = False

    # Semantic info.
    self.def_keywords = ('defun','defvar',)
    self.indent_keywords = ('if','prog','prog1','progn','set',)
    self.expr_keywords = ('and','not','or',)

    self.indent = 0 # Indentation of production output.

    # Dispatch dictionary.  Not used yet; see the note in gen_block_id.
    self.dispatchDict = {}

#@-node: __init__
#@+node:class token
class token:

    '''One elisp syntactic entity together with the comments preceding it.'''

    def __init__ (self,comments,kind,val):
        # Copy the comment list so later caller mutation can't affect us.
        self.comments = comments[:]
        self.kind = kind
        # For 'block:' tokens val is a list of tokens;
        # for every other kind it is the token's spelling.
        self.val = val

    def __repr__ (self):
        return '<token kind: %s, val: %s>' % (self.kind,self.val)

    def __str__ (self):
        # Abbreviate blocks and long strings for readable traces.
        if self.kind == 'block:':
            return 'block: [snip]'
        if self.kind == 'string:':
            return '%s%s' % (self.kind,self.val[:20])
        return '%s%s' % (self.kind,self.val)
#@-node:class token
#@+node:choose
def choose(self,cond,a,b): # warning: evaluates all arguments

    '''Return a when cond is true, else b.'''

    return a if cond else b
#@-node:choose
#@+node:dumpList
def dumpList(self,aList):

    '''Return a multi-line dump of aList (lists nest with indentation);
    anything that is not exactly a list is shown via repr.'''

    if type(aList) is list:
        return '\n'.join(self.dumpListHelper(aList,indent=0))
    return repr(aList)

def dumpListHelper(self,aList,indent):

    '''Return a list of dump lines for aList at the given indent level.'''

    result = []
    pad = ' ' * (4 * indent)

    for item in aList:
        if item is None:
            result.append(pad + 'None')
        elif item == []:
            result.append(pad + '[]')
        elif type(item) is list:
            # Nested lists get bracket lines and one more indent level.
            result.append(pad + '[')
            result.extend(self.dumpListHelper(item,indent+1))
            result.append(pad + ']')
        elif isinstance(item,self.token):
            if item.kind == 'block:':
                result.append(pad + 'block:')
                result.extend(self.dumpListHelper(item.val,indent+1))
            else:
                result.append(pad + str(item))
        else:
            result.append(pad + str(item))

    return result

listToString = dumpList
#@nonl
#@-node:dumpList
#@+node:Parsing...
#@+node:parse
def parse(self,s):

    '''Parse s into a forest of tokens (see scanForest).'''

    return self.scanForest(s)
#@-node:parse
#@+node:scan & helpers
def scan(self,s,i):

    '''Scan elisp text starting at s[i].  Return (i,result) where result
    is a list of token objects.  Recurses on '(' and returns either at
    the matching ')' or at end of string.

    Bug fix: the tail-token branch assigned to a misspelled local
    ('comment' instead of 'comments'), so comments already attached to
    the tail token were emitted again as a trailing-comment token.'''

    start = i ; end = len(s) ; result = []
    comments = [] ; token = self.token
    # A hack. ignore initial @language lisp
    tag = '@language lisp'
    if i == 0 and s[i:i+len(tag)]==tag:
        i += len(tag)
    while i < end:
        progress = i
        ch = s[i]
        if ch == ';':
            j = self.skipComment(s,i)
            # Comments are not tokens, they are attached to tokens.
            comments.append(s[i:j])
            start = i = j
        elif ch == '"':
            j = self.skipString(s,i)
            result.append(token(comments,'string:',s[i:j]))
            comments = []
            start = i = j
        elif ch.isalnum() or ch == u'_':
            j = self.skipId(s,i)
            result.append(token(comments,'id:',s[i:j]))
            comments = []
            start = i = j
        elif ch =='(':
            i += 1
            j,aList = self.scan(s,i)
            result.append(token(comments,'block:',aList))
            # NOTE(review): comments are not cleared here (nor for op
            # tokens below), so one comment can attach to several tokens
            # -- confirm whether that is intended.
            start = i = j
        elif ch == ')':
            i += 1
            return i,result
        else:
            if ch == "'": ch = 'quote'
            if ch not in (' ','\t','\n','\r'):
                result.append(token(comments,'op:',ch))
            i += 1
        assert progress < i,'i: %d, ch: %s' % (i,repr(s[i]))

    if start < end:
        tail = s[start:end].strip()
        if tail:
            result.append(token(comments,'tail:',tail))
            comments = [] # Bug fix: was 'comment = []'.
    if comments:
        result.append(token(comments,'trailing-comment:',''))

    return i,result
#@+node:skipComment
def skipComment (self,s,i):

    '''Skip a ;-comment: return the index of the terminating newline,
    or len(s) if the comment runs to the end of the string.'''

    end = len(s)
    while i < end and s[i] != '\n':
        i += 1
    return i
#@-node:skipComment
#@+node:skipId
def skipId (self,s,i):

    '''Skip an identifier (alphanumerics, underscores and hyphens)
    starting at s[i] and return the index just past it.'''

    n = len(s)
    while i < n:
        ch = s[i]
        if not (ch.isalnum() or ch in '_-'):
            break
        i += 1
    return i
#@-node:skipId
#@+node:skipString
def skipString(self,s,i):

    """Skip the double-quoted string literal starting at s[i], honoring
    backslash escapes.  Return the index just past the closing quote."""

    assert(s[i] == '"')
    i += 1
    n = len(s)
    while i < n:
        c = s[i]
        if c == '\\':
            i += 2      # Skip the backslash and the escaped character.
        elif c == '"':
            i += 1
            break
        else:
            i += 1

    return i
#@nonl
#@-node:skipString
#@-node:scan & helpers
#@+node:scanForest
def scanForest (self,s):

    '''Scan s repeatedly, collecting the tokens of every top-level
    elisp expression into one flat list.'''

    result = [] ; i = 0

    while i < len(s):
        progress = i
        i,tokens = self.scan(s,i)
        if tokens:
            result.extend(tokens)
        assert i > progress

    return result


#@-node:scanForest
#@-node:Parsing...
#@+node:Code generators...
@ From Richard Diebenkorn:

1. Attempt what is not certain. Certainty may or may not come later. It may then
be a valuable delusion.

2. The pretty, initial position which falls short of completeness is not to be
valued--except as stimulus for further moves.

3. Do search.  But in order to find other than what is looked for.

4. Use and respond to the initial fresh qualities but consider them absolutely
expendable.
#@+node:gen
def gen(self,tokens,indent,init=False):

    '''Generate output lines for a list of tokens.  When init is True,
    bracket the output with '=' and '-' separator lines.'''

    result = ['='*40] if init else []

    for tok in tokens:
        result.extend(self.gen_token(tok,indent))

    if init:
        result.append('-'*40)

    return result

#@-node:gen
#@+node:gen_token
def gen_token(self,token,indent):

    '''Generate output for a single token.  In debug mode lines are
    collected and returned; otherwise code is printed as a side effect
    and the returned list is empty.'''

    result = []
    is_block = token.kind == 'block:'

    if self.debug:
        if is_block:
            result.extend(self.gen_block(token,indent))
        else:
            self.put_token(token,indent,result)
    elif is_block:
        self.gen_block(token,indent)
    else:
        self.put_code_token(token)

    return result
#@-node:gen_token
#@+node:gen_block & helper
def gen_block (self,token,indent):

    '''Generate output for a 'block:' token.  Blocks led by an id token
    are dispatched through gen_block_id; anonymous blocks are generated
    recursively one indent level deeper.'''

    if not (token and token.val):
        return []

    children = token.val
    head = children[0]
    result = []

    if head.kind.startswith('id'):
        produced = self.gen_block_id(head.val,children,indent)
    else:
        if self.debug:
            self.put('block...',[],indent,result)
        produced = self.gen(token.val,indent+1)

    result.extend(produced)
    return result
    return result
#@+node:gen_block_id
def gen_block_id (self,theId,tokens,indent):

    '''Generate output for a block whose first token is the id theId.
    tokens includes the leading id token itself.'''

    result = []

    # Eventually there will be a lookup of the dispatch dict here.
    if theId == 'let':
        aList = self.gen_let(tokens,indent)
    elif theId == 'if':
        aList = self.gen_if(tokens,indent)
    elif theId in self.def_keywords:
        aList = self.gen_def(theId,tokens,indent)
    elif theId in self.indent_keywords:
        self.put('%s...' % (theId),[],indent,result)
        aList = self.gen(tokens[1:],indent+1)
    elif theId in self.expr_keywords:
        aList = self.gen_expr(theId,tokens[1:],indent+1)
    else:
        # Anything else is treated as a function call.
        aList = self.gen_call(theId,tokens[1:],indent)

    result.extend(aList)
    return result
#@-node:gen_block_id
#@-node:gen_block & helper
#@+node:gen_call & helper
def gen_call (self,funcId,tokens,indent):

    '''Generate output for a function call: a 'call:' header followed by
    each argument.  In non-debug mode code is printed instead.'''

    result = []

    if self.debug:
        self.put('call: %s' % (funcId),[],indent,result)
        for tok in tokens:
            result.extend(self.gen_arg(tok,indent+1))
    else:
        self.put_code_line('%s(' % (funcId))
        for tok in tokens:
            self.gen_arg(tok,indent+1)
        self.put_code(')')

    return result

#@+node:gen_arg
def gen_arg(self,token,indent):

    '''Generate output for one argument of a call; nested blocks recurse
    through gen_block.'''

    result = []
    is_block = token.kind == 'block:'

    if self.debug:
        if is_block:
            self.put('arg block:...',[],indent,result)
            result.extend(self.gen_block(token,indent))
        else:
            self.put_token(token,indent,result)
    elif is_block:
        self.put_code(''.join(self.gen_block(token,indent)))
    else:
        self.put_code_token(token)

    return result
#@-node:gen_arg
#@-node:gen_call & helper
#@+node:gen_def
def gen_def(self,theId,tokens,indent):

    '''Generate output for defun/defvar.  tokens must be at least
    (def-id, name-id, params, body...).'''

    result = []

    if not tokens or len(tokens) < 3:
        result.append('*** bad def tokens')
        return result

    # defToken itself is unused; tokens[1] must be the name id.
    defToken,idToken = tokens[0:2]
    if idToken.kind != 'id:':
        result.append('*** bad def id')
        return result

    if self.debug:
        self.put(theId,idToken.val,indent,result)
        aList = self.gen(tokens[2:],indent+1)
        result.extend(aList)
    else:
        # Print a Python-style def line, the params, then the body.
        self.put_code('def %s (' % idToken.val)
        self.gen_token(tokens[2],indent)
        self.put_code('): # end def\n')
        self.indent += 1
        self.gen(tokens[3:],indent+1)
        self.indent -= 1

    return result
#@-node:gen_def
#@+node:gen_if & helpers
@ if condition then-form else-forms.

If the evaluated condition is non-nil, then-form is evaluated and the result
returned. Otherwise, the else-forms are evaluated in textual order, and the
value of the last one is returned. If condition has the value nil, and no
else-forms are given, if returns nil.
@c

def gen_if (self,tokens,indent):

    '''Generate output for (if cond then-form else-forms...).'''

    # tokens[0]: id:if
    # tokens[1] cond

    # NOTE(review): this trace loop runs unconditionally, not just when
    # self.debug is set -- confirm it is meant to stay enabled.
    for i in xrange(len(tokens)):
        g.trace('tokens[%d]: %s' % (i,self.dumpList(tokens[i])))

    # g.trace(self.dumpList(tokens))

    result = []

    if self.debug:
        self.put('if...',[],indent,result)
        aList = self.gen(tokens[1:],indent+1)
        result.extend(aList)
    else:
        self.put_code('if ')
        self.gen(tokens[1:],indent+1)
        self.put_code(': # end if\n')

    return result
#@+node:gen_then
def gen_then (self,token):

    '''Placeholder: generate an if-statement's then-form (not yet implemented).'''

    pass
#@-node:gen_then
#@-node:gen_if & helpers
#@+node:gen_expr
def gen_expr (self,theId,aList,indent):

    '''Generate output for an expression keyword ('and', 'not', 'or'):
    the keyword line followed by its operands one level deeper.

    The unused local 'binops' has been removed.'''

    result = []

    self.put(theId,[],indent,result)
    result.extend(self.gen(aList,indent+1))

    return result
#@-node:gen_expr
#@+node:gen_let & helper
@
(let ((variable value)
      (variable value)
      ...)
  body...)
@c

def gen_let (self,tokens,indent):

    '''Generate output for (let (bindings...) body).  Expects exactly
    (let-id, bindings-block, body-block); anything else falls back to
    generic generation.'''

    if not tokens: return []
    if len(tokens) != 3:
        g.trace('unusual let')
        result = self.gen(tokens,indent+1)
        return result

    if 0: # Debug dump of the incoming tokens.
        for i in xrange(len(tokens)):
            g.trace('token',i,tokens[i])

    letToken,bindingToken,bodyToken = tokens
    result = []
    self.put('let...',[],indent,result)
    self.put('let-bindings...',[],indent+1,result)
    aList = self.gen_let_bindings(bindingToken,indent+2)
    result.extend(aList)
    self.put('let-block...',[],indent+1,result)
    aList = self.gen_block(bodyToken,indent+2)
    result.extend(aList)
    return result
#@nonl
#@+node:gen_let_bindings
def gen_let_bindings (self,token,indent):

    '''Generate output for a let bindings block: a 'block:' token whose
    children are (id value) pair blocks.'''

    result = []

    if token.kind != 'block:':
        g.trace('unexpected let')
        return result

    for z in token.val:
        if z.kind == 'block:': # one (id,val) pair
            if z.val and len(z.val) == 2:
                token1 = z.val[0]
                token2 = z.val[1]
                self.put('let-id',token1,indent,result)
                self.put('let-val...',[],indent,result)
                if token2.kind == 'block:':
                    # The bound value is itself a block: recurse.
                    aList = self.gen_block(token2,indent+1)
                    result.extend(aList)
                else:
                    #g.trace('no let list')
                    self.put_token(token2,indent+1,result)
            else:
                g.trace('unexpected let 2')

    return result
#@-node:gen_let_bindings
#@-node:gen_let & helper
#@+node:put...
#@+node:put 
def put (self,kind,val,indent,result):

    '''Append one formatted trace line (indent number, padding, kind,
    optional value) to result.'''

    prefix = '%2d: %s' % (indent,' ' * indent)

    if kind == 'string:':
        # Truncate long strings for readability.
        val = self.choose(len(val)>20,val[:20]+'..."',val)

    if val:
        result.append('%s%s %s' % (prefix,str(kind),str(val)))
    else:
        result.append('%s%s' % (prefix,str(kind)))
#@-node:put 
#@+node:put_token
def put_token (self,token,indent,result):

    '''Append trace lines for a token: its attached comments first,
    then the token itself.'''

    for comment in token.comments:
        self.put('comment:',comment,indent,result)

    self.put(token.kind,token.val,indent,result)
#@-node:put_token
#@+node:put_code & put_code_line
def put_code_line (self,s):

    '''Print s indented by self.indent.  The trailing comma (Python 2)
    suppresses the newline.'''

    s2 = '%s%s' % (' '*self.indent,s)
    print s2,

def put_code (self,s):

    '''Print s with no indentation; trailing comma suppresses the newline.'''

    print s,
#@-node:put_code & put_code_line
#@+node:put_code_token
def put_code_token (self,token):

    '''Emit a token as code: blocks appear as the marker '<block>',
    everything else as its spelling.'''

    text = '<block>' if token.kind == 'block:' else token.val
    self.put_code(text)

#@-node:put_code_token
#@-node:put...
#@-node:Code generators...
#@-node:class elispScanner
#@-node:@@button elisp test @key = Alt-3
#@-node:newer elispToPy
#@-node:elispToPy
#@+node:sudoku puzzle
@tabwidth -4

# Solves the sudoku puzzle.

import copy

digits = '123456789' # valid digits.

# The archived puzzles are only pulled in when this is enabled;
# otherwise only << define data >> below selects the puzzle.
if 0:
    << 1-star puzzles >>
    << 2-star puzzles >>
    << 3-star puzzles >>
    << 4-star puzzles >>
    << 5-star puzzles >>

<< define data >>

@others

# Python 2 print statement.
print '-' * 40

solver = sudokuClass(data=data)
if not solver.errors:
    solver.solve()
#@+node:<< define data >>
data = (
    '',
) 

data = (     # 9/2107 5 stars
    '7xx1xx6xx',
    'xxxxx6x14',
    '1xxx5xxx2',
    'x3x4xx82x',
    'xxx7x2xxx',
    'x71xx9x3x',
    '8xxx2xxx3',
    '31x8xxxxx',
    'xx7xx5xx8',
)
#@-node:<< define data >>
#@+node:<< 5-star puzzles >>
# Neither of these are solvable without guessing.

data = ( 
    'xx24xxxxx',
    'x41x3xxxx',
    '8xxx6xx4x',
    'x6xxx3xx9',
    'x7x9x8x3x',
    '2xx6xxx7x',
    'x2xx4xxx1',
    'xxxx1x72x',
    'xxxxx64xx') # 4/27/07 5 stars (very hard)

data = ( 
    '1x5xxx37x',
    'xxxxxx2xx',
    'x973xxx1x',
    'xxxx531x2',
    '3xx8x1xx4',
    '2x147xxxx',
    'x7xxx864x',
    'xx8xxxxxx',
    'x12xxx8x7') # 8/4 5 stars

data = ( 
    '2xxxx1834',
    'xxxx9xxxx',
    'x1x3xxx5x',
    'xx75xxxxx',
    '16xxxxx72',
    'xxxxx93xx',
    'x7xxx4x1x',
    'xxxx8xxxx',
    '8549xxxx3') # 8/9 5 stars (may be invalid)
#@nonl
#@-node:<< 5-star puzzles >>
#@+node:<< 4-star puzzles >>
data = (
    'x13xxxxx2',
    '6x2xx4xx8',
    '4xx3xxx6x',
    '2xx8xxxx7',
    'xxx715xxx',
    '9xxxx3xx4',
    'x2xxx1xx3',
    '1xx5xx4x9',
    '8xxxxx62x') # 9/10/05 4 stars
#@nonl
#@-node:<< 4-star puzzles >>
#@+node:<< 3-star puzzles >>
data = (
    '8xxx9x21x',
    'x9x4xxxxx',
    'xx58x7xx9',
    '7xx1xx9xx',
    'xxxx5xxxx',
    'xx6xx3x28',
    '6xx5x93xx',
    'xxxxx6x7x',
    'x48x1xxx6',
)

data = ( # 8/3: solvable.
    'x5xx9xxxx',
    'xx48xxxx9',
    'xxx1x728x',
    '56xxxx137',
    'xxxxxxxxx',
    '173xxxx42',
    'x215x8xxx',
    '6xxxx38xx',
    'xxxx1xx6x')

data = ( # 1 stars
    'x4xxxx179',
    'xx2xx8x54',
    'xx6xx5xx8',
    'x8xx7x91x',
    'x5xx9xx3x',
    'x1xx6xx4x',
    '3xx4xx7xx',
    '57x1xx2xx',
    '928xxxx6x')

data = (
    '6xx75x1xx',
    '8xxxx34xx',
    'x3x96xx25',
    'xxx4xx3x2',
    '7xxxxxxx6',
    '2x1xx5xxx',
    '31xx89x4x',
    'xx65xxxx1',
    'xx5x42xx3') # solvable.

data = (
    'xxxxx6xx5',
    'xx41xx8xx',
    'x5xx78x42',
    '58xxxx9xx',
    '3xxxxxxx7',
    'xx6xxxx18',
    '24x39xx7x',
    'xx7xx52xx',
    '9xx7xxxxx') # solvable

data = (
    'xxxxx6xx5',
    'xx41xx8xx',
    'x5xx78x42',
    '58xxxx9xx',
    '3xxxxxxx7',
    'xx6xxxx18',
    '24x39xx7x',
    'xx7xx52xx',
    '9xx7xxxxx') # 9/8/05 3 stars

data = (
    'xxxx64x15',
    'x549xx6x2',
    'xxxxxxx7x',
    'xxxx8x2xx',
    '1x8xxx5x7',
    'xx7x4xxxx',
    'x3xxxxxxx',
    '8x2xx319x',
    '94x87xxxx') # 9/9/05 3 stars

data = (
    '8xxxxxxxx',
    'x915x36xx',
    'x62xxxx8x',
    'xx9xx8xxx',
    'x752x984x',
    'xxx4xx9xx',
    'x1xxxx42x',
    'xx49x276x',
    'xxx7xxxx5',
)
#@nonl
#@-node:<< 3-star puzzles >>
#@+node:<< 2-star puzzles >>
data = (
    '4xxxxxxxx',
    '96xxx85xx',
    'x374x6xx1',
    '3x48xxx6x',
    'xxxx1xxxx',
    'x5xxx92x7',
    '5xx1x267x',
    'xx95xxx82',
    'xxxxxxxx9',
)

data = (
    'xxx395xxx',
    'xx5xx89x2',
    'xxxx2xxx5',
    '6x2xxxxx7',
    'x84xxx53x',
    '7xxxxx1x6',
    '3xxx6xxxx',
    '5x62xx7xx',
    'xxx831xxx',
)

data = ( # 8/6 2 stars
    '2x6xxxx49',
    'x37xx9xxx',
    '1xx7xxxx6',
    'xxx58x9xx',
    '7x5xxx8x4',
    'xx9x62xxx',
    '9xxxx4xx1',
    'xxx3xx49x',
    '41xxxx2x8')

data = (
    '9xx7x3xx6',
    'x87xx2xxx',
    '15xxxxx9x',
    'xxx6xx82x',
    'xx8xxx1xx',
    'x26xx8xxx',
    'x6xxxxx31',
    'xxx4xx97x',
    '4xx2x1xx8') # 8/30 2 stars

data = (
    '8xx3xxx7x',
    'xx57xxxxx',
    '9xx165x3x',
    '34xxxxxx9',
    'xxx5x4xxx',
    '7xxxxxx83',
    'x8x253xx4',
    'xxxxx65xx',
    'x2xxx1xx6',
) # 9/6/ 2 stars

data = ( 
    'x6x29xx8x',
    'xx8xxxx3x',
    'x1xx78xxx',
    'x217x9xx8',
    '6xxxxxxx3',
    '7xx6x492x',
    'xxx12xx4x',
    'x7xxxx3xx',
    'x5xx86x7x') # 2 stars
#@nonl
#@-node:<< 2-star puzzles >>
#@+node:<< 1-star puzzles >>
data = (
    'x4735xxx9',
    'x5x8x93xx',
    'xx84xx12x',
    '12x57xxx8',
    '7x5xxx2x6',
    '3xxx82x17',
    'x12xx59xx',
    'xx92x6x4x',
    '4xxx1875x',
)

data = (
    '19xxx84xx',
    '7xxx9xxxx',
    'x5xxxx986',
    'x19xxxx6x',
    '8xxxxxxx5',
    'x6xxxx72x',
    '684xxxx3x',
    'xxxx2xxx7',
    'xx26xxx14',
)

data = (
    'xx2xx7xx9',
    'x8x249x3x',
    'x31xx572x',
    'xx9xx8xx1',
    'x65xxx847',
    '4xx7xx2xx',
    'x931xx65x',
    'x5x862x73',
    '8xx5xx4xx') # 1 star

data = ( # 8/10 1 star
    'x6xxx5x19',
    'x9x34xxxx',
    'x8x96x5xx',
    'xxx8x93x1',
    '8x9xxx4x7',
    '2x17x4xxx',
    'xx3x86x4x',
    'xxxx27x6x',
    '47x5xxx8x')
#@nonl
#@-node:<< 1-star puzzles >>
#@+node:class sudokuClass
class sudokuClass:

    '''A class to solve the sudoku puzzle.'''

    # Leo's @others directive pulls in the methods defined in child nodes.
    @others
#@nonl
#@+node: ctor (main) & helpers
def __init__ (self,data):

    '''Store the puzzle data, then validate it and build all cells and groups.'''

    # g.trace('main')
    self.cells = []
    self.cols = []
    self.colGroups = []
    self.data = data
    self.errors = 0
    self.excludedGroupNumbers = []
    self.level = 0      # Guess nesting level; see solve().
    self.rowGroups = []
    self.rows = []
    self.squareGroups = []
    self.tracing = True
    self.valid = True
    self.verbose = False

    # Check the data and finish the init process.
    self.checkData()
    self.initFromData()
    self.printData()
    self.finishInit()
    # self.dump()
#@nonl
#@+node:initFromData
def initFromData (self):

    '''Build self.cells, self.rows and self.cols from the raw puzzle strings.
    Characters in the module-level 'digits' become the cell value; anything
    else (e.g. 'x') becomes None.'''

    for i, row in enumerate(self.data):
        thisRow = []
        for j, ch in enumerate(row):
            if ch in digits:
                val = ch
            else:
                val = None
            self.cells.append(self.cellClass(self,val,i,j))
            thisRow.append(val)
        self.rows.append(thisRow)

    # The columns are the transpose of the rows.
    for j in xrange(9):
        self.cols.append([row[j] for row in self.rows])
#@nonl
#@-node:initFromData
#@+node:finishInit
def finishInit (self):

    '''Create the square, row and column groups and link them to the cells.'''

    for i in xrange(9):
        self.squareGroups.append(self.squareGroupClass(self,i))
        # NOTE(review): rowGroups is filled with colGroupClass instances
        # and colGroups with rowGroupClass -- confirm the swap is intentional.
        self.rowGroups.append(self.colGroupClass(self,i))
        self.colGroups.append(self.rowGroupClass(self,i))

    for z in self.squareGroups:
        z.finishCreate()
    for z in self.rowGroups:
        z.finishCreate()
    for z in self.colGroups:
        z.finishCreate()

    # Must be done last!
    for z in self.squareGroups:
        z.computeRelatedGroups()
    for z in self.cells:
        z.finishCreate()
#@nonl
#@-node:finishInit
#@+node:checkData
def checkData (self):

    '''Report an error (via self.error) unless self.data is exactly
    9 rows of 9 characters each.

    Bug fix: the column-error message referenced an undefined name 'i',
    which raised NameError whenever a short/long row was found; the row
    index is now tracked with enumerate.'''

    rows = len(self.data)

    if rows != 9:
        return self.error('wrong number of rows: %d' % rows)

    for i, row in enumerate(self.data):
        cols = len(row)
        if cols != 9:
            return self.error('wrong number of columns in row %d: %d' % (i,cols))
#@nonl
#@-node:checkData
#@+node:check & helper
def check (self):

    '''Return True iff every column, row and square group passes checkGroup.'''

    all_groups = (self.colGroups,self.rowGroups,self.squareGroups)
    for groups in all_groups:
        for group in groups:
            if not self.checkGroup(group):
                return False
    return True
#@+node:checkGroup
def checkGroup (self,group):

    '''Return False if any solved value (a cell with exactly one
    remaining candidate) appears twice in group; else True.'''

    seen = []
    for cell in group.cells:
        if len(cell.values) != 1:
            continue
        val = cell.values[0]
        if val in seen:
            g.trace('%s appears twice in group %s' % (val,repr(group)))
            return False
        seen.append(val)
    return True
#@nonl
#@-node:checkGroup
#@-node:check & helper
#@-node: ctor (main) & helpers
#@+node:printing & dumping
#@+node:dump
def dump (self):

    '''Debug dumps of the group structures.  All branches are currently
    disabled with 'if 0'.'''

    # NOTE(review): self.groups is referenced below but the ctor only
    # creates squareGroups/rowGroups/colGroups -- confirm before enabling.
    if 0:
        print ; print 'groups...'
        for group in self.groups:
            print 'group %d, rowsNumbers: %s colNumbers: %s' % (
                group.groupNumber,group.rowNumbers,group.colNumbers)

    if 0:
        print ; print 'row groups...'
        for group in self.rowGroups:
            print '%d %s' % (group.rowNumber, group.rowcol)

    if 0:
        print ; print 'related groups...'
        for group in self.groups:     
            print 'Groups related to group %d: %s' % (
                group.groupNumber,[g.groupNumber for g in group.relatedGroups])
#@nonl
#@-node:dump
#@+node:printData
def printData (self,tag='initial data'):

    '''Print the 9x9 board (self.rows) with ___ for unknown cells and a
    horizontal rule after every third row.  Python 2 print statements;
    the trailing commas suppress newlines.'''

    print ; print tag ; print

    i = 0
    for row in self.rows:
        i += 1
        print
        for ch in row:
            if ch:
                print ('  %s  ') % ch,
            else:
                print ' ___ ',
        if i % 3 == 0 and i < 9:
            print ; print ; print '_' * 53
        print
    print
#@nonl
#@-node:printData
#@+node:printCells
def printCells (self,tag=''):

    '''Print each cell's remaining candidate values, nine cells per line;
    a completely unknown cell shows as 1..9.  Python 2 print statements.'''

    print
    if tag: print tag ; print

    i = 0
    for cell in self.cells:
        if len(cell.values) == 9:
            print '%7s' % '1..9',
        else:
            print '%7s' % ''.join(cell.values),
        i += 1
        if i % 9 == 0:
            print
#@nonl
#@-node:printCells
#@-node:printing & dumping
#@+node:Utils
#@+node:groupNumber
def groupNumber (self,row,col):

    '''Return the 0-8 index of the 3x3 square containing (row,col).'''

    return 3 * (row // 3) + col // 3
#@nonl
#@-node:groupNumber
#@+node:error
def error (self,s):

    '''Report an error message and bump the error count.'''

    print 'oops',s
    self.errors += 1
#@nonl
#@-node:error
#@+node:trace
def trace (self,s):

    '''Print s when tracing is enabled.'''

    if self.tracing:
        print s
#@nonl
#@-node:trace
#@+node:isFinished
def isFinished (self):

    '''Return True iff every cell has exactly one remaining value.'''

    for cell in self.cells:
        remaining = len(cell.values)
        # An empty value list means a validity check should already have failed.
        assert(remaining > 0)
        if remaining > 1:
            return False
    return True
#@nonl
#@-node:isFinished
#@-node:Utils
#@+node:Guesses
#@+node:findBestGroup
def findBestGroup (self,excludedGroupNumbers):

    '''Return the square group with the most solved cells that is not yet
    complete and not in excludedGroupNumbers, or None if none qualifies.'''

    bestGroup = None
    bestKnown = 0
    for group in self.squareGroups:
        n = 0 # Number of known cells
        for cell in group.cells:
            if len(cell.values) == 1:
                n += 1
        # Prefer the most-solved group that still has unknowns.
        if 9 > n > bestKnown:
            if group.groupNumber not in excludedGroupNumbers:
                bestGroup = group
                bestKnown = n

    if bestGroup:
        if self.tracing:
            # Python 2 print statements.
            print ; print 'best group %d' % bestGroup.groupNumber
            if 0:
                if self.verbose:
                    print 'unknown cells',
                    for cell in bestGroup.unknownCells():
                        print cell,
                    print 'unknown vals',
                    for val in bestGroup.unknownVals():
                        print val,
                    print

    return bestGroup
#@nonl
#@-node:findBestGroup
#@+node:findGuesses
def findGuesses (self):

    guesses = []
    group = self.findBestGroup(self.excludedGroupNumbers)
    self.excludedGroupNumbers.append(group.groupNumber)
    if not group:
        g.trace('No groups left to guess: %s' % excludedGroupNumbers)
        self.valid = False
        return []

    # Generate all combinations of cells and unkown vals.
    cells = [cell for cell in group.cells if len(cell.values) > 1]
    vals = []
    for cell in cells:
        for val in cell.values:
            if val not in vals: vals.append(val)
    n = len(vals)
    for i in xrange(n):
        guess = [] ; j = 0
        for cell in cells:
            bunch = g.bunch(cell=cell,val=vals[(i+j)%n])
            j += 1
            guess.append(bunch)
        if self.isValidGuess(guess):
            guesses.append(guess)

    if not guesses:
        g.trace('No valid guess for group %d' % group.groupNumber)
        self.valid = False
        return []

    if 0: # Another trace is in initFromGuess
        print 'level %d guesses...' % self.level
        for guess in guesses:
            for bunch in guess:
                print bunch.cell,bunch.val

    return guesses
#@nonl
#@-node:findGuesses
#@+node:isValidGuess
def isValidGuess (self,guess):

    '''Return True if no assignment in guess conflicts with a cell's
    possible values.  Currently disabled: the early return below makes
    every guess count as valid.'''

    return True ##

    # Dead code until the early return above is removed.
    for bunch in guess:
        if not bunch.cell.valIsPossible(bunch.val):
            return False

    return True
#@nonl
#@-node:isValidGuess
#@-node:Guesses
#@+node:solve (main)
def solve (self):

    '''Main solver loop: repeatedly apply the deduction passes (reduce,
    unique, removeConflicts) until the puzzle is finished, an invalid
    state is reached, or no progress is made -- in which case the
    << guess an answer >> section recurses on guesses.

    Returns True if solved, False otherwise.'''

    n = 0 ; self.valid = True
    while not self.errors and self.valid:
        n += 1
        self.progress = 0  # Counts deductions made during this iteration.
        if self.tracing:
            print '*' * 40
            print 'solve: iteration %d at level %d' % (n,self.level)
        if not self.check(): return False
        if self.tracing: self.printCells()
        for cell in self.cells:
            # Reduce the possible values for the cell.
            cell.reduce()
        if not self.valid: break
        for cell in self.cells:
            # Find any values that appear only in one place in a group.
            cell.unique()
        if self.isFinished():
            self.printCells('success!') ; return True
        if self.tracing: self.printCells()
        for cell in self.cells:
            # Remove any possible values that would make other groups impossible.
            cell.removeConflicts()
        if not self.valid: break
        if self.isFinished():
            if self.level == 0: self.printCells('success!')
            return True
        if self.progress == 0:
            # No deduction succeeded: fall back to guessing (Leo section reference).
            << guess an answer >>

    if self.tracing:
        if not self.valid:
            print ; print 'reached invalid state'
        if self.progress == 0:
            print ; print 'no progress'
        self.printCells()
    return False
#@nonl
#@+node:<< guess an answer >>
# Spliced into solve() when no deduction pass made progress.
if self.level < 2:
    # Save the previous data.
    save = [g.bunch(cell=cell,values=cell.values[:]) for cell in self.cells]
    guesses = self.findGuesses()
    if self.tracing:
        print '-'*20,'%d valid guesses' % len(guesses)
    if not guesses:
        return False
    n = 0
    for guess in guesses:
        # Restore the previous state.
        for b in save:
            b.cell.values = b.values[:]
        # Make the guess.
        self.level += 1 ; n += 1
        if self.tracing:
            print ; print '-'*40,'making guess %d at level %d' % (n,self.level)
        for b in guess:
            # NOTE(review): this assigns a *string*, not a list; it only
            # works because a 1-char string acts like a 1-item sequence
            # of that char -- confirm intended.
            b.cell.values = str(b.val)
            if self.tracing: g.trace(b.cell,b.val)
        if self.tracing:
            self.printCells()
        # Call ourselves recursively.
        ok = self.solve()
        self.level -= 1
        if ok: return True
    if self.tracing or self.level == 0:
        print 'no solution is possible at level %d' % self.level
    # Restore the previous state.
    for b in save:
        b.cell.values = b.values[:]
    return False
else:
    if self.tracing:
        print 'maximum depth exceeded'
return False
#@nonl
#@-node:<< guess an answer >>
#@-node:solve (main)
#@+node:group classes
@ A group is essentially just a collection of cells.
#@nonl
#@+node:class squareGroupClass
class squareGroupClass:

    '''A group of cells forming one square (box) of the sudoku board.'''

    # Methods are supplied by the Leo @others directive.
    @others
#@nonl
#@+node: ctor
def __init__ (self,sudoku,n):

    '''Ctor for squareGroupClass: remember the main sudoku object and
    this group's number.  Containers are filled in later by
    finishCreate and computeRelatedGroups.'''

    self.main = sudoku
    self.groupNumber = n

    # Filled in later by finishCreate/computeRelatedGroups...
    self.cells = []
    self.rowNumbers = []
    self.colNumbers = []
    self.relatedGroups = []
#@nonl
#@-node: ctor
#@+node:__repr__ & __str__
def __repr__ (self):

    '''Return a short description of this square group.'''

    description = '<square group %d>' % self.groupNumber
    return description

__str__ = __repr__
#@nonl
#@-node:__repr__ & __str__
#@+node:finishCreate
def finishCreate (self):

    '''Collect this square's cells from the main grid, point each cell
    back at this group, and record the sorted, duplicate-free row and
    column numbers the cells occupy.'''

    main = self.main

    self.cells = [cell for cell in main.cells
        if cell.groupNumber == self.groupNumber]

    # Let every cell know which square group owns it.
    for cell in self.cells:
        cell.squareGroup = self

    # Unique, ascending row and column numbers covered by this square.
    self.rowNumbers = sorted(set([cell.i for cell in self.cells]))
    self.colNumbers = sorted(set([cell.j for cell in self.cells]))
#@nonl
#@-node:finishCreate
#@+node:computeRelatedGroups
def computeRelatedGroups (self):

    '''Build the list of other square groups that share at least one
    row or one column with this group.'''

    self.relatedGroups = []
    for other in self.main.squareGroups:
        if other is self:
            continue
        sharesCol = any([n in self.colNumbers for n in other.colNumbers])
        sharesRow = any([n in self.rowNumbers for n in other.rowNumbers])
        if (sharesCol or sharesRow) and other not in self.relatedGroups:
            self.relatedGroups.append(other)
#@nonl
#@-node:computeRelatedGroups
#@-node:class squareGroupClass
#@+node:class colGroupClass
class colGroupClass:

    '''A group of cells forming one column of the sudoku board.'''

    # Methods are supplied by the Leo @others directive.
    @others
#@nonl
#@+node:ctor
def __init__ (self,sudoku,j):

    '''Ctor for colGroupClass: remember the main sudoku object and the
    column number; cells and col are filled in by finishCreate.'''

    self.main = sudoku
    self.j = j

    # Filled in later by finishCreate...
    self.cells = []
    self.col = None
#@-node:ctor
#@+node:__repr__ & __str__
def __repr__ (self):

    '''Return a short description of this column group.'''

    description = '<col group %d>' % self.j
    return description

__str__ = __repr__
#@nonl
#@-node:__repr__ & __str__
#@+node:finishCreate
def finishCreate(self):

    '''Bind this group to column j: cache the column, collect the cells
    in that column, and point each cell back at this group.'''

    main = self.main ; j = self.j

    self.col = main.cols[j]
    self.cells = [cell for cell in main.cells if cell.j == j]

    for cell in self.cells:
        cell.colGroup = self
#@nonl
#@-node:finishCreate
#@-node:class colGroupClass
#@+node:class rowGroupClass
class rowGroupClass:

    '''A group of cells forming one row of the sudoku board.'''

    # Methods are supplied by the Leo @others directive.
    @others
#@nonl
#@+node:ctor
def __init__ (self,sudoku,i):

    '''Ctor for rowGroupClass: remember the main sudoku object and the
    row number; cells and row are filled in by finishCreate.'''

    self.main = sudoku
    self.i = i

    # Filled in later by finishCreate...
    self.cells = []
    self.row = None
#@nonl
#@-node:ctor
#@+node:__repr__ & __str__
def __repr__ (self):

    '''Return a short description of this row group.'''

    description = '<row group %d>' % self.i
    return description

__str__ = __repr__
#@nonl
#@-node:__repr__ & __str__
#@+node:finishCreate
def finishCreate(self):

    '''Bind this group to row i: cache the row, collect the cells in
    that row, and point each cell back at this group.'''

    main = self.main ; i = self.i

    self.row = main.rows[i]
    self.cells = [cell for cell in main.cells if cell.i == i]

    for cell in self.cells:
        cell.rowGroup = self
#@nonl
#@-node:finishCreate
#@-node:class rowGroupClass
#@-node:group classes
#@+node:class cellClass
class cellClass:

    '''A class representing what is known about a particular cell.'''

    # Methods are supplied by the Leo @others directive.
    @others
#@nonl
#@+node: birth
#@+node: ctor (cell)
def __init__ (self,sudoku,val,i,j):

    '''Ctor for cellClass: record the cell's coordinates and its
    initial candidate values (all digits when val is None).'''

    self.main = sudoku
    self.i = i
    self.j = j
    self.groupNumber = sudoku.groupNumber(i,j)
    if val is None:
        self.values = list(digits)
    else:
        self.values = [str(val)]
    self.verbose = self.main.verbose

    # Set by group ctors...
    self.colGroup = None
    self.rowGroup = None
    self.squareGroup = None
#@nonl
#@-node: ctor (cell)
#@+node:__repr__ & __str__
def __repr__ (self):

    '''Return a short description of this cell, e.g. cell[2,3].'''

    description = 'cell[%d,%d]' % (self.i, self.j)
    return description

__str__ = __repr__
#@nonl
#@-node:__repr__ & __str__
#@+node:finishCreate
def finishCreate(self):

    '''Sanity-check the group links set by the group ctors, then cache
    the square groups that share this cell's column or row.'''

    assert(self.colGroup)
    assert(self.rowGroup)
    assert(self.squareGroup)

    squares = self.main.squareGroups
    self.colGroups = [grp for grp in squares if self.j in grp.colNumbers]
    self.rowGroups = [grp for grp in squares if self.i in grp.rowNumbers]
#@nonl
#@-node:finishCreate
#@-node: birth
#@+node:error & trace
def error (self,s):

    '''Delegate error reporting to the main sudoku object.'''

    self.main.error(s)

def trace (self,s):

    '''Delegate tracing to the main sudoku object.'''

    self.main.trace(s)
#@nonl
#@-node:error & trace
#@+node:reduce
def reduce (self):

    '''Reduce the possible values in self.values: remove an item from
    self.values if any cell in this cell's groups contains only that value.

    Increments self.main.progress or set self.main.valid = False to indicate status.
    '''

    if not self.main.valid: return
    n = len(self.values)
    if n == 0: self.main.valid = False
    if n < 2:  return

    # Scan all three groups containing this cell.
    for group in (
        self.colGroup,
        self.rowGroup,
        self.squareGroup,
    ):
        for cell in group.cells:
            if (
                cell is not self and
                len(cell.values) == 1 and 
                cell.values[0] in self.values
            ):
                # That cell is solved, so its value cannot also be ours.
                self.values.remove(cell.values[0])
                n -= 1 
                if n == 0:
                    self.main.valid = False
                    return
                if n == 1:
                    # NOTE(review): the scan continues after setValue, so a
                    # later solved cell could remove the just-set value and
                    # push n negative -- confirm intended.
                    self.setValue(self.values[0])
                else:
                    self.main.progress += 1
#@nonl
#@-node:reduce
#@+node:removeConflicts
# This is about the most sophisticated deduction that a human could make.

def removeConflicts (self):

    '''Remove a possible value if assigning to this value would make it
    impossible to satisfy a related group.

    Increments self.main.progress or set self.main.valid = False to indicate status. '''

    if not self.main.valid: return
    n = len(self.values)
    if n == 0: self.main.valid = False
    if n < 2:  return

    i,j = self.i,self.j

    # Square groups sharing this cell's column/row, excluding our own square.
    colGroups = [group for group in self.colGroups if group != self.squareGroup]
    rowGroups = [group for group in self.rowGroups if group != self.squareGroup]

    # Check for row conflicts.
    for val in self.values:
        for group in rowGroups:
            # Count places for val in the related group outside this row.
            spots = 0
            for cell in group.cells:
                if i != cell.i and val in cell.values:
                    spots += 1
            if spots == 0:
                # val can only go in this row of the related group, so it
                # cannot be the value of this cell.
                self.trace('row conflict: cell: %s, val: %s, group %s' % (self,val,group))
                self.values.remove(val)
                n -= 1 ; self.main.progress += 1
                if n == 0:
                    self.main.valid = False
                # NOTE(review): unlike the col branch below, this branch does
                # not call setValue when n == 1 -- confirm this asymmetry is
                # intended (the file notes the guessing logic has bugs).
                return

    # Check for col conflicts.
    for val in self.values:
        for group in colGroups:
            spots = 0
            for cell in group.cells:
                if j != cell.j and val in cell.values:
                    spots += 1
            if spots == 0:
                self.trace('col conflict: cell: %s, val: %s, group %s' % (self,val,group))
                self.values.remove(val)
                n -= 1
                if n == 0:
                    self.main.valid = False
                if n == 1:
                    self.setValue(self.values[0])
                else:
                    self.main.progress += 1
                return
#@nonl
#@-node:removeConflicts
#@+node:setValue
def setValue (self,val):

    '''We have discovered the proper value for this cell.

    Set self.values = [val] and remove val from the values of every
    *other* cell in this cell's three groups, setting
    self.main.valid = False if any such cell is left with no values.

    val may be an int or a str; it is normalized once to the string
    form used throughout cell.values.  (The original compared the raw
    val but removed str(val), which silently no-ops for int vals.)'''

    sval = str(val)  # cell.values always holds strings.
    values = self.values
    if self.main.tracing:
        g.trace(self,val,values)
    assert(sval in values)
    values.remove(sval)
    self.main.progress += 1
    self.values = [sval]

    for group in (
        self.colGroup,
        self.rowGroup,
        self.squareGroup,
    ):
        for cell in group.cells:
            if cell is not self:
                # Use sval so int and str arguments behave identically.
                if sval in cell.values:
                    cell.values.remove(sval)
                    self.main.progress += 1
                    if len(cell.values) == 0:
                        self.main.valid = False
#@nonl
#@-node:setValue
#@+node:unique
def unique (self):

    '''Assign a value to this cell when that value has only one possible
    home in any of the cell's three groups.

    Increments self.main.progress (via setValue) or sets
    self.main.valid = False to indicate status.'''

    if not self.main.valid: return
    n = len(self.values)
    if n == 0: self.main.valid = False
    if n < 2:  return

    groups = (self.colGroup, self.rowGroup, self.squareGroup)
    for group in groups:
        for val in self.values:
            spots = len([cell for cell in group.cells if val in cell.values])
            # val is in self.values, and self is in each of its groups.
            assert(spots > 0)
            if spots == 1:
                # Only one possible place for val: it must go here.
                self.setValue(val) # Increments self.main.progress.
                return
#@nonl
#@-node:unique
#@+node:valIsPossible
def valIsPossible (self,val):

    '''Return False if some other solved cell in this cell's row or
    column already holds val; otherwise return True.'''

    assert(val is not None)

    def solvedAs (cell):
        return len(cell.values) == 1 and cell.values[0] == val

    for cell in self.rowGroup.cells:
        if cell is not self and solvedAs(cell):
            if self.main.tracing:
                g.trace('invalid guess: %s in row %d: %s' % (val,self.i,self.rowGroup.cells))
            return False

    for cell in self.colGroup.cells:
        if cell is not self and solvedAs(cell):
            if self.main.tracing:
                g.trace('invalid guess: %s in col %d: %s' % (val,self.j,self.colGroup.cells))
            return False

    return True
#@nonl
#@-node:valIsPossible
#@-node:class cellClass
#@-node:class sudokuClass
#@-node:sudoku puzzle
#@+node:sudoku changes
# There are bugs in the guessing logic.
# The following does not fix all of them.

In findGuesses:

    guesses = self.findAllGuesses(vals,cells)
    g.trace('%d raw guess' % len(guesses))
    guesses = [z for z in guesses if self.isValidGuess(z)]

In findAllGuesses:

def findAllGuesses(self,vals,cells):

    if not vals or not cells:
        return []

    guesses = []
    cell = cells[0]
    for val in vals:
        guess = [g.bunch(cell=cell,val=val)]
        vals2 = [z for z in vals if z != val]
        self.completeGuess(guess,vals2,cells[1:])
        guesses.append(guess)
    return guesses

def completeGuess(self,guess,vals,cells):

    if not vals or not cells:
        return []

    cell = cells[0]
    for val in vals:
        guess.append(g.bunch(cell=cell,val=val))
        vals2 = [z for z in vals if z != val]
        self.completeGuess(guess,vals2,cells[1:])


This works for the following 5* problem:

data = (     # 5 stars
    '1xx2x9xx3',
    'xx6xx3xx7',
    'x3xxxx8xx',
    'xxxx9xxx5',
    '78xxxxx94',
    '9xxx2xxxx',
    'xx1xxxx3x',
    '2xx8xx4xx',
    '8xx5x6xx2',
)
#@-node:sudoku changes
#@+node:Print all Tango icons in wiki-markup format
# Prints all icons in the Icons/Tango folder in wiki-markup format.
# This allows the icons to be inserted into Leo's body pane,
# provided that the color_markup and add_directives plugins are enabled.

import glob

# The Tango sub-folders to scan (under Icons/Tango/16x16).
folders = (
    'actions','animations','apps','categories','devices',
    'emblems','emotes','mimetypes','places','status',)

for z in folders:
    theDir = g.os_path_join(g.app.loadDir,'..','Icons','Tango','16x16',z)
    print ; print z
    aList = glob.glob(g.os_path_normpath(g.os_path_join(theDir,'*.*')))
    aList.sort()
    # Wrap each file in {picture file=...} wiki markup.
    # NOTE(review): the comprehension variable z shadows the loop variable
    # (harmless here because z is reassigned at the top of the loop).
    aList = ['{picture file=%s} %s' % (z,g.shortFileName(z)) for z in aList]
    aList = [str(z) for z in aList]
    # Strip list punctuation so the output can be pasted directly.
    s = g.listToString(aList).replace("'",'').replace('[','').replace(']','')
    print s
#@nonl
#@-node:Print all Tango icons in wiki-markup format
#@+node:square problem
limit = 1000*1000

def computeSquares(limit):
    '''Return a dict mapping i*i -> i for every integer i with 2 <= i < limit.'''
    # big keys: i*i grows quickly, i stays small.
    return dict([(i * i, i) for i in range(2, limit)])

def inc(n):
    '''Return the number obtained by adding one to every decimal digit of n.

    Returns None when n <= 0 or when any digit of n is 9 (incrementing
    a 9 would carry, which this transformation does not allow).'''
    if n <= 0:
        return None
    s = str(n)
    if '9' in s:
        return None
    # No digit is 9, so each digit increments independently (no carries).
    return int(''.join([str(int(ch) + 1) for ch in s]))

squares = computeSquares(limit)

# Look for squares j = i*i whose digit-incremented value inc(j) is also
# a perfect square.
i = 5
while i < 1000:
    j = i * i
    k = inc(j)  # None when j contains a 9 digit.
    k2 = squares.get(k)
    if k2:
        assert(k2*k2==k)
        print '**found**',i,'*',i,'=',j,k,'= %d*%d' % (k2,k2)
    # else: print i,j,k
    i += 1
print 'tested all numbers <',i
#@-node:square problem
#@-node:Fun...
#@+node:Important
#@+node:create-at-auto-nodes
# A script to create @auto nodes from all .py files in a directory.

import glob,os

# Set True to actually create the nodes; False only previews them.
reallyCreate = False

baseDir = r'c:\Python25\Lib\distutils'

dirs = ('','command',) # '' represents the base dir.
print '-' * 40

c.beginUpdate()
try:
    for theDir in dirs:
        pattern = g.os_path_join(baseDir,theDir,'*.py')
        files = glob.glob(pattern)
        print ; print 'pattern:', pattern ; print
        # print g.listToString(files)
        for name in files:
            # Headline is the file's path relative to baseDir.
            h = '@auto %s' % (name[len(baseDir) + 1:].strip())
            print 'creating',h
            if reallyCreate:
                child = p.insertAsLastChild()
                child.initHeadString(h)
finally:
    c.endUpdate()
#@-node:create-at-auto-nodes
#@+node:@@button print g.es stats
@first # -*- coding: utf-8 -*-

# To do: put list of to-be-translated strings in a doc part.

<< docstring >>
<< version history >>

class controllerClass:
    @others

x = controllerClass(c,p)
x.main()
#@+node:<< docstring >>
'''Here is a cute Leo script. It discovers all g.es statements and the unbound
variables in those statements in a single pass over the selected outline. It
then puts the decls followed by the statements in the body text of the @scan_g.es_results node.

Thus, after this script, you can **execute** the body text of the @scan_g.es_results node.
In particular, assuming that g.translateString converts all translated text to upper case,
it is easy to tell which strings have been translated.
'''
#@nonl
#@-node:<< docstring >>
#@+node:<< version history >>
@nocolor
@

2008/2/19 EKR: Initial 2-pass version.
2008/2/20 EKR:
- Convert to single-pass using decls and statements arrays.
- Add 'tabName' to list of keyword arguments.
- Fixed various bugs.
2008/2/20 EKR 2:
- Generate g.es statements to indicate beginnings of files.
  This allows us to execute all generated statements at once.
- Convert between '.' and '__' as needed to avoid using real ivars.
2008/2/21 EKR:
- Added mungeString.
- Rewrote mungeStatements, scanParens and scanIdChain.
These changes now properly handle all arguments.
2008/2/21 EKR 2:
- Added stringize and related logic.
- Added setToBeTranslated to put list of to-be-translated strings into leading docstring.
#@nonl
#@-node:<< version history >>
#@+node: ctor
def __init__ (self,c,p):

    '''Ctor for controllerClass: record the commander and position and
    initialize the scan state.'''

    self.c = c
    self.p = p

    self.decls = {}                  # Keys: id chains that need a generated binding.
    self.statements = []             # Generated/munged g.es statements.
    self.toBeTranslated = []         # String args that should be translated.
    self.keywordArgs = ('color','newline','tabName',)
    self.printStatements = ('g.es','g.es_print',)
    self.nodeName = '@scan_g.es_results'
    self.suppressTranslation = False # True: keyword arg to g.es seen.
    self.traceFlag = False

    self.root = g.findNodeAnywhere(c,self.nodeName)
#@-node: ctor
#@+node:error & trace
def error (self,s):

    '''Record an error as a flagged entry in the statements list.'''

    self.statements.append('***error: %s' % s)

def trace(self,s):

    '''Record s in the statements list, but only when tracing is enabled.'''

    if self.traceFlag:
        self.statements.append(s)
#@-node:error & trace
#@+node:main
def main (self):

    '''The main line.'''

    c = self.c
    self.scan()
    self.mungeStatements()
    keys = self.decls.keys()
    keys.sort()
    # This is such a great hack :-)
    decls = ["%s = '%s'" % (self.mungeString(z),self.stringize(z,delim="'")) for z in keys]

    # Put to-be-translated.
    results = ['@ To be translated...',]
    self.toBeTranslated.sort()
    results.extend(self.toBeTranslated)
    results.extend(['@c',])
    # Put decls.
    results.extend(['# Decls...',])
    results.extend(["color='red'","newline=True","tabName='Log'"])
    results.extend(decls)
    # Put statements
    results.extend(["g.app.translateToUpperCase=True",])
    results.extend(self.statements)
    results.extend(["g.app.translateToUpperCase=False",])

    results = '\n'.join(results)

    # Put the results in the root node, or print them.
    if self.root:
        c.setBodyString(self.root,results)
        c.beginUpdate()
        try:
            c.selectPosition(self.root)
        finally:
            c.endUpdate()
    else:
        print 'node not found',self.rootName
        print results

    g.es('done')
#@-node:main
#@+node:munging...
#@+node:mungeString
def mungeString (self,s):

    '''Convert string s so that it is a valid Python name.'''

    s = s.strip()
    # s = s.replace('@','_AT_')
    # s = s.replace('<','_LT_')
    # s = s.replace('>','_GT_')
    # Order matters: '()' must be handled before '(' and ')'.
    replacements = (
        (',','_COMMA_'), ('.','_DOT_'), ('()','_PARENS_'),
        ('(','_LP_'), (')','_RP_'), ('"','_DQ_'), ("'",'_SQ_'),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    # Any remaining non-alphanumeric character becomes an underscore.
    s = ''.join([g.choose(z == '_' or z.isalnum(),z,'_') for z in s])
    return s
#@-node:mungeString
#@+node:mungeHeadline
def mungeHeadline (self,s):

    '''Escape Leo section brackets in a headline so it can be written
    safely into generated text.'''

    escaped = s.strip()
    for old, new in (('<<', '< <'), ('>>', '> >')):
        escaped = escaped.replace(old, new)
    return escaped
#@-node:mungeHeadline
#@+node:mungeStatements
def mungeStatements (self):

    '''Replace references to known id chains in self.statements by the
    corresponding munged decl names, keeping the original lines as
    comments whenever a statement was changed.'''

    # Python 2: dict.keys() returns a mutable list.
    keys = self.decls.keys()
    # The print statement names themselves must not be munged.
    for tag in self.printStatements:
        if tag in keys: keys.remove(tag)

    results = []
    for s in self.statements:
        i = 0 ; inner_result = []
        while i < len(s):
            progress = i
            # NOTE(review): the *first* matching key wins, not the longest;
            # overlapping keys could munge a prefix -- confirm acceptable.
            for key in keys:
                if g.match_word(s,i,key):
                    inner_result.append(self.mungeString(key))
                    i += len(key)
                    break
            else:
                inner_result.append(s[i])
            i = max(i,progress+1)
        new_statement = ''.join(inner_result)
        if new_statement != s:
            # Add the original lines as comments.
            for z in g.splitLines(s):
                results.append ('# %s' % (z.rstrip()))
        results.append(new_statement) # Add the munged line.

    self.statements = results

#@-node:mungeStatements
#@+node:stringize
def stringize (self,s,delim):

    '''Strip s and backslash-escape every occurrence of delim in it.'''

    return s.strip().replace(delim, '\\%s' % delim)
#@-node:stringize
#@-node:munging...
#@+node:scan & helpers
def scan(self):

    '''Scan the selected node and its entire subtree for g.es statements.'''

    for p in self.p.self_and_subtree_iter():

        self.scanNode(p)
#@nonl
#@+node:scanIdChain
def scanIdChain (self,s,i,outer):

    '''Scan an id chain such as a.b.c(...).d starting at s[i].

    Records the chain via setDecl and returns the index just past it.
    Sets self.suppressTranslation when a keyword argument assignment is
    seen at the outer level.'''

    assert g.is_c_id(s[i])
    start = i 

    while i < len(s):
        progress = i
        # Skip a.b.c...
        if not g.is_c_id(s[i]): break
        i = g.skip_id(s,i,chars='.')
        i = g.skip_ws(s,i)
        word = s[start:i].strip()
        if outer and g.match(s,i,'=') and word in (self.keywordArgs):
            # g.trace('***','=',word)
            self.suppressTranslation = True
        # Only a paren is valid now.
        if not g.match(s,i,'('): break
        # We shall munge the entire paren expression.
        i = g.skip_matching_python_parens(s,i)
        if g.match(s,i,')'): i += 1
        # Only a dot is valid now.
        if not g.match(s,i,'.'): break
        i += 1
        assert i > progress

    self.setDecl(s[start:i])
    return i
#@-node:scanIdChain
#@+node:scanNode
def scanNode(self,p):

    '''Scan the body text of node p for g.es/g.es_print statements,
    appending each complete statement to self.statements.'''

    # NOTE(review): tags duplicates self.printStatements -- keep in sync.
    tags = ('g.es','g.es_print',)
    s,h = p.bodyString(), p.headString()
    print_h = True  # Emit the headline comment at most once per node.

    if h.startswith('@thin'):
        << insert statements highlighting the @thin node >>

    i = 0
    while i < len(s):
        progress = i
        for tag in tags:
            if g.match_word(s,i,tag):
                start = i ; i += len(tag)
                self.suppressTranslation = False
                i = g.skip_ws(s,i)
                if g.match(s,i,'('):
                    << Insert a comment containing the headline >>
                    i = self.scanParens(s,i,outer=True)
                    statement = s[start:i]
                    self.statements.append(statement)
                break
        i = max(i,progress+1)
#@+node:<< insert statements highlighting the @thin node >>
# Mark the start of an @thin file in the generated output, both as a
# comment and as a visible g.es_print separator.
self.statements.append('# ---- %s' % (h))
self.statements.append("g.es_print('','%s %s',color='red')" % ('-'*20,h))
#@nonl
#@-node:<< insert statements highlighting the @thin node >>
#@+node:<< Insert a comment containing the headline >>
# Emit (at most once per node) a comment naming the node the following
# statements came from; @thin nodes are already marked separately.
if print_h:
    print_h = False
    if not h.startswith('@thin'):
        self.statements.append('# -- node %s' % (self.mungeHeadline(h)))
#@nonl
#@-node:<< Insert a comment containing the headline >>
#@-node:scanNode
#@+node:scanParens
def scanParens(self,s,i,outer):

    '''Scan a parenthesized list of args.

    String args and id chains inside the parens are recorded as they
    are scanned; n counts the argument position (1-based).  Returns the
    index just past the closing paren.'''

    assert g.match(s,i,'(')
    j = g.skip_matching_python_parens(s,i)
    if j == -1:
        self.error('**incomplete paren at: %s' % s[i:])
        return i+1 # Just skip the paren

    assert g.match(s,j,')')
    n = 1
    while i < j:
        progress = i ; ch = s[i]

        if ch in ("'",'"',):
            i = self.scanStringArg(s,i,n,outer)
            n += 1

        elif g.is_c_id(ch):
            i = self.scanIdChain(s,i,outer)
            n += 1

        i = max(i,progress+1)

    return j + 1 # return the character after the paren.
#@-node:scanParens
#@+node:scanStringArg
def scanStringArg(self,s,i,n,outer):

    '''Skip a string argument and any following % formatting expression.

    Records the string as to-be-translated when scanning at the outer
    level.  Returns the index just past everything scanned.'''

    j = g.skip_python_string(s,i)
    if j <= i: return i

    # g.trace(n,s[i:j])

    if outer:
        self.setToBeTranslated(s[i:j],n)

    i = g.skip_ws(s,j)
    if not g.match(s,i,'%'):
        return i

    # Handle what follows the '%'
    i = g.skip_ws(s,i+1)

    if g.match(s,i,'('):
        i = self.scanParens(s,i,outer=False)
    elif i < len(s) and g.is_c_id(s[i]):
        i = self.scanIdChain(s,i,outer)

    return i
#@-node:scanStringArg
#@-node:scan & helpers
#@+node:setDecl
def setDecl (self,s):

    '''Remember s as an id chain that needs a generated binding, unless
    it needs no binding at all.'''

    s = s.strip()

    # Common names and keywords need no binding.
    if s in ('c','g','p','len','str','repr','and','or','not','is',):
        return
    # These get the special bindings emitted by main().
    if s in ('color','newline','tabName',):
        return
    # Integers need no binding either.
    if s.isdigit():
        return

    self.decls[s] = True
#@-node:setDecl
#@+node:setToBeTranslated
def setToBeTranslated (self,s,n):

    '''Record the n-th (1-based) argument s as a string to be
    translated, unless translation has been suppressed or s is really a
    keyword-argument name.  Only odd argument positions are messages.'''

    s = s.strip()
    # Drop one layer of surrounding quotes, if present.
    if s[:1] in ('"', "'"):
        s = s[1:]
    if s[-1:] in ('"', "'"):
        s = s[:-1]
    s = s.strip()

    # Suppress translation when a keyword is seen, regardless of position.
    if s in self.keywordArgs:
        self.suppressTranslation = True

    if self.suppressTranslation:
        # g.trace('not added',s)
        return

    if (n % 2) == 1 and s not in self.toBeTranslated:
        self.toBeTranslated.append(s)
#@-node:setToBeTranslated
#@-node:@@button print g.es stats
#@+node:@@button change leo imports
change = True # True: actually make the changes.
trace = True

print '-' * 40

tag = 'import leo' ; n = len(tag)

for p in p.self_and_subtree_iter():
    result = []
    for s in g.splitlines(p.bodyString()):
        i = s.find(tag,0)
        if i > -1:
            # Do nothing if we have already done the translation.
            i2 = s.find('<<') ; i3 = s.find('import leo.core')
            if i2 == -1 and i3 == -1:
                # The character after 'import leo' selects the module name.
                ch = s[i+n:i+n+1]
                ch = g.choose(ch.isspace(),'leo',ch.lower()) # special case: import leo.
                s = s[:i] + 'import leo.core.' + ch + s[i+n+1:]
                if trace:
                    print p.headString()
                    j,k=g.getLine(s,i) ; print s[j:k]
        result.append(s)
    # NOTE(review): assumes g.splitlines keeps line endings -- confirm.
    result = ''.join(result)

    if change and result != p.bodyString():
        c.setBodyString(p,result)


#@-node:@@button change leo imports
#@+node:c2py  Convert C code to Python syntax
@first
@language python
@tabwidth -4

import string

@ When using c2py as a script to translate entire files, use convertCFileToPython().  When using c2py within Leo, use convertCurrentTree().

Please set user data in the << specifying user types >> section.
@c

<< what c2py does >>
<< theory of operation >>
<< specify user types >>
tabWidth = 4 # how many blanks in a tab.
printFlag = False
doLeoTranslations = True ; dontDoLeoTranslations = False
<< define testData >>
@others

gClassName = "" # The class name for the present function.  Used to modify ivars.
gIvars = [] # List of ivars to be converted to self.ivar

def test():
    '''Translate each testData string, with results echoed (printFlag).'''
    global printFlag ; printFlag = True
    for s in testData:
        convertCStringToPython(s, doLeoTranslations)

def go():
    '''Convenience entry point: just run test().'''
    test()

if __name__ == "__main__":
    # NOTE(review): speedTest is not defined in this node; presumably it
    # is supplied elsewhere in the @others tree -- confirm.
    speedTest(2)
#@nonl
#@+node:<< what c2py does >>
@
c2py converts C or C++ text into python text. The conversion is not complete.
Nevertheless, c2py eliminates much of the tedious text manipulation that would
otherwise be required.

The following is a list of the translations performed by convertCodeList:

I.  Prepass

These translations happen before removing all curly braces.

Suppose we are translating:

    aTypeSpec aClass::aMethod(t1 v1,...,tn vn)
    {
        body
    }

1. Translates the function prototype, i.e., translates:

    aTypeSpec aClass::aMethod(t1 v1,...,tn vn)
to:
    def aMethod(v1,...vn):

As a special case, c2py translates:

    aTypeSpec aClass::aClass(t1 v1,...,tn vn)
to:
    aClass.__init__(t1 v1,...,tn vn)

Yes, I know, aClass.__init__ isn't proper Python, but retaining the class name is useful.

2. Let t denote any member of typeList or classList.

    a) Removes all casts of the form (t) or (t*) or (t**), etc.
    b) Converts t x, t *x, t **x, etc. to x.
    c) Converts x = new t(...) to x = t(...)
    d) For all i in ivarsDict[aClass] converts this -> i to self.i
    e) For all i in ivarsDict[aClass] converts i to self.i

3. Converts < < x > > = to @c.  This Leo-specific translation is not done when translating files.

II.  Main Pass

This pass does the following simple translations everywhere except in comments and strings.

Changes all -> to .
Changes all this.self to self (This corrects problems during the prepass.)
Removes all curly braces
Changes all #if to if
Changes all else if to elif
Changes all #else to else:
Changes all else to else:
Removes all #endif
Changes all && to and
Changes all || to or
Changes all TRUE to True
Changes all FALSE to False
Changes all NULL to None
Changes all this to self
Changes all @code to @c.  This Leo-specific translation is not done when translating files.

III.  Complex Pass

This pass attempts more complex translations.

Converts if ( x ) to if x:
Converts elif ( x ) to elif x:
Converts while ( x ) to while x:
Converts for ( x ; y ; z ) to for x SEMI y SEMI z:

IV.  Final Pass

This pass completes the translation.

Removes all semicolons.
Removes @c if it starts the text.  This Leo-specific translation is not done when translating files.
Removes all blank lines.
Removes excess whitespace from all lines, leaving leading whitespace unchanged.
Replaces C/C++ comments by Python comments.
Removes trailing whitespace from all lines.
#@-node:<< what c2py does >>
#@+node:<< theory of operation >>
@ Strategy and Performance

c2py is straightforward.  The speed of c2py is unimportant.  We don't care about the memory used because we translate only small pieces of text at a time.

We can do body[i:j] = x, regardless of len(x).  We can also do del body[i:j] to delete characters.

We scan repeatedly through the text.  Using many passes greatly simplifies the code and does not slow down c2py significantly.

No scans are done within strings or comments.  The idiom to handle such scans is the following:

def someScan(body):
    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif << found what we are looking for ? >> :
            << convert what we are looking for, setting i >>
        else: i += 1

That's about all there is to it.  The code was remarkably easy to write and seems clear to me.
#@-node:<< theory of operation >>
#@+node:<< specify user types >>
@ Please change the following lists so they contain the types and classes used by your program.

c2py removes all type definitions correctly; it converts
    new aType(...)
to
    aType(...)
@c

classList = [
    "vnode", "tnode", "Commands",
    "wxString", "wxTreeCtrl", "wxTextCtrl", "wxSplitterWindow" ]

typeList = ["char", "void", "short", "long", "int", "double", "float"]

@ Please change ivarsDict so it represents the instance variables (ivars) used by your program's classes.

ivarsDict is a dictionary used to translate ivar i of class c to self.i.  It also translates this->i to self.i.
@c

# Maps a class name to the list of its instance variables.  massageIvars
# uses this table to rewrite a bare ivar i (and this->i) as self.i.
ivarsDict = {
    "atFile": [ "mCommands", "mErrors", "mStructureErrors",
        "mTargetFileName", "mOutputFileName", "mOutputStream",
        "mStartSentinelComment", "mEndSentinelComment", "mRoot"],

    "vnode": ["mCommands", "mJoinList", "mIconVal", "mTreeID", "mT", "mStatusBits"],

    "tnode": ["mBodyString", "mBodyRTF", "mJoinHead", "mStatusBits", "mFileIndex",
        "mSelectionStart", "mSelectionLength", "mCloneIndex"],

    "LeoFrame": ["mNextFrame", "mPrevFrame", "mCommands"],

    "Commands": [
        # public
        "mCurrentVnode", "mLeoFrame", "mInhibitOnTreeChanged", "mMaxTnodeIndex",
        "mTreeCtrl", "mBodyCtrl", "mFirstWindowAndNeverSaved",
        #private
        "mTabWidth", "mChanged", "mOutlineExpansionLevel", "mUsingClipboard",
        "mFileName", "mMemoryInputStream", "mMemoryOutputStream", "mFileInputStream",
        "mInputFile", "mFileOutputStream", "mFileSize", "mTopVnode", "mTagList",
        "mMaxVnodeTag",
        "mUndoType", "mUndoVnode", "mUndoParent", "mUndoBack", "mUndoN",
        "mUndoDVnodes", "mUndoLastChild", "mUndoablyDeletedVnode" ]}
#@nonl
#@-node:<< specify user types >>
#@+node:<< define testData >>
# Sample C code (two strings) used to exercise the converter by hand.
# The literal is byte-sensitive (embedded trailing whitespace is part of
# the test); do not reformat.
testData = [ "\n@doc\n\
This is a doc part: format, whilest, {};->.\n\
<<\
section def>>=\n\
LeoFrame::LeoFrame(vnode *v, char *s, int i)\n\
{\n\
    // test ; {} /* */.\n\
    #if 0 //comment\n\
        if(gLeoFrameList)gLeoFrameList -> mPrevFrame = this ;\n\
        else\n\
            this -> mNextFrame = gLeoFrameList ;\n\
    #else\n\
        \n\
        vnode *v = new vnode(a,b);\n\
        Commands *commander = (Commands) NULL ; // after cast\n\
        this -> mPrevFrame = NULL ;\n\
    #endif\n\
    if (a==b)\n\
        a = 2;\n\
    else if (a ==c)\n\
        a = 3;\n\
    else return; \n\
    /* Block comment test:\n\
        if(2):while(1): end.*/\n\
    for(int i = 1; i < limit; ++i){\n\
        mVisible = FALSE ;\n\
        mOnTop = TRUE ;\n\
    }\n\
    // trailing ws.	 \n\
    mCommands = new Commands(this, mTreeCtrl, mTextCtrl) ;\n\
    gActiveFrame = this ;\n\
}\n\
    ", "<<" +
"vnode methods >>=\n\
\n\
void vnode::OnCopyNode(wxCommandEvent& WXUNUSED(event))\n\
{\n\
    mCommands -> copyOutline();\n\
}\n\
\n@doc\n\
another doc part if, then, else, -> \n<<" +
"vnode methods >>=\n\
void vnode::OnPasteNode(wxCommandEvent& WXUNUSED(event))\n\
{\n\
    mCommands -> pasteOutline();\n\
}\n" ]
#@nonl
#@-node:<< define testData >>
#@+node:speedTest
def speedTest(passes):

    import time
    file = r"c:\prog\LeoPy\LeoPy.leo"
    f=open(file)
    if not f:
        print "not found: ", file
        return
    s=f.read()
    f.close()
    print "file:", file, " size:", len(s), " passes:", passes
    print "speedTest start"
    time1 = time.clock()
    p = passes
    while p > 0:
        n = len(s) ; i = 0 ; lines = 0
        while -1 < i < n:
            if s[i] == '\n':
                lines += 1 ; i += 1
            else:
                i = s.find('\n',i) # _much_ faster than list-based-find.
            continue
            # match is about 9 times slower than simple test.
            if s[i]=='\n': # match(s,i,'\n'): # 
                i += 1
            else:
                i += 1
        p -= 1
    time2 = time.clock()
    print "lines:", lines
    print "speedTest done:"
    print "elapsed time:", time2-time1
    print "time/pass:", (time2-time1)/passes
#@nonl
#@-node:speedTest
#@+node:leo1to2
#@+node:leo1to2
def leo1to2():

    """Entry point: convert the currently selected tree to Leo 2 format."""
    import leo
    import leoGlobals
    commander = leoGlobals.top()
    current = commander.currentVnode()
    convertLeo1to2(current, commander)
#@-node:leo1to2
#@+node:convertLeo1to2
def convertLeo1to2(v,c):

    after=v.nodeAfterTree()
    while v and v != after:
        s=v.bodyString()
        print "converting:", v.headString()
        s=convertStringLeo1to2(s)
        c.setBodyString(v,s)
        v=v.threadNext()

    c.Repaint() # for backward compatibility
    print "end of leo1to2"
#@nonl
#@-node:convertLeo1to2
#@+node:convertStringLeo1to2
def convertStringLeo1to2 (s):

    """Translate one body string from Leo 1 to Leo 2 format.

    The string is split into alternating code and doc parts; each part
    is converted by its own helper and the pieces are reassembled.
    """
    # print "convertStringLeo1to2:start\n", s
    codeList = stringToList(s) ; outputList = []
    i = 0
    while i < len(codeList):
        j = skipCodePart(codeList,i)
        if j > i:
            code = codeList[i:j]
            convertCodeList1to2(code)
            i = j
            #print "-----code:", listToString(code)
            for item in code:
                outputList.append(item)
        j = skipDocPart(codeList,i)
        if j > i:
            doc = codeList[i:j]
            convertDocList(doc) # same as in c2py
            #print "-----doc:", listToString(doc)
            i = j
            for item in doc:
                outputList.append(item)

    result = listToString(outputList)
    global printFlag
    if printFlag: print "-----:\n", result
    return result
#@nonl
#@-node:convertStringLeo1to2
#@+node:convertCodeList1to2
@ We do _not_ replace @root by @file or insert @others as needed.  Inserting @others can be done easily enough by hand, and may take more global knowledge than we can reasonably expect to have.
@c

def convertCodeList1to2(list):

    """Apply the Leo-specific 1-to-2 translations, in place, to a code part.

    Order matters: section definitions must become @c before the
    leading @c/@code can be removed.
    """
    if 0: # There isn't much reason to do this.
        removeAtRoot(list)
    safeReplace(list, "@code", "@c")
    replaceSectionDefs(list)
    removeLeadingAtCode(list)
#@-node:convertCodeList1to2
#@-node:leo1to2
#@+node:c2py entry points
@ We separate the processing into two parts,

1) a leo-aware driver that iterates over @file trees and
2) a text-based part that processes one or more files or strings.
#@+node:convertCurrentTree
def convertCurrentTree():

    """Entry point: translate the presently selected tree from C to Python."""
    import c2py
    import leo
    import leoGlobals
    commander = leoGlobals.top()
    c2py.convertLeoTree(commander.currentVnode(), commander)
#@nonl
#@-node:convertCurrentTree
#@+node:convertLeoTree
def convertLeoTree(v,c):

    after=v.nodeAfterTree()
    while v and v != after:
        s=v.bodyString()
        print "converting:", v.headString()
        s=convertCStringToPython(s, doLeoTranslations )
        c.setBodyString(v,s)
        v=v.threadNext()
    c.Repaint() # for backward compatibility.
    print "end of c2py"
#@nonl
#@-node:convertLeoTree
#@+node:convertCFileToPython
def convertCFileToPython(file):

    """Translate the C file 'file' to Python, writing the result to file + ".py".

    NOTE(review): in Python, open() raises on failure rather than
    returning a false value, so the "if not f" guards look like dead
    code carried over from C -- confirm before removing.
    """
    f=open(file, 'r')
    if not f: return
    s = f.read()
    f.close() # fixed: removed stray C-style semicolon.
    f=open(file + ".py", 'w')
    if not f: return
    s = convertCStringToPython(s, dontDoLeoTranslations )
    f.write(s)
    f.close()
#@nonl
#@-node:convertCFileToPython
#@-node:c2py entry points
#@+node:convertCStringToPython & helpers (top level)
def convertCStringToPython(s, leoFlag):

    """Translate the C string s to Python and return the new string.

    When leoFlag is true, s is split into alternating code and doc
    parts and Leo directives are translated too; otherwise the whole
    string is converted as a single code part.
    """
    # print "convertCStringToPython:start\n", s
    firstPart = True
    codeList = stringToList(s)

    if not leoFlag:
        convertCodeList(codeList, firstPart, dontDoLeoTranslations)
        return listToString(codeList)

    outputList = []
    i = 0
    while i < len(codeList):
        j = skipCodePart(codeList,i)
        if j > i:
            code = codeList[i:j]
            convertCodeList(code, firstPart, doLeoTranslations)
            i = j
            #print "-----code:", listToString(code)
            for item in code:
                outputList.append(item)
        firstPart = False # don't remove @c from here on.
        j = skipDocPart(codeList,i)
        if j > i:
            doc = codeList[i:j]
            convertDocList(doc)
            #print "-----doc:", listToString(doc)
            i = j
            for item in doc:
                outputList.append(item)

    result = listToString(outputList)
    global printFlag
    if printFlag: print "-----:\n", result
    return result
#@nonl
#@+node:convertCodeList (main pattern function)
def convertCodeList(list, firstPart, leoFlag):

    """Run every conversion pass, in order, over one code part (in place).

    The pass order is significant: the textual replacements must follow
    function munging, keyword handling must follow the replacements,
    semicolon removal must follow keyword handling, and comment
    conversion must come last (after all safeReplace calls).
    """
    #first
    replace(list, "\r", None)
    convertLeadingBlanks(list)
    if leoFlag:
        replaceSectionDefs(list)
    mungeAllFunctions(list)
    #next
    safeReplace(list, " -> ", '.')
    safeReplace(list, "->", '.')
    safeReplace(list, " . ", '.')
    safeReplace(list, "this.self", "self")
    safeReplace(list, "{", None)
    safeReplace(list, "}", None)
    safeReplace(list, "#if", "if")
    safeReplace(list, "#else", "else")
    safeReplace(list, "#endif", None)
    safeReplace(list, "else if", "elif")
    safeReplace(list, "else", "else:")
    safeReplace(list, "&&", "and")
    safeReplace(list, "||", "or")
    safeReplace(list, "TRUE", "True")
    safeReplace(list, "FALSE", "False")
    safeReplace(list, "NULL", "None")
    safeReplace(list, "this", "self")
    safeReplace(list, "try", "try:")
    safeReplace(list, "catch", "except:")
    if leoFlag:
        safeReplace(list, "@code", "@c")
    #next
    handleAllKeywords(list)
    # after processing for keywords
    removeSemicolonsAtEndOfLines(list)
    #last
    if firstPart and leoFlag: removeLeadingAtCode(list)
    removeBlankLines(list)
    removeExcessWs(list)
    # your taste may vary: in Python I don't like extra whitespace
    safeReplace(list, " :", ":") 
    safeReplace(list, ", ", ",")
    safeReplace(list, " ,", ",")
    safeReplace(list, " (", "(")
    safeReplace(list, "( ", "(")
    safeReplace(list, " )", ")")
    safeReplace(list, ") ", ")")
    replaceComments(list) # should follow all calls to safeReplace
    removeTrailingWs(list)
    safeReplace(list, "\t ", "\t") # happens when deleting declarations.
#@nonl
#@-node:convertCodeList (main pattern function)
#@+node:convertDocList
def convertDocList(docList):

    """Replace a leading "@doc" (plus trailing whitespace and one newline) by "@ "."""
    if not matchWord(docList, 0, "@doc"):
        return
    end = skipWs(docList, 4)
    if match(docList, end, "\n"):
        end += 1
    docList[0:end] = list("@ ")
#@nonl
#@-node:convertDocList
#@+node:skipDocPart
def skipDocPart(list, i):

    """Return the index just past the doc part starting at list[i].

    A doc part ends at an @code/@c directive or a section definition line.
    """
    while i < len(list):
        atDirective = matchWord(list, i, "@code") or matchWord(list, i, "@c")
        if atDirective or isSectionDef(list, i):
            break
        i = skipPastLine(list, i)
    return i
#@nonl
#@-node:skipDocPart
#@+node:skipCodePart
def skipCodePart(codeList, i):

    """Return the index just past the code part starting at codeList[i].

    A code part ends at a line starting with @doc or @; strings and
    comments are skipped whole so directives inside them are ignored.
    """
    # print "skipCodePart", i
    if matchWord(codeList, i, "@doc") or matchWord(codeList, i, "@"):
        return i
    while i < len(codeList):
        if match(codeList, i, "//"):
            i = skipPastLine(codeList,i)
        elif match(codeList, i, "/*"):
            i = skipCBlockComment(codeList,i)
        elif match(codeList, i, '"') or match(codeList, i, "'"):
            i = skipString(codeList,i)
        elif match(codeList, i, "\n"):
            i += 1
            if matchWord(codeList, i, "@doc") or matchWord(codeList, i, "@"):
                break
        else: i += 1
    return i
#@nonl
#@-node:skipCodePart
#@-node:convertCStringToPython & helpers (top level)
#@+node:Scanning & Replacing...
#@+node:convertLeadingBlanks
def convertLeadingBlanks(list):

    """Convert each run of tabWidth leading blanks into one tab, in place.

    Only leading whitespace on each line is touched; does nothing when
    the global tabWidth is < 2.
    """
    global tabWidth
    if tabWidth < 2: return
    i = 0
    while i < len(list):
        n = 0
        while i < len(list) and list[i] == ' ':
            n += 1 ; i += 1
            if n == tabWidth:
                # Splice the tab in and rescan just after it.
                list[i-tabWidth:i] = ['\t']
                i = i - tabWidth + 1
                n = 0
        i = skipPastLine(list, i)
#@nonl
#@-node:convertLeadingBlanks
#@+node:mungeAllFunctions
# We scan for a '{' at the top level that is preceded by ')'.
# @code and < < x > > = have been replaced by @c.
def mungeAllFunctions(codeList):

    """Find and convert every C function definition in codeList, in place.

    prevSemi tracks the start of the candidate header: everything since
    the last semicolon, directive, '#' line, or string/comment.
    firstOpen remembers the first '(' seen since then.
    """
    prevSemi = 0 # Previous semicolon: header contains all previous text
    i = 0
    firstOpen = None
    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
            prevSemi = i
        elif match(codeList, i, '('):
            # NOTE(review): "not firstOpen" also fires when firstOpen == 0,
            # i.e. a '(' at index 0 -- confirm that is intended.
            if not firstOpen:
                firstOpen = i
            i += 1
        elif match(codeList, i, '#'):
            i = skipPastLine(codeList, i)
            prevSemi = i
        elif match(codeList, i, ';'):
            i += 1
            prevSemi = i
        elif matchWord(codeList, i, "@code"):
            i += 5
            prevSemi = i # restart the scan
        elif matchWord(codeList, i, "@c"):
            i += 2 ; prevSemi = i # restart the scan
        elif match(codeList, i, "{"):
            i = handlePossibleFunctionHeader(codeList,i,prevSemi,firstOpen)
            prevSemi = i ; firstOpen = None # restart the scan
        else: i += 1
#@nonl
#@+node:handlePossibleFunctionHeader
# converts function header lines from c++ format to python format.
# That is, converts
# x1..nn w::y ( t1 z1,..tn zn) {
# to
# def y (z1,..zn): {

def handlePossibleFunctionHeader(codeList, i, prevSemi, firstOpen):

    """Convert the function whose '{' is at codeList[i], if it is one.

    Returns the index just past the (possibly rewritten) function body.
    When the text before the '{' does not look like a function header
    (no argument list, or the head starts with a C keyword) the body is
    skipped unchanged.
    """
    assert(match(codeList,i,"{"))
    prevSemi = skipWsAndNl(codeList, prevSemi)
    close = prevNonWsOrNlChar(codeList, i)
    if close < 0 or codeList[close] != ')':
        # Not preceded by an argument list: not a function header.
        return 1 + skipToMatchingBracket(codeList, i)
    if not firstOpen:
        return 1 + skipToMatchingBracket(codeList, i)
    close2 = skipToMatchingBracket(codeList, firstOpen)
    if close2 != close:
        return 1 + skipToMatchingBracket(codeList, i)
    open = firstOpen
    assert(codeList[open]=='(')
    head = codeList[prevSemi:open]
    # do nothing if the head starts with "if", "for" or "while"
    k = skipWs(head,0)
    if k >= len(head) or not head[k] in string.letters:
        return 1 + skipToMatchingBracket(codeList, i)
    kk = skipPastWord(head,k)
    if kk > k:
        headString = listToString(head[k:kk])
        # C keywords that might be followed by '{'
        # print "headString:", headString
        if headString in [ "class", "do", "for", "if", "struct", "switch", "while"]:
            return 1 + skipToMatchingBracket(codeList, i)
    args = codeList[open:close+1]
    k = 1 + skipToMatchingBracket(codeList,i)
    body = codeList[i:k]
    #print "head:", listToString(head)
    #print "args:", listToString(args)
    #print "body:", listToString(body)
    #print "tot: ", listToString(codeList[prevSemi:k])
    head = massageFunctionHead(head)
    args = massageFunctionArgs(args)
    body = massageFunctionBody(body)
    #print "head2:", listToString(head)
    #print "args2:", listToString(args)
    #print "body2:", listToString(body)
    #print "tot2: ", listToString(codeList[prevSemi:k])
    result = []
    for item in head:
        result.append(item)
    for item in args:
        result.append(item)
    for item in body:
        result.append(item)
    # Splice the converted header + body back over the original text.
    codeList[prevSemi:k] = result
    return k
#@nonl
#@-node:handlePossibleFunctionHeader
#@+node:massageFunctionArgs
def massageFunctionArgs(args):

    """Convert a C argument list (a char list from '(' to ')') to Python.

    Keeps only the last word of each argument (the name, dropping its
    type), prepends "self," when the function belongs to a class, and
    appends ':' to close the def line.
    """
    global gClassName
    assert(args[0]=='(')
    assert(args[-1]==')')

    result = ['('] ; lastWord = []
    if gClassName:
        for item in list("self,"): result.append(item) #can put extra comma

    i = 1
    while i < len(args):
        i = skipWsAndNl(args, i)
        c = args[i]
        if c in string.letters:
            j = skipPastWord(args,i)
            lastWord = args[i:j]
            i = j
        elif c == ',' or c == ')':
            # Emit the argument name collected so far.
            for item in lastWord:
                result.append(item)
            if lastWord != [] and c == ',':
                result.append(',')
            lastWord = []
            i += 1
        else: i += 1
    if result[-1] == ',':
        # Drop the trailing comma (e.g. the one after "self").
        del result[-1]
    result.append(')')
    result.append(':')
    # print "new args:", listToString(result)
    return result
#@nonl
#@-node:massageFunctionArgs
#@+node:massageFunctionHead (sets gClassName)
def massageFunctionHead(head):

    """Convert a C function head to "def name", setting the global gClassName.

    Keeps only the last identifier (dropping return types); recognizes
    Class::Class as __init__ and Class::~Class as __del__.
    """
    # print "head:", listToString(head)
    result = []
    prevWord = []
    global gClassName ; gClassName = []
    i = 0
    while i < len(head):
        i = skipWsAndNl(head, i)
        if i < len(head) and head[i] in string.letters:
            result = [] # Discard earlier words: only the last one survives.
            j = skipPastWord(head,i)
            prevWord = head[i:j]
            i = j
            # look for ::word2
            i = skipWs(head,i)
            if match(head,i,"::"):
                # Set the global to the class name.
                gClassName = listToString(prevWord)
                # print "class name:", gClassName
                i = skipWs(head, i+2)
                if i < len(head) and (head[i]=='~' or head[i] in string.letters):
                    j = skipPastWord(head,i)
                    if head[i:j] == prevWord:
                        for item in list("__init__"): result.append(item)
                    elif head[i]=='~' and head[i+1:j] == prevWord:
                        for item in list("__del__"): result.append(item)
                    else:
                        # for item in "::": result.append(item)
                        for item in head[i:j]: result.append(item)
                    i = j
            else:
                for item in prevWord:result.append(item)
        else: i += 1

    finalResult = list("def ")
    for item in result: finalResult.append(item)
    # print "new head:", listToString(finalResult)
    return finalResult
#@nonl
#@-node:massageFunctionHead (sets gClassName)
#@+node:massageFunctionBody
def massageFunctionBody(body):

    """Apply all per-body conversions (ivars, casts, type names) and return the result."""
    return removeTypeNames(removeCasts(massageIvars(body)))
#@nonl
#@+node:massageIvars
def massageIvars(body):

    """Prefix each known ivar of the current class with "self." in body.

    The ivars come from ivarsDict, keyed by the global gClassName set
    by massageFunctionHead.  Returns the (mutated) body list.
    """
    # Fixed: dict.has_key is deprecated (and removed in Python 3);
    # the "in" operator behaves identically and works everywhere.
    if gClassName and gClassName in ivarsDict:
        ivars = ivarsDict [ gClassName ]
    else:
        ivars = []
    # print "key:ivars=", gClassName, ':', `ivars`

    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif body[i] in string.letters:
            j = skipPastWord(body,i)
            word = listToString(body[i:j])
            # print "looking up:", word
            if word in ivars:
                # Replace word by self.word and step past the replacement.
                word = "self." + word
                word = list(word)
                body[i:j] = word
                delta = len(word)-(j-i)
                i = j + delta
            else: i = j
        else: i += 1
    return body
#@nonl
#@-node:massageIvars
#@+node:removeCasts
def removeCasts(body):

    """Delete C casts like (aType *) when aType is a known class or type.

    Returns the (mutated) body list; strings and comments are skipped.
    """
    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif match(body, i, '('):
            start = i
            i = skipWs(body, i+1)
            if body[i] in string.letters:
                j = skipPastWord(body,i)
                word = listToString(body[i:j])
                i = j
                if word in classList or word in typeList:
                    # Allow any number of '*'s before the closing ')'.
                    i = skipWs(body, i)
                    while match(body,i,'*'):
                        i += 1
                    i = skipWs(body, i)
                    if match(body,i,')'):
                        i += 1
                        # print "removing cast:", listToString(body[start:i])
                        del body[start:i]
                        i = start
        else: i += 1
    return body
#@nonl
#@-node:removeCasts
#@+node:removeTypeNames
# Do _not_ remove type names when preceded by new.

def removeTypeNames(body):

    """Delete type names (and trailing '*'s) from declarations in body.

    Words following "new" are kept: "new aType(...)" must survive as a
    constructor call.  Returns the (mutated) body list.
    """
    i = 0
    while i < len(body):
        if isStringOrComment(body,i):
            i = skipStringOrComment(body,i)
        elif matchWord(body, i, "new"):
            i = skipPastWord(body,i)
            i = skipWs(body,i)
            # don't remove what follows new.
            if body[i] in string.letters:
                i = skipPastWord(body,i)
        elif body[i] in string.letters:
            j = skipPastWord(body,i)
            word = listToString(body[i:j])
            if word in classList or word in typeList:
                # Also swallow any '*'s (and the whitespace before them).
                k = skipWs(body, j)
                while match(body,k,'*'):
                    k += 1 ; j = k
                # print "Deleting type name:", listToString(body[i:j])
                del body[i:j]
            else:
                i = j
        else: i += 1
    return body
#@nonl
#@-node:removeTypeNames
#@-node:massageFunctionBody
#@-node:mungeAllFunctions
#@+node:handleAllKeywords
# converts if ( x ) to if x:
# converts while ( x ) to while x:
def handleAllKeywords(codeList):

    """Convert every if/while/for/elif statement in codeList, in place.

    Strings and comments are skipped; the per-statement work is done by
    handleKeyword.
    """
    # print "handAllKeywords:", listToString(codeList)
    i = 0
    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif ( matchWord(codeList,i,"if") or
            matchWord(codeList,i,"while") or
            matchWord(codeList,i,"for") or
            matchWord(codeList,i,"elif") ):
            i = handleKeyword(codeList,i)
        else:
            i += 1
    # print "handAllKeywords2:", listToString(codeList)
#@nonl
#@+node:handleKeyword
def handleKeyword(codeList,i):

    """Convert one keyword statement at codeList[i], returning the new index.

    Ensures one space follows the keyword, then strips the surrounding
    parentheses of the condition and appends ": ".
    """
    isFor = False
    if (matchWord(codeList,i,"if")):
        i += 2
    elif (matchWord(codeList,i,"elif")):
        i += 4
    elif (matchWord(codeList,i,"while")):
        i += 5
    elif (matchWord(codeList,i,"for")):
        i += 3
        isFor = True
    else: assert(0)
    # Make sure one space follows the keyword
    k = i
    i = skipWs(codeList,i)
    if k == i:
        c = codeList[i]
        codeList[i:i+1] = [ ' ', c ]
        i += 1
    # Remove '(' and matching ')' and add a ':'
    if codeList[i] == "(":
        j = removeMatchingBrackets(codeList,i)
        if j > i and j < len(codeList):
            c = codeList[j]
            codeList[j:j+1] = [":", " ", c]
            j = j + 2
        return j
    return i
#@nonl
#@-node:handleKeyword
#@-node:handleAllKeywords
#@+node:isX...
#@+node:isWs and isWOrNl
def isWs(c):
    """True for a blank or tab; newlines are excluded."""
    return c in (' ', '\t')

def isWsOrNl(c):
    """True for a blank, tab or newline."""
    return c in (' ', '\t', '\n')
#@nonl
#@-node:isWs and isWOrNl
#@+node:isSectionDef
# Returns the index just past ">>=" when the line at list[i] is a
# section definition (< < x > > =); returns False otherwise.
def isSectionDef(list, i):

    i = skipWs(list, i)
    if not match(list, i, "<<"):
        return False
    while i < len(list) and list[i] != '\n':
        if match(list, i, ">>="):
            return i + 3
        i += 1
    return False
#@nonl
#@-node:isSectionDef
#@+node:isStringOrComment
def isStringOrComment(list, i):

    """Truthy when list[i] starts a string or a C/C++ comment.

    Returns match's value (an int index or False), so callers must use
    the result only in a boolean context.
    """
    return match(list,i,"'") or match(list,i,'"') or match(list,i,"//") or match(list,i,"/*")
#@nonl
#@-node:isStringOrComment
#@-node:isX...
#@+node:find... & match...
#@+node:findInCode
def findInCode(codeList, i, findStringOrList):

    """Return the first index >= i where the pattern occurs outside of
    strings and comments, or -1 when it does not occur."""
    target = stringToList(findStringOrList)

    while i < len(codeList):
        if isStringOrComment(codeList, i):
            i = skipStringOrComment(codeList, i)
            continue
        if match(codeList, i, target):
            return i
        i += 1
    return -1
#@nonl
#@-node:findInCode
#@+node:findInList
def findInList(list, i, findStringOrList):

    """Return the first index >= i where the pattern occurs, or -1."""
    target = stringToList(findStringOrList)

    while i < len(list):
        if match(list, i, target):
            return i
        i += 1
    return -1
#@nonl
#@-node:findInList
#@+node:match
# Returns i + len(pattern) (a truthy index) when the whole pattern
# matches codeList at i; returns False otherwise, including for an
# empty pattern or a pattern truncated by the end of codeList.

def match (codeList, i, findStringOrList):

    findList = stringToList(findStringOrList)
    n = len(findList)
    if n == 0 or i + n > len(codeList):
        return False
    for j in range(n):
        if codeList[i+j] != findList[j]:
            return False
    return i + n
#@nonl
#@-node:match
#@+node:matchWord
def matchWord (codeList, i, findStringOrList):

    """Like match, but the character after the pattern (if any) must not
    be able to extend an identifier."""
    end = match(codeList,i,findStringOrList)
    if not end:
        return False
    if end >= len(codeList):
        return True
    following = codeList[end]
    isIdentChar = (following in string.letters or
        following in string.digits or following == '_')
    return not isIdentChar
#@nonl
#@-node:matchWord
#@-node:find... & match...
#@+node:remove...
#@+node:removeAllCComments
def removeAllCComments(list, delim):

    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            i = skipString(list,i)
        elif match(list,i,"//"):
            j = skipPastLine(list,i)
            print "deleting single line comment:", listToString(list[i:j])
            del list[i:j]
        elif match(list,i,"/*"):
            j = skipCBlockComment(list,i)
            print "deleting block comment:", listToString(list[i:j])
            del list[i:j]
        else:
            i += 1
#@nonl
#@-node:removeAllCComments
#@+node:removeAllCSentinels
def removeAllCSentinels(list, delim):

    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            # string starts a line.
            i = skipString(list,i)
            i = skipPastLine(list,i)
        elif match(list,i,"/*"):
            # block comment starts a line
            i = skipCBlockComment(list,i)
            i = skipPastLine(line,i)
        elif match(list,i,"//@"):
            j = skipPastLine(list,i)
            print "deleting sentinel:", listToString(list[i:j])
            del list[i:j]
        else:
            i = skipPastLine(list,i)
#@nonl
#@-node:removeAllCSentinels
#@+node:removeAllPythonComments
def removeAllPythonComments(list, delim):

    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            i = skipString(list,i)
        elif match(list,i,"#"):
            j = skipPastLine(list,i)
            print "deleting comment:", listToString(list[i:j])
            del list[i:j]
        else:
            i += 1
#@nonl
#@-node:removeAllPythonComments
#@+node:removeAllPythonSentinels
def removeAllPythonSentinels(list, delim):

    i = 0
    while i < len(list):
        if match(list,i,"'") or match(list,i,'"'):
            # string starts a line.
            i = skipString(list,i)
            i = skipPastLine(list,i)
        elif match(list,i,"#@"):
            j = skipPastLine(list,i)
            print "deleting sentinel:", listToString(list[i:j])
            del list[i:j]
        else:
            i = skipPastLine(list,i)
#@nonl
#@-node:removeAllPythonSentinels
#@+node:removeAtRoot
def removeAtRoot (codeList):

    """Delete every @root directive that starts a line, in place.

    The first directive may be preceded only by whitespace; later ones
    must follow a newline.  Strings and comments are skipped.
    """
    i = skipWs(codeList, 0)
    if matchWord(codeList,i,"@root"):
        j = skipPastLine(codeList,i)
        del codeList[i:j]

    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif match(codeList,i,"\n"):
            i = skipWs(codeList, i+1)
            if matchWord (codeList,i,"@root"):
                j = skipPastLine(codeList,i)
                del codeList[i:j]
        else: i += 1
#@-node:removeAtRoot
#@+node:removeBlankLines
def removeBlankLines(codeList):

    """Delete every line of codeList that holds only blanks and tabs, in place.

    Fixed: removed the unused local 'oldi'.
    """
    i = 0
    while i < len(codeList):
        j = i
        while j < len(codeList) and (codeList[j]==" " or codeList[j]=="\t"):
            j += 1
        if j == len(codeList) or codeList[j] == '\n':
            del codeList[i:j+1] # Delete the whole line, newline included.
        else:
            i = skipPastLine(codeList,i)
#@nonl
#@-node:removeBlankLines
#@+node:removeExcessWs
def removeExcessWs(codeList):

    """Collapse interior whitespace runs on every line, in place.

    Leading whitespace is preserved by removeExcessWsFromLine; strings
    and comments are skipped whole.
    """
    i = 0
    i = removeExcessWsFromLine(codeList,i)
    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif match(codeList,i,'\n'):
            i += 1
            i = removeExcessWsFromLine(codeList,i)
        else: i += 1
#@nonl
#@+node:removeExessWsFromLine
def removeExcessWsFromLine(codeList,i):

    """Collapse each interior whitespace run on the line at i to one blank.

    i must be the start of a line; leading whitespace is skipped
    untouched.  Returns the index where the scan stopped.
    """
    assert(i==0 or codeList[i-1] == '\n')
    i = skipWs(codeList,i)
    while i < len(codeList):
        if isStringOrComment(codeList,i): break # safe
        elif match(codeList, i, '\n'): break
        elif match(codeList, i, ' ') or match(codeList, i, '\t'):
            # Replace all whitespace by one blank.
            k = i
            i = skipWs(codeList,i)
            codeList[k:i] = [' ']
            i = k + 1 # make sure we don't go past a newline!
        else: i += 1
    return i
#@nonl
#@-node:removeExessWsFromLine
#@-node:removeExcessWs
#@+node:removeLeadingAtCode
def removeLeadingAtCode(codeList):

    """Delete a leading @code or @c directive plus trailing whitespace, in place.

    Bug fix: the post-directive skip previously started at the absolute
    offsets 5/2 rather than just after the directive found at i, so any
    leading whitespace corrupted the deletion.
    """
    i = skipWsAndNl(codeList,0)
    if matchWord(codeList,i,"@code"):
        j = skipWsAndNl(codeList, i+5)
        del codeList[0:j]
    elif matchWord(codeList,i,"@c"):
        j = skipWsAndNl(codeList, i+2)
        del codeList[0:j]
#@nonl
#@-node:removeLeadingAtCode
#@+node:removeMatchingBrackets
def removeMatchingBrackets(codeList, i):

    """Delete the bracket at i and its matching closer, in place.

    Returns the index of the character that followed the (now removed)
    closing bracket; when nothing was removed, returns the index given
    back by skipToMatchingBracket.
    """
    j = skipToMatchingBracket(codeList, i)
    if not (i < j < len(codeList)):
        return j
    closer = codeList[j]
    if closer in (')', ']', '}'):
        del codeList[j:j+1]
        del codeList[i:i+1]
        return j - 1
    return j + 1
#@nonl
#@-node:removeMatchingBrackets
#@+node:removeSemicolonsAtEndOfLines
def removeSemicolonsAtEndOfLines(list):

    """Delete each ';' that is the last code on its line, in place.

    A semicolon followed only by whitespace, a '#' comment or a "//"
    comment counts as end-of-line; strings and comments are skipped.
    """
    i = 0
    while i < len(list):
        if isStringOrComment(list,i):
            i = skipStringOrComment(list,i)
        elif list[i] == ';':
            j = skipWs(list,i+1)
            if j >= len(list) or match(list,j,'\n') or match(list,j,'#') or match(list,j,"//"):
                del list[i]
            else: i += 1
        else: i += 1
#@nonl
#@-node:removeSemicolonsAtEndOfLines
#@+node:removeTrailingWs
def removeTrailingWs(list):

    """Delete whitespace runs that end a line (or the whole list), in place."""
    i = 0
    while i < len(list):
        if not isWs(list[i]):
            i += 1
            continue
        start = i
        i = skipWs(list,i)
        assert(start < i)
        atLineEnd = i >= len(list) or list[i] == '\n'
        if atLineEnd:
            del list[start:i]
            i = start
#@nonl
#@-node:removeTrailingWs
#@-node:remove...
#@+node:replace... & safeReplace
#@+node:replace
# Replaces every occurrence of findString by changeString, in place.
# Deletes the occurrences when changeString is None.
def replace(codeList, findString, changeString):

    if len(findString)==0: return
    target = stringToList(findString)
    replacement = stringToList(changeString)

    i = 0
    while i < len(codeList):
        if match(codeList, i, target):
            codeList[i:i+len(target)] = replacement
            i += len(replacement)
        else: i += 1
#@nonl
#@-node:replace
#@+node:replaceComments
# For Leo we expect few block comments; doc parts are much more common.

def replaceComments(codeList):

    """Convert C/C++ comments to Python '#' comments, in place.

    "//" becomes "#"; a block comment loses its closing "* /" and gets
    "# " inserted after each interior newline.  Must run after all
    safeReplace calls (they do not understand '#' comments).
    """
    i = 0
    if match(codeList, i, "//"):
        codeList[0:2] = ['#']
    while i < len(codeList):
        if match(codeList, i, "//"):
            codeList[i:i+2] = ['#']
            i = skipPastLine(codeList,i)
        elif match(codeList, i, "/*"):
            j = skipCBlockComment(codeList,i)
            del codeList[j-2:j] # drop the closing */
            codeList[i:i+2] = ['#']
            # delta tracks how much the splices below grow the list.
            j -= 2 ; k = i ; delta = -1
            while k < j + delta :
                if codeList[k]=='\n':
                    codeList[k:k+1] = ['\n', '#', ' ']
                    delta += 2 ; k += 3 # progress!
                else: k += 1
            i = j + delta
        elif match(codeList, i, '"') or match(codeList, i, "'"):
            i = skipString(codeList,i)
        else: i += 1
#@nonl
#@-node:replaceComments
#@+node:replaceSectionDefs
# Replaces < < x > > = by @c (at the start of lines).
def replaceSectionDefs(codeList):

    """Replace each section definition line by "@c ", in place.

    The first line is checked directly; later lines are found via their
    preceding newline.  Strings and comments are skipped.
    """
    i = 0
    j = isSectionDef(codeList,i)
    if j > 0: codeList[i:j] = list("@c ")

    while i < len(codeList):
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif match(codeList,i,"\n"):
            i += 1
            j = isSectionDef(codeList,i)
            if j > i: codeList[i:j] = list("@c ")
        else: i += 1
#@nonl
#@-node:replaceSectionDefs
#@+node:safeReplace
# Replaces occurrences of findString by changeString outside of C comments and strings.
# Deletes all occurrences if changeString is None.
def safeReplace(codeList, findString, changeString):

    """Like replace, but never alters text inside strings or comments.

    When findString starts with a letter, whole-word matching is used
    so e.g. "this" does not match inside "something".
    """
    if len(findString)==0: return
    findList = stringToList(findString)
    changeList = stringToList(changeString)
    i = 0
    if findList[0] in string.letters: #use matchWord
        while i < len(codeList):
            if isStringOrComment(codeList,i):
                i = skipStringOrComment(codeList,i)
            elif matchWord(codeList, i, findList):
                codeList[i:i+len(findList)] = changeList
                i += len(changeList)
            else: i += 1
    else: #use match
        while i < len(codeList):
            if match(codeList, i, findList):
                codeList[i:i+len(findList)] = changeList
                i += len(changeList)
            else: i += 1
#@nonl
#@-node:safeReplace
#@-node:replace... & safeReplace
#@+node:skip... & prev...
#@+node:prevNonWsChar and prevNonWsOrNlChar
def prevNonWsChar(list, i):
    """Return the index of the nearest non-whitespace char before i, or -1."""
    j = i - 1
    while j >= 0:
        if not isWs(list[j]):
            break
        j -= 1
    return j

def prevNonWsOrNlChar(list, i):
    """Return the index of the nearest char before i that is neither
    whitespace nor a newline, or -1."""
    j = i - 1
    while j >= 0:
        if not isWsOrNl(list[j]):
            break
        j -= 1
    return j
#@nonl
#@-node:prevNonWsChar and prevNonWsOrNlChar
#@+node:skipCBlockComment
def skipCBlockComment(codeList, i):
    """Skip a C block comment starting at i; return the index just past
    the closing */ (or len(codeList) if unterminated)."""

    assert(match(codeList, i, "/*"))
    i += 2
    n = len(codeList)
    while i < n:
        if match(codeList, i, "*/"):
            return i + 2
        i += 1
    return i
#@nonl
#@-node:skipCBlockComment
#@+node:skipPastLine
def skipPastLine(codeList, i):

    while i < len(codeList) and codeList[i] != '\n':
        i += 1
    if i < len(codeList) and codeList[i] == '\n':
        i += 1
    return i
#@nonl
#@-node:skipPastLine
#@+node:skipPastWord
def skipPastWord(list, i):
    """Return the index just past the identifier-like word starting at list[i].

    Accepts a leading '~' so C++ destructor names are treated as one word.
    NOTE(review): relies on Python 2's string.letters/string.digits;
    string.letters does not exist in Python 3."""

    assert(list[i] in string.letters or list[i]=='~')

    # Kludge: this helps recognize dtors.
    if list[i]=='~':
        i += 1

    # Consume letters, digits and underscores.
    while i < len(list) and (
        list[i] in string.letters or
        list[i] in string.digits or
        list[i]=='_'):
        i += 1
    return i
#@nonl
#@-node:skipPastWord
#@+node:skipString
def skipString(codeList, i):
    """Skip the single- or double-quoted string starting at i.

    Returns the index just past the closing quote, honoring backslash
    escapes; returns len(codeList) for an unterminated string."""

    delim = codeList[i] # handle either single or double-quoted strings
    assert(delim == '"' or delim == "'")
    i += 1
    n = len(codeList)
    while i < n:
        ch = codeList[i]
        if ch == delim:
            return i + 1
        if ch == '\\':
            i += 2 # An escaped char can never close the string.
        else:
            i += 1
    return i
#@nonl
#@-node:skipString
#@+node:skipStringOrComment
def skipStringOrComment(list, i):
    """Dispatch to the proper skip routine for the string or comment at i."""

    if match(list, i, "'") or match(list, i, '"'):
        return skipString(list, i)
    elif match(list, i, "//"):
        return skipPastLine(list, i)
    elif match(list, i, "/*"):
        return skipCBlockComment(list, i)
    else:
        # Caller must guarantee a string or comment starts at i.
        assert(0)
#@nonl
#@-node:skipStringOrComment
#@+node:skipToMatchingBracket
def skipToMatchingBracket(codeList, i):
    """Return the index of the bracket matching the one at codeList[i].

    Skips strings and comments, and recurses to handle nested brackets
    of any kind. Returns len(codeList) if no match is found."""

    c = codeList[i]
    if   c == '(': delim = ')'
    elif c == '{': delim = '}'
    elif c == '[': delim = ']'
    else: assert(0) # Caller must position i on an opening bracket.

    i += 1
    while i < len(codeList):
        c = codeList[i]
        if isStringOrComment(codeList,i):
            i = skipStringOrComment(codeList,i)
        elif c == delim:
            return i
        elif c == '(' or c == '[' or c == '{':
            # Recurse for any nested bracket, then step past its close.
            i = skipToMatchingBracket(codeList,i)
            i += 1 # skip the closing bracket.
        else: i += 1
    return i
#@nonl
#@-node:skipToMatchingBracket
#@+node:skipWs and skipWsAndNl
def skipWs(list, i):
    """Skip spaces and tabs; return the index of the first other char."""

    n = len(list)
    while i < n and list[i] in (' ', '\t'):
        i += 1
    return i

def skipWsAndNl(list, i):
    """Skip spaces, tabs and newlines; return the index of the first other char."""

    n = len(list)
    while i < n and list[i] in (' ', '\t', '\n'):
        i += 1
    return i
#@nonl
#@-node:skipWs and skipWsAndNl
#@-node:skip... & prev...
#@+node:stringToList & listToString
#@+node:stringToList
# Converts a string to a list containing one item per character of the string.
# Converts None (or any false value) to the empty list.

# list(string) does not work on None.
def stringToList(string):
    """Return a list with one item per character of string.

    None (and any false value) maps to the empty list."""

    if not string:
        return []
    return list(string)
#@nonl
#@-node:stringToList
#@+node:listToString
def listToString(list):
    """Concatenate the items of the list into a single string.

    Uses str.join instead of the Python-2-only string.join; the two are
    equivalent for joining with the empty separator."""

    return "".join(list)
#@nonl
#@-node:listToString
#@-node:stringToList & listToString
#@-node:Scanning & Replacing...
#@-node:c2py  Convert C code to Python syntax
#@+node:Catch mixed newlines in distribution files
import glob

@others

verbose = True

# extraFiles = ('..\\install','..\\uninstall','test\\unittest\\perfectImport\\formatter.py',)
extraFiles = ('test\\test-line-ending.py',)

baseDir = g.os_path_abspath(g.os_path_join(g.app.loadDir,'..'))
n = 0
dirs = ('','src','scripts','plugins',)
dirs = ('src',)

if 0:
    for dir2 in dirs:
        theDir = g.os_path_abspath(g.os_path_join(baseDir,dir2))
        print theDir
        files1 = glob.glob('%s\*.py' % theDir)
        files2 = glob.glob('%s\*.leo' % theDir)
        files = files1
        files.extend(files2)
        for name in files:
            f = file(name)
            lines = f.readlines()
            # print '%5d %s' % (len(lines),name)
            f.close()
            check(name,lines)
            n += 1

for z in extraFiles:
    name = g.os_path_abspath(g.os_path_join(baseDir,z))
    f = file(name)
    lines = f.readlines()
    # print '%5d %s' % (len(lines),name)
    f.close()
    check(name,lines)
    n += 1

print 'done.  checked %d files' % (n)
#@+node:check
def check(name,lines):
    """Count CR and NL characters in lines and report the totals.

    Prints a summary line when the global 'verbose' is set or any CR was
    seen (CRs suggest mixed or Windows line endings in a .py file)."""

    cr = 0 ; nl = 0

    # NOTE(review): the inner loop uses a 7-space indent with an 8-space
    # body -- legal Python but fragile to edit.
    for s in lines:
       for ch in s:
        if ch == '\r': cr += 1
        if ch == '\n': nl += 1

    if verbose or cr > 0:
        print "%55s %s cr's %4s nl's" % (name,cr,nl)
#@-node:check
#@-node:Catch mixed newlines in distribution files
#@+node:Catch mixed tab & space in derived files
import glob

@others

baseDir = g.os_path_abspath(g.os_path_join(g.app.loadDir,'..'))
n = 0
for dir2 in ('','src','scripts','plugins',):

    theDir = g.os_path_abspath(g.os_path_join(baseDir,dir2))
    print theDir
    files = glob.glob('%s\*.py' % theDir)
    for name in files:
        f = file(name)
        lines = f.readlines()
        # print '%5d %s' % (len(lines),name)
        f.close()
        check(name,lines)
        n += 1

print 'done.  checked %d files' % (n)
#@+node:check
def check(name,lines):
    """Print every line of 'lines' whose leading whitespace mixes
    spaces and tabs, with its file name and 1-based line number."""

    n = 0 # 1-based line number.
    for line in lines:
        n += 1
        i = g.skip_ws(line,0)
        lws = line[0:i] # The line's leading whitespace.
        if lws:
            # Flag only when both a space and a tab appear in the lws.
            if lws.find(' ') > -1 and lws.find('\t') > -1:
                print '%20s %5d %s' % (name,n,repr(line))
#@nonl
#@-node:check
#@-node:Catch mixed tab & space in derived files
#@+node:Check Changes
# A script to check to see all changes required by the 'big reorg' got done properly.

<< define data >>
<< define regex patterns >>

@others

g.es_print('='*40)

g.es_print('%s %s' % ('-'*20,'must have a c arg'))
for p in c.allNodes_iter():
    findFunctionCalls(add_c_arg,p,showEmptyArgs=True)

g.es_print('%s %s' % ('-'*20,'these methods should not exist'))
for p in c.allNodes_iter():
    findFunctionCalls(removed,p,showEmptyArgs=True)

g.es_print('%s %s' % ('-'*20,'p methods should have no c arg'))
for p in c.allNodes_iter():
    findFunctionCalls(p_methods,p)

g.es_print('%s %s' % ('-'*20,'v methods should have no c arg'))
for p in c.allNodes_iter():
    findFunctionCalls(v_methods,p)

g.es_print('%s %s' % ('-'*20,'c methods should start with a position arg'))
for p in c.allNodes_iter():
    findFunctionCalls(c_methods,p)

g.es_print('done')
#@nonl
#@+node:<< define data >>
removed = (
    'p.allNodes_iter',
    'p.all_positions_iter',
    'p.appendStringToBody',
    'p.edit_widget',
    'p.lastVisible',
    'p.positionExists',
    'p.setBodyString',
    'p.trimTrailingLines',
    'p.setHeadString',
    'p.setBodyStringOrPane',
    'v.setBodyStringOrPane',
    'c.setBodyStringOrPane',
    'p.setBodyTextOrPane',
    'v.setBodyTextOrPane',
    'c.setBodyTextOrPane',
    'p.setHeadStringOrPane',
    'v.setHeadStringOrPane',
    'c.setHeadStringOrPane',
    'p.setHeadTextOrPane',
    'v.setHeadTextOrPane',
    'c.setHeadTextOrPane',
    #'p.clearMarked',
    #'p.initHeadString',
    #'p.setMarked',
)

add_c_arg = ( # Must have a c arg.
    'g.findReference',
)

p_methods = (  # Should not have c arg.
    'p.clone',
    'p.doDelete',
    'p.linkAsRoot',
    'p.moveAfter'
    'p.moveToLastChildOf',
    'p.moveToNthChildOf',
    'p.moveToRoot',
    'p.setDirty',
    'p.unlink',
)

v_methods = ( # Should not have c arg.
    'v.linkAsRoot',
    'v.moveToRoot',
    'v.unlink',
)

c_methods = (
    'c.appendStringToBody',
    'c.clearMarked',
    'c.edit_widget',
    'c.initHeadString',
    'c.lastVisible',
    'c.positionExists',
    'c.setBodyString',
    'c.setMarked',
    'c.trimTrailingLines',
    'c.setHeadString',
)
#@-node:<< define data >>
#@+node:<< define regex patterns >>
import re

# Allow . in word.
function_call_re = re.compile(r'([a-zA-Z_][a-zA-Z_0-9.]*)\s*(\(.*\))')
#@nonl
#@-node:<< define regex patterns >>
#@+node:findFunctionCalls
def findFunctionCalls (names,p,showEmptyArgs=False):

    '''Find all calls to the functions listed in names in node p.

    Prints each hit via g.es_print and returns a list of tuples
    (s0, s1, s2) where:

    - s0 is the entire match.
    - s1 is the function name.
    - s2 is the parenthesized argument list.

    When showEmptyArgs is False, calls with an empty argument list are
    ignored.'''

    result = [] ; start = 0 ; body = p.bodyString()
    while start < len(body):
        mo = function_call_re.search(body,start)
        if not mo:
            break
        end = mo.end()
        s0 = mo.group(0) or ''
        s1 = mo.group(1) or ''
        s2 = (mo.group(2) or '').strip()
        args = s2[1:-1].strip() # The args without the parens.
        if s1 in names:
            if args or showEmptyArgs: # or not args.startswith('self'):
                g.es_print('%20s (%s)' % (s1,args))
                result.append((s0,s1,s2),)
        assert(end > start) # Guarantee progress.
        start = end
    # Bug fix: previously the function fell off the end and returned None
    # when the scan consumed the whole body (or the body was empty).
    return result
#@nonl
#@-node:findFunctionCalls
#@-node:Check Changes
#@+node:Create @menus tree from Leo's internal menu tables
# Convert standard tables to list of @item nodes
m = c.frame.menu
# A representation of code/data in defineMenuTables and createMenusFromTables.
@others
data = (
    ('File',[
        (None,m.fileMenuTopTable),
        ('Open &With...',[]), ###
        (None,m.fileMenuTop2Table),
        ('&Read/Write...',m.fileMenuReadWriteMenuTable),
        ('Tan&gle...',m.fileMenuTangleMenuTable),
        ('&Untangle...',m.fileMenuUntangleMenuTable),
        ('&Import...',m.fileMenuImportMenuTable),
        ('&Export...',m.fileMenuExportMenuTable),
        (None,m.fileMenuTop3MenuTable),
        ]),
    ('Edit',[
        (None,m.editMenuTopTable),
        ('Edit &Body...',m.editMenuEditBodyTable),
        ('Edit &Headline...',m.editMenuEditHeadlineTable),
        ('&Find...',m.editMenuFindMenuTable),
        (None,m.editMenuTop2Table),
        ]),
    ('Outline',[
        (None,c.frame.menu.outlineMenuTopMenuTable),
        ('Chec&k...',m.outlineMenuCheckOutlineMenuTable),
        ('E&xpand/Contract...',m.outlineMenuExpandContractMenuTable),
        ('&Move...',m.outlineMenuMoveMenuTable),
        ('M&ark...',m.outlineMenuMarkMenuTable),
        ('&Go To...',m.outlineMenuGoToMenuTable),
        ]),
    ('Plugins',[
        ]), # A placeholder.
    ('Cmds',[
        ('&Abbrev...',m.cmdsMenuAbbrevTable),
        ('Body E&ditors...',m.cmdsMenuBodyEditorsTable),
        ('&Buffers...',m.cmdsMenuBuffersTable),
        ('&Chapters...',m.cmdsMenuChaptersTable),
        ('C&ursor/Selection...',[]), ### Has several submenus...Must be placed by hand.
            ('Cursor &Back...',m.cursorMenuBackTable),
            ('Cursor Back &Extend Selection...',m.cursorMeuuBackExtendTable),
            ('Cursor Back Extend &to...',m.cursorMenuExtendTable),
            ('Cursor &Forward...',m.cursorMenuForwardTable),
            ('Cursor Forward E&xtend Selection...',m.cursorMenuForwardExtendTable),
        ('&Focus...',m.cmdsMenuFocusTable),
        ('&Macro...',m.cmdsMenuMacroTable),
        ('M&inibuffer...',m.cmdsMenuMinibufferTable),
        ('&Pickers...',m.cmdsMenuPickersTable),
        ('&Rectangles...',m.cmdsMenuRectanglesTable),
        ('Re&gisters...',m.cmdsMenuRegistersTable),
        ('R&un Script/Tests...',m.cmdsMenuRunTable),
        ('Scr&olling...',m.cmdsMenuScrollTable),
        ('Spell C&heck...',m.cmdsMenuSpellCheckTable),
        ('&Text Commands...',m.cmdsMenuTextTable),
        ('Toggle Setti&ngs...',m.cmdsMenuToggleTable),
        ]),
    ('Window',[
        (None,m.windowMenuTopTable),
        ]),
    ('Help',[
        (None,m.helpMenuTable),
        ]),
)

c.beginUpdate()
try:
    for menuName,tables in data:
        # print menuName,tables
        p2 = p.insertAsLastChild()
        p2.initHeadString('@menu '+menuName)
        for aTuple in tables:
            subMenuName,aList = aTuple
            if subMenuName:
                p3 = p2.insertAsLastChild()
                p3.initHeadString('@menu '+subMenuName)
            else:
                p3 = p2
            for z in aList:
                p4 = p3.insertAsLastChild()
                setNode(p4,z)

finally:
    c.endUpdate()
#@+node:setNode
def setNode (p,data):

    """Set the headline (and optional body) of position p from a menu-table entry.

    data may be a plain string (the headline) or a 1- or 2-item
    tuple/list: (head,) or (body, head). Anything else is traced and
    ignored. Empty/whitespace-only values are skipped."""

    head = body = None

    # Idiom fix: use isinstance instead of type(...) == type(...).
    if isinstance(data, str):
        head = data
    elif isinstance(data, (tuple, list)):
        if   len(data) == 1: head = data[0]
        elif len(data) == 2: body, head = data
        else: g.trace('bad tuple: ',repr(data))
    else: g.trace('bad data: ',repr(data))

    if head and head.strip():
        p.initHeadString('@item ' + head.strip())
    if body and body.strip():
        p.setTnodeText(body.strip())
#@-node:setNode
#@-node:Create @menus tree from Leo's internal menu tables
#@+node:Export to treepad
# simple script to export current node and children as a treepad document
# the file format for treepad 2.x is simple.
# See: "TreePad 2.x File format" at http://www.treepad.com/docs/

#need to start the levels at 0
topLevel = p.level()
fileName = "exported.hjt"
nl = "\n"
mode = g.choose(c.config.output_newline=="platform",'w','wb')
try:
    theFile = open(fileName,mode)
    theFile.write("<hj-Treepad version 2.7>" + nl)
    for p in p.copy().self_and_subtree_iter():
        theFile.write("dt=text" + nl)
        theFile.write("<node>" + nl)
        theFile.write(p.headString() + nl)
        theFile.write(repr(p.level() - topLevel) + nl)
        theFile.write(p.bodyString() + nl)
        theFile.write("<end node> 5P9i0s8y19Z" + nl)
    theFile.close()
    g.es("Wrote to file " + fileName,color="blue")
except IOError:
    g.es("Can not open " + fileName,color="blue")
#@nonl
#@-node:Export to treepad
#@+node:Gc/Profile/Timeit/Tracing scripts
@language python
#@nonl
#@+node:runProfile
import profile
import pstats

# Note: the profiled code should do all needed imports.
path = g.os_path_abspath(g.os_path_join(g.app.loadDir,'..','test','leoProfile.txt'))
path = str(path)

if p.bodyString().rstrip():
    s = p.bodyString().rstrip() + '\n'
    profile.run(s,path)
    print '-' * 40
    print "Profiling info sent to %s" % path
    stats = pstats.Stats(path)
    stats.strip_dirs()
    stats.sort_stats('cum','file','name')
    stats.print_stats()
#@-node:runProfile
#@+node:runTimeit
@ Improved timeit script after an idea by 'e'.

Comments of the form #@count nnn set the repeat count.
Comments of the form #@setup comment delimits the end of setup code.
@c

try:
    import timeit # Exists only in Python 2.3 and above.
except ImportError: 
    timeit = None
    print "Can not import timeit"

if timeit and p.bodyString().strip():
    s = p.bodyString().rstrip() + '\n'
    << scan for #@count >>
    << put setup code in s1 and everything else in s2 >>
    t = timeit.Timer(stmt=s2,setup=s1)
    try:
        if 1: # faster.
            result = t.timeit(count)
        else: # better results.
            result = min(t.repeat(3, count))  
        print "count: %d : %f %s" % (
            count, result, p.headString().strip())
    except:
        t.print_exc()
#@+node:<< scan for #@count >>
lines = s.split('\n')

count = 1000000 # default count
tag = "#@count"

for line in lines:
    i = g.skip_ws(line,0)
    if g.match(line,0,tag):
        i += len(tag)
        i = g.skip_ws(line,i)
        junk,val = g.skip_long(line,i)
        if val is not None:
            count = abs(val)
            # print "Setting count to",count
            break
#@nonl
#@-node:<< scan for #@count >>
#@+node:<< put setup code in s1 and everything else in s2 >>
lines = s.split('\n')

for i in xrange(len(lines)):
    if lines[i].strip() == "#@setup":
        break

if i < len(lines):
    # Split at the #@setup line and delete the #@setup line
    s1 = '\n'.join(lines[:i])
    s2 = '\n'.join(lines[i:])
    #print "setup",repr(s1)
    #print "code",repr(s2)
else:
    # There is no setup.
    s1 = None
    s2 = s

if not s1: s1 = 'pass'
if not s2: s2 = 'pass'
#@nonl
#@-node:<< put setup code in s1 and everything else in s2 >>
#@-node:runTimeit
#@+node:Run gc
import leoTest

leoTest.runGc(disable=True)
#@nonl
#@-node:Run gc
#@+node:Set tracing
tracePositions = False
traceDrawing = True

# This trace catches leaks of positions.
g.app.tracePositions = tracePositions

# This trace shows overall position statistics.
c.frame.tree.trace = traceDrawing
c.frame.tree.verbose = True
#@nonl
#@-node:Set tracing
#@+node:test of new timit script
import string

#@verbatim
#@count 10000
#@verbatim
#@setup

s = ''
for i in range(0, 50):
    s += '%d'%i
s = string.replace(s, '2', '1')
#@nonl
#@-node:test of new timit script
#@-node:Gc/Profile/Timeit/Tracing scripts
#@+node:HTMLize scripts
#@+node:dyna htmlize
'''A script to send the colorized text of a script to the default web browser.

Based on a dynascript by 'e'.'''

@language python
@tabwidth -4

<< imports >>

hopts = {
  'stripcomments':  False,
  'stripsentinals': True,
  'stripnodesents': False, # False: leave node sentinels.
  'stripdirectives':False,
  'noNUMBER':       False,
  'noOP':           False,
  'noNAME':         True,  
  'timestring':     '' # time.strftime('%m/%d/%Y %H:%M.%S'),
}

<< init globals >>

filename = g.os_path_join(g.app.loadDir,'..','test','leoTemp.html')

@others

htmlize(c,p)
#@nonl
#@+node:<< imports >>
import cgi
import cStringIO
import keyword
import os
import re
import sys
import time
import token
import tokenize
import webbrowser
#@nonl
#@-node:<< imports >>
#@+node:<< init globals >>
_KEYWORD = token.NT_OFFSET + 1
_TEXT    = token.NT_OFFSET + 2

_colors = {
    token.NUMBER:     '#483D8B', #black/darkslateblue
    token.OP:         '#000080', #black/navy
    token.STRING:     '#00AA00', #green 00cc66
    tokenize.COMMENT: '#DD0000', #red cc0033
    token.NAME:       '#4B0082', #black/indigo
    token.ERRORTOKEN: '#FF8080', #redred bare null does it
    _KEYWORD:         '#0066ff', #blue
    _TEXT:            '#000000', #black /is text fg color too
    '_LeoDir':        '#228B22', #directive, forest comment
    '_LeoSen':        '#BC8F8F', #sentinel, tan fade comment
    'bg':             '#FFFAFA', #snow
}

if hopts['noNUMBER']: del _colors[token.NUMBER]
if hopts['noOP']:     del _colors[token.OP]
if hopts['noNAME']:   del _colors[token.NAME]
#@nonl
#@-node:<< init globals >>
#@+node:stripSentinels
def stripSentinels(s):

    '''Remove Leo sentinel lines from s, honoring the global hopts options.

    Lines protected by #@verbatim are kept as-is; @directives and node
    sentinels are kept or dropped per hopts; all other #@ lines go.'''

    tag1 = '#@+node:'
    tag2 = '#@-node:'
    n = len(tag1) # tag1 and tag2 have the same length.
    kept = [] ; verbatim = False

    for line in s.splitlines():
        stripped = line.strip()
        if verbatim:
            # The previous line was #@verbatim: keep this one unchanged.
            kept.append(line)
            verbatim = False
            continue
        if stripped.startswith('#@verbatim'):
            verbatim = True
        elif stripped.startswith('#@@'):
            if not hopts['stripdirectives']:
                kept.append(line)
        elif stripped.startswith(tag1):
            if not hopts['stripnodesents']:
                i = line.find(tag1)
                kept.append(line[:i] + '#@+' + line[i+n:].strip())
        elif stripped.startswith(tag2):
            if not hopts['stripnodesents']:
                i = line.find(tag2)
                kept.append(line[:i] + '#@-' + line[i+n:].strip())
        elif not stripped.startswith('#@'):
            kept.append(line)

    return '\n'.join(kept)
#@nonl
#@-node:stripSentinels
#@+node:sanitize
def sanitize(s):

    """A filename sanitizer (Leo's own is too aggressive and too lax).

    Maps each forbidden character to '_', collapses '__' once, and
    truncates to 128 chars. Returns None for a false input."""

    if not s:
        return

    forbidden = re.compile(
        r"""[|\\ /!@=\#\$%,\x5E&\x3F:;.\x22\x27<>`~\*\+\t\n\f\r\b\a]""",
        re.IGNORECASE | re.VERBOSE)

    # should test for unicode before str()
    cleaned = forbidden.sub('_', str(s.strip()))
    return cleaned.replace('__', '_')[:128]
#@nonl
#@-node:sanitize
#@+node:class Parser
class Parser(object):
    """ prep the source for any language
        parse and Send colored python source.
    """
    @others
#@nonl
#@+node:__init__
def __init__(self,lang,raw,title):
    """ Store the source text and build the Leo-directive regex.

    lang:  language name; 'python' uses '#', otherwise the comment
           delimiter comes from Leo's @language/@comment directives.
    raw:   the script source text (tabs expanded to 4 spaces).
    title: title used for the generated html page."""

    self.title = title
    self.raw = raw.strip().expandtabs(4)

    if lang == 'python':
        cmtdelim = '#'
    else:
        # Derive the single-line comment delimiter from Leo's directives.
        d = g.scanDirectives(c, p) 
        cmtdelim = d.get('delims',['#'])
        cmtdelim = cmtdelim[0] or cmtdelim[1]

    # NOTE(review): cmtdelim is interpolated into the pattern unescaped;
    # a delimiter containing regex metacharacters would break this.
    self.fnd = re.compile(r"%s@\s*@+."%(cmtdelim,) )

    if hopts['stripsentinals']: 
        # have to add option to strip all comments as well
        self.raw = stripSentinels(self.raw)
#@nonl
#@-node:__init__
#@+node:format
def format(self,formatter,form):
    """ Parse self.raw and write it as colorized html to sys.stdout.

    The formatter and form arguments are unused; tokenize calls
    self() (see __call__) once per token to emit the colored text."""

    # store line offsets in self.lines (index by 1-based row number).
    self.lines = [0,0]
    pos = 0
    while 1:
        pos = self.raw.find('\n',pos) + 1
        if not pos: break
        self.lines.append(pos)
    self.lines.append(len(self.raw))
    self.pos = 0
    text = cStringIO.StringIO(self.raw)
    sys.stdout.write('<html><head><title>')
    sys.stdout.write('%s </title>\n'%(sanitize(self.title), ))

    #here would be a good spot for @noindent directive but skip a line
    s = """<STYLE TYPE="text/css"><!--
pre, H1 {color:%s; FONT-SIZE: 80%%; FONT-WEIGHT: bold; }
Text {background:%s;}
--></STYLE>
<SCRIPT LANGUAGE="JavaScript">
<!-- //
//-->
</SCRIPT>""" % (_colors[_TEXT],_colors['bg'])

    sys.stdout.write(s)
    sys.stdout.write('</head><body text="%s" bgColor="%s">' % (_colors[_TEXT],_colors['bg']))
    sys.stdout.write('<H3># %s</H3>\n'%self.title)
    sys.stdout.write('<pre>')  # style
    sys.stdout.write('<font face="Lucida,Courier New">')
    # parse the source and write it (Python 2 'except E, ex' syntax).
    try:
        tokenize.tokenize(text.readline,self)
    except tokenize.TokenError, ex:
        msg = ex[0]
        line = ex[1][0]
        print "<h3>ERROR: %s</h3>%s" % (msg, self.raw[self.lines[line]:])
    sys.stdout.write('</font></pre>')
    # NOTE(review): '</body">' contains a stray quote; probably meant '</body>'.
    sys.stdout.write('</body"></html>')
#@nonl
#@-node:format
#@+node:__call__
def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):

    """ Token handler called by tokenize.tokenize for each token.

    Writes the token (and any intervening original whitespace) to
    sys.stdout, wrapped in a <font> tag when its type has a color.
    Note: tuple parameters in the signature are Python-2-only syntax."""

    if 0: print "type", toktype, token.tok_name[toktype], "text",\
            toktext, "start", srow,scol, "end", erow,ecol, "<br>"

    # calculate new positions from the (row, col) start of the token.
    oldpos = self.pos
    newpos = self.lines[srow] + scol
    self.pos = newpos + len(toktext)

    # handle newlines
    if toktype in [token.NEWLINE, tokenize.NL]:
        print
        return

    style = ''
    # Reclassify Leo sentinel comments: directives vs. plain sentinels.
    if toktype == tokenize.COMMENT:
        if toktext.lstrip().startswith('#@'):
            if self.fnd.findall(toktext):
                toktype = '_LeoDir'
            else:
                toktype = '_LeoSen'

    # Send the original whitespace.
    if newpos > oldpos:
        sys.stdout.write(self.raw[oldpos:newpos])

    # Skip indenting tokens.
    if toktype in [token.INDENT,token.DEDENT]:
        self.pos = newpos
        return

    # Map token type to a color group
    if token.LPAR <= toktype and toktype <= token.OP:
        toktype = token.OP
    elif toktype == token.NAME and keyword.iskeyword(toktext):
        toktype = _KEYWORD
    if toktype == token.ERRORTOKEN:
        style = ' style="border: solid 1.5pt #FF0000;"'

    # Token types without an entry in _colors are written untagged.
    dofont = True
    try:
        color = _colors[toktype]
    except Exception:
        dofont = False

    if dofont: sys.stdout.write('<font color="%s"%s>' % (color, style))
    sys.stdout.write(cgi.escape(toktext))
    if dofont: sys.stdout.write('</font>')
#@-node:__call__
#@-node:class Parser
#@+node:htmlize
def htmlize(c,p):
    """Colorize the script at position p and open it in the web browser.

    Writes html to the module-level 'filename' while sys.stdout is
    temporarily redirected to that file, then restores stdout and opens
    the file with webbrowser."""
    lang = g.scanForAtLanguage(c,p)
    lang = str(lang).lower()
    source = g.getScript(c,p)
    timestring = hopts['timestring']
    # title = "%s Leo %s script %s" % (p.headString()[:75],lang,timestring)
    title = "%s %s" % (p.headString()[:75],timestring)
    try:
        if not source: raise ValueError
        g.es('output', lang, p.headString())
        theParser = Parser(lang,source,title)
        sys.stdout = open(filename,'wb') 
        if lang == 'python':
            theParser.format(None,None)
        else:
            # Non-python languages go through SilverCity (Leo section ref).
            << colorize with silvercity >>
        sys.stdout.close()
        sys.stdout = sys.__stdout__
        webbrowser.open(filename, new= 1)
    except ValueError:
        g.es('no @path set, unsupported lang or empty script',color='tomato')
        g.es(lang, p.headString())
    except Exception:
        # NOTE(review): stdout may still be redirected when this fires.
        g.es('htmlize malfunction?', color='tomato')
        g.es_exception(full= True)
#@nonl
#@+node:<< colorize with silvercity >>
if lang in [ # Leo may not have all of these yet
    'csharp', 'c', 'c++', 'cpp', # (C and C++)
    'css', # (Cascading Style Sheets)
    'htm', 'html', # HTML/PHP w/ JavaScript, VBScript, Python
    'plain', #null (No styling)
    'perlpod', 'perl', # (Perl)
    #'python', # (Python)
    'ruby', # (Ruby)
    'smart_python', # (Python with styled strings)
    'sql', # (SQL)
    'xml', # (XML)
    'xslt', # (XSLT)
    'yaml', # (YAML)
    # basic & java? missing. might send java as c?
    'elisp', 'php', 'java', 'rapidq', 'actionscript', 'css',
]:
    if lang in ('htm','html','php','java','rapidq','actionscript', 'css'):
        lang = 'html'
    elif lang in ['c','c++','cpp']: lang = 'cpp'
    elif lang in ['perlpod','perl']: lang = 'perl'
    elif lang in ['elisp',]: lang = 'perl'
    if lang in ('plain',None): lang = 'null'

    g.es('writing tmpname', tmpfile )
    fo = file(tmpfile, 'w')
    fo.writelines(pars.raw + "\n")
    fo.close()

    cmd = g.os_path_join(pypath, 'Scripts', 'source2html.py')

    # Send the output to stdout
    #" --view %N  %N.html"
    # --css=file copy silver_city.css where the filename will be
    # source2html.py --list-generators
    params = ' --generator=%s --title=%s --css=silver_city.css %s'%(
       lang, sanitize_(title), tmpfile,)  

    if not g.os_path_exists(cmd):
        g.es('cant find source2html install silvercity')
        print 'cant find source2html from silvercity'
    else:
        g.es('running silvercity \n', py + cmd + params )
        out, err = runcmd(py + cmd + params )
        for x in (out + err).splitlines():
            print x
else:
    print '<i>not a known htmlize supported language</i>'
    #might have to do a sequential dump of nodes, or @rst?
    #is title and first headline set the same for all options?
    print '<Pre>'    
    print pars.raw
    print '</Pre>'
#@nonl
#@-node:<< colorize with silvercity >>
#@-node:htmlize
#@-node:dyna htmlize
#@+node:HTML to Browser
@language python
@tabwidth -4

import webbrowser

if 0:
    << alternate code doesn't work well for me >>

def showHtml(html):
    '''Display html in a web browser by writing it to test/leoTemp.html
    and opening that file. Uses the Python-2-only file() builtin.'''
    fileName = g.os_path_join(g.app.loadDir,'..','test','leoTemp.html')
    f = file(fileName,'w')
    f.write(html)
    f.close()
    webbrowser.open(fileName)

# Display the selected node in the browser.
showHtml('<pre>%s</pre>' % c.currentPosition().bodyString())

# To do: use a stylesheet to colorize the code.
#@nonl
#@+node:<< alternate code doesn't work well for me >>
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/347810
import BaseHTTPServer

def showHtml(html):
    """Display html in the default web browser without creating a temp file.

    Instantiates a trivial http server and calls webbrowser.open with a URL
    to retrieve html from that server.
    Note: BaseHTTPServer/xrange are Python 2 only.
    """

    class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            g.trace(html)
            bufferSize = 1024*1024
            # Serve the html in 1MB chunks.
            for i in xrange(0, len(html), bufferSize):
                self.wfile.write(html[i:i+bufferSize])

    # Port 0 lets the OS pick a free port; serve exactly one request.
    server = BaseHTTPServer.HTTPServer(('127.0.0.1',0), RequestHandler)
    webbrowser.open('http://127.0.0.1:%s' % server.server_port)
    server.handle_request()
#@nonl
#@-node:<< alternate code doesn't work well for me >>
#@-node:HTML to Browser
#@+node:Code from Bill P
@ Here is a function to convert the font tags to equivalent span tags. Note use
of font-size:x-large to show how styling can be used for emphasis.

Uses a regular expression to insert style CSS classes before </STYLE> closing
tag ...so <STYLE></STYLE> is expected to exist in input html.
@c

def font2span(colorizedusingFONT):
    """Convert the htmlizer's <font color=...> tags to <span> tags with CSS
    classes, inserting the class definitions before the </STYLE> closing tag.

    The input html is expected to contain a <STYLE></STYLE> element.
    All patterns are literal strings, so str.replace does the same job as
    the original's thirteen re.compile/re.sub pairs."""
    styleClasses="""    .token_STRING    {color:#00AA00;} 
    .token_NUMBER    {color:#483D8B;}
    .token_OP        {color:#000080;} 
    .tokenize_COMMENT{color:#DD0000;} 
    .token_NAME      {color:#4B0082;} 
    .token_ERRORTOKEN{color:#FF8080;} 
    .KEYWORD        {color:#0066ff;} 
    .TEXT           {color:#000000;} 
    .LeoDir       {color:#228B22;} 
    .LeoSen       {color:#BC8F8F;font-size:x-large;} 
    .bg            {color:#FFFAFA;} 
    """
    aa = colorizedusingFONT
    # Insert the CSS classes just before the closing </STYLE> tag.
    aa = aa.replace("</STYLE>", styleClasses+"\n</STYLE>")
    # Map each font color to its span class.
    colorToClass = (
        ('#00AA00','token_STRING'),
        ('#483D8B','token_NUMBER'),
        ('#000080','token_OP'),
        ('#DD0000','tokenize_COMMENT'),
        ('#4B0082','token_NAME'),
        ('#FF8080','token_ERRORTOKEN'),
        ('#0066ff','KEYWORD'),
        ('#000000','TEXT'),
        ('#228B22','LeoDir'),
        ('#BC8F8F','LeoSen'),
        ('#FFFAFA','bg'),
    )
    for color, cls in colorToClass:
        aa = aa.replace('<font color="%s">' % color, '<span class="%s">' % cls)
    aa = aa.replace('</font>', '</span>')

    basefontSize="12pt"
    basefontFamily="Lucida,Courier New"
    aa = aa.replace('<font face="Lucida,Courier New">',
        '<span style="font:'+basefontSize+' '+basefontFamily+';">')

    return aa
#@nonl
#@+node:NewHeadline
@ To adjust this script internally to use span tags and CSS style classes make
the following changes to  three nodes as follows:
@c

# I. in "init globals" node change the _colors dictionary to use lists 
# (with 3 values representing color, CSS class, extra styling) as follows:

_colors = {
    token.NUMBER:     ['#483D8B','token_NUMBER',' '], #black/darkslateblue
    token.OP:         ['#000080','token_OP',' '], #black/navy
    token.STRING:     ['#00AA00','token_STRING',' '], #green 00cc66
    tokenize.COMMENT: ['#DD0000','tokenize_COMMENT',' '], #red cc0033
    token.NAME:       ['#4B0082','token_NAME',' '], #black/indigo
    token.ERRORTOKEN: ['#FF8080','token_ERRORTOKEN',' '], #redred bare null does it
    _KEYWORD:         ['#0066ff','KEYWORD',' '], #blue
    _TEXT:            ['#000000','TEXT',' '], #black /is text fg color too
    '_LeoDir':        ['#228B22','LeoDir',' '], #directive, forest comment
    '_LeoSen':        ['#BC8F8F','LeoSen','font-size:x-large;'], #sentinel, tan fade comment
    'bg':             ['#FFFAFA','bg',' '], #snow
}
#@nonl
#@-node:NewHeadline
#@-node:Code from Bill P
#@-node:HTMLize scripts
#@+node:jEdit2Py
#@+node:Colorizing test scripts
#@+node:@@button colorize
w = c.frame.body.bodyCtrl # comment
c.frame.body.colorizer.colorize(p)
names = w.tag_names()
for name in names:
    theList = w.tag_ranges(name)
    if theList:
        print 'tag',name,len(theList)
#@nonl
#@-node:@@button colorize
#@+node:@@button red:f1
w = c.frame.body.bodyCtrl
w.tag_configure('red',background='red')
w.tag_add('red','insert-1c','insert+1c')
w.tag_add('f1','insert-1c','insert+1c')
#@nonl
#@-node:@@button red:f1
#@+node:@@button blue:f2
w = c.frame.body.bodyCtrl
w.tag_configure('blue',background='blue')
w.tag_add('blue','insert-1c','insert+1c')
w.tag_add('f2','insert-1c','insert+1c')
#@nonl
#@-node:@@button blue:f2
#@+node:@@button print tags
w = c.frame.body.bodyCtrl
names = w.tag_names()
print '-' * 20
total = 0
for name in names:
    theList = w.tag_ranges(name)
    if theList:
        print name,len(theList)/2
        total += len(theList)/2
print 'total tag ranges',total
#@nonl
#@-node:@@button print tags
#@+node:@@button remove tags
w = c.frame.body.bodyCtrl
names = w.tag_names()
for name in names:
    theList = w.tag_ranges(name)
    if theList:
        print 'removing',name,len(theList)
        while theList:
            a,b = theList[0],theList[1]
            w.tag_remove(name,theList[0],theList[1])
            theList = theList[2:]
#@nonl
#@-node:@@button remove tags
#@+node:@@button print f1
w = c.frame.body.bodyCtrl

def f1(a,b):
    print 'f1','a',a,'b',b

theList = w.tag_ranges('f1')
while theList:
    a,b = theList[0],theList[1]
    f1(a,b)
    theList = theList[2:]
#@nonl
#@-node:@@button print f1
#@+node:test
@color

abdddddddddddddc
xyz

<< section ref >>

@nocolor
#@nonl
#@-node:test
#@+node:latex keywords
# Characters that may appear in single-character LaTeX "keywords" (\%, \@, ...).
# If you see two identical words with minor capitalization differences,
# DO NOT ASSUME that they are the same word.  For example \vert produces
# a single vertical line and \Vert produces a double vertical line.

latex_special_keyword_characters = "@(){}%"

# This is a comment.  The bare string below is colorizer test data (a no-op
# expression statement), not functional code.
'This is a string' # One more comment. Still fast.  Oh joy.

# LaTeX commands recognized by the colorizer.
# Fixed: the comma missing after "\\hyphenation" (it silently concatenated
# with "\\Im", producing the bogus entry "\\hyphenation\\Im" and losing both
# keywords); the missing leading backslashes on \csc, \includeonly, \indent,
# \longrightarrow, \nwarrow and \theta; and a duplicated "\\settowidth".
# NOTE(review): several entries look misspelled (e.g. "\\copywright",
# "\\fotenotesize", "\\providecomamnd", "\\thebibligraphy", "\\twocoloumn")
# -- left unchanged pending confirmation they are not intentional.
latex_keywords = [
    #special keywords.
    "\\%", # 11/9/03
    "\\@", "\\(", "\\)", "\\{", "\\}",
    #A
    "\\acute", "\\addcontentsline", "\\addtocontents", "\\addtocounter", "\\address",
    "\\addtolength", "\\addvspace", "\\AE", "\\ae", "\\aleph", "\\alph", "\\angle",
    "\\appendix",
    "\\approx", "\\arabic", "\\arccos", "\\arcsin", "\\arctan", "\\ast", "\\author",
    #B
    "\\b", "\\backmatter", "\\backslash", "\\bar", "\\baselineskip", "\\baselinestretch",
    "\\begin", "\\beta", "\\bezier", "\\bf", "\\bfseries", "\\bibitem", "\\bigcap",
    "\\bigcup", "\\bigodot", "\\bigoplus", "\\bigotimes", "\\bigskip", "\\biguplus",
    "\\bigvee", "\\bigwedge", "\\bmod", "\\boldmath", "\\Box", "\\breve", "\\bullet",
    #C
    "\\c", "\\cal", "\\caption", "\\cdot", "\\cdots", "\\centering", "\\chapter",
    "\\check", "\\chi", "\\circ", "\\circle", "\\cite", "\\cleardoublepage", "\\clearpage",
    "\\cline", "\\closing", "\\clubsuit", "\\coprod", "\\copywright", "\\cos", "\\cosh",
    "\\cot", "\\coth", "\\csc",
    #D
    "\\d", "\\dag", "\\dashbox", "\\date", "\\ddag", "\\ddot", "\\ddots", "\\decl",
    "\\deg", "\\Delta",
    "\\delta", "\\depthits", "\\det",
    "\\DH", "\\dh", "\\Diamond", "\\diamondsuit", "\\dim", "\\div", "\\DJ", "\\dj",
    "\\documentclass", "\\documentstyle",
    "\\dot", "\\dotfil", "\\downarrow",
    #E
    "\\ell", "\\em", "\\emph", "\\end", "\\enlargethispage", "\\ensuremath",
    "\\enumi", "\\enuii", "\\enumiii", "\\enuiv", "\\epsilon", "\\equation", "\\equiv",
    "\\eta", "\\example", "\\exists", "\\exp",
    #F
    "\\fbox", "\\figure", "\\flat", "\\flushbottom", "\\fnsymbol", "\\footnote",
    "\\footnotemark", "\\fotenotesize",
    "\\footnotetext", "\\forall", "\\frac", "\\frame", "\\framebox", "\\frenchspacing",
    "\\frontmatter",
    #G
    "\\Gamma", "\\gamma", "\\gcd", "\\geq", "\\gg", "\\grave", "\\guillemotleft",
    "\\guillemotright", "\\guilsinglleft", "\\guilsinglright",
    #H
    "\\H", "\\hat", "\\hbar", "\\heartsuit", "\\heightits", "\\hfill", "\\hline", "\\hom",
    "\\hrulefill", "\\hspace", "\\huge", "\\Huge", "\\hyphenation",
    #I
    "\\Im", "\\imath", "\\include", "\\includeonly", "\\indent", "\\index", "\\inf", "\\infty",
    "\\input", "\\int", "\\iota", "\\it", "\\item", "\\itshape",
    #J
    "\\jmath", "\\Join",
    #K
    "\\k", "\\kappa", "\\ker", "\\kill",
    #L
    "\\label", "\\Lambda", "\\lambda", "\\langle", "\\large", "\\Large", "\\LARGE",
    "\\LaTeX", "\\LaTeXe",
    "\\ldots", "\\leadsto", "\\left", "\\Leftarrow", "\\leftarrow", "\\lefteqn", "\\leq",
    "\\lg", "\\lhd", "\\lim", "\\liminf", "\\limsup", "\\line", "\\linebreak",
    "\\linethickness", "\\linewidth", "\\listfiles",
    "\\ll", "\\ln", "\\location", "\\log", "\\Longleftarrow", "\\longleftarrow",
    "\\Longrightarrow", "\\longrightarrow",
    #M
    "\\mainmatter", "\\makebox", "\\makeglossary", "\\makeindex", "\\maketitle", "\\markboth", "\\markright",
    "\\mathbf", "\\mathcal", "\\mathit", "\\mathnormal", "\\mathop",
    "\\mathrm", "\\mathsf", "\\mathtt", "\\max", "\\mbox", "\\mdseries", "\\medskip",
    "\\mho", "\\min", "\\mp", "\\mpfootnote", "\\mu", "\\multicolumn", "\\multiput",
    #N
    "\\nabla", "\\natural", "\\nearrow", "\\neq", "\\newcommand", "\\newcounter",
    "\\newenvironment", "\\newfont",
    "\\newlength", "\\newline", "\\newpage", "\\newsavebox", "\\newtheorem", "\\NG", "\\ng",
    "\\nocite", "\\noindent", "\\nolinbreak", "\\nopagebreak", "\\normalsize",
    "\\not", "\\nu", "\\nwarrow",
    #O
    "\\Omega", "\\omega", "\\onecolumn", "\\oint", "\\opening", "\\oval",
    "\\overbrace", "\\overline",
    #P
    "\\P", "\\page", "\\pagebreak", "\\pagenumbering", "\\pageref", "\\pagestyle",
    "\\par", "\\parbox", "\\paragraph", "\\parindent", "\\parskip", "\\part",
    "\\partial", "\\per", "\\Phi", "\\phi", "\\Pi", "\\pi", "\\pm",
    "\\pmod", "\\pounds", "\\prime", "\\printindex", "\\prod", "\\propto", "\\protext",
    "\\providecomamnd", "\\Psi", "\\psi", "\\put",
    #Q
    "\\qbezier", "\\quoteblbase", "\\quotesinglbase",
    #R
    "\\r", "\\raggedbottom", "\\raggedleft", "\\raggedright", "\\raisebox", "\\rangle",
    "\\Re", "\\ref", "\\renewcommand", "\\renewenvironment", "\\rhd", "\\rho", "\\right",
    "\\Rightarrow", "\\rightarrow", "\\rm", "\\rmfamily",
    "\\Roman", "\\roman", "\\rule",
    #S
    "\\s", "\\samepage", "\\savebox", "\\sbox", "\\sc", "\\scriptsize", "\\scshape",
    "\\searrow", "\\sec", "\\section",
    "\\setcounter", "\\setlength", "\\settowidth", "\\settodepth", "\\settoheight",
    "\\sf", "\\sffamily", "\\sharp", "\\shortstack", "\\Sigma", "\\sigma",
    "\\signature", "\\sim", "\\simeq", "\\sin", "\\sinh", "\\sl", "\\SLiTeX",
    "\\slshape", "\\small", "\\smallskip", "\\spadesuit", "\\sqrt", "\\sqsubset",
    "\\sqsupset", "\\SS",
    "\\stackrel", "\\star", "\\subsection", "\\subset",
    "\\subsubsection", "\\sum", "\\sup", "\\supressfloats", "\\surd", "\\swarrow",
    #T
    "\\t", "\\table", "\\tableofcontents", "\\tabularnewline", "\\tan", "\\tanh",
    "\\tau", "\\telephone", "\\TeX", "\\textbf",
    "\\textbullet", "\\textcircled", "\\textcompworkmark", "\\textemdash",
    "\\textendash", "\\textexclamdown", "\\textheight", "\\textquestiondown",
    "\\textquoteblleft", "\\textquoteblright", "\\textquoteleft",
    "\\textperiod", "\\textquotebl", "\\textquoteright", "\\textmd", "\\textit", "\\textrm",
    "\\textsc", "\\textsl", "\\textsf", "\\textsuperscript", "\\texttt", "\\textup",
    "\\textvisiblespace", "\\textwidth", "\\TH", "\\th", "\\thanks", "\\thebibligraphy",
    "\\Theta", "\\theta",
    "\\tilde", "\\thinlines",
    "\\thispagestyle", "\\times", "\\tiny", "\\title", "\\today", "\\totalheightits",
    "\\triangle", "\\tt",
    "\\ttfamily", "\\twocoloumn", "\\typeout", "\\typein",
    #U
    "\\u", "\\underbrace", "\\underline", "\\unitlength", "\\unlhd", "\\unrhd", "\\Uparrow",
    "\\uparrow", "\\updownarrow", "\\upshape", "\\Upsilon", "\\upsilon", "\\usebox",
    "\\usecounter", "\\usepackage",
    #V
    "\\v", "\\value", "\\varepsilon", "\\varphi", "\\varpi", "\\varrho", "\\varsigma",
    "\\vartheta", "\\vdots", "\\vec", "\\vector", "\\verb", "\\Vert", "\\vert", "\\vfill",
    "\\vline", "\\vphantom", "\\vspace",
    #W
    "\\widehat", "\\widetilde", "\\widthits", "\\wp",
    #X
    "\\Xi", "\\xi",
    #Z
    "\\zeta" ]

# A test at the very end.  This is jolly.

# LaTeX commands recognized by the colorizer (second copy of the list).
# Fixed: the comma missing after "\\hyphenation" (it silently concatenated
# with "\\Im", producing the bogus entry "\\hyphenation\\Im" and losing both
# keywords); the missing leading backslashes on \csc, \includeonly, \indent,
# \longrightarrow, \nwarrow and \theta; and a duplicated "\\settowidth".
# NOTE(review): several entries look misspelled (e.g. "\\copywright",
# "\\fotenotesize", "\\providecomamnd", "\\thebibligraphy", "\\twocoloumn")
# -- left unchanged pending confirmation they are not intentional.
latex_keywords = [
    #special keywords.
    "\\%", # 11/9/03
    "\\@", "\\(", "\\)", "\\{", "\\}",
    #A
    "\\acute", "\\addcontentsline", "\\addtocontents", "\\addtocounter", "\\address",
    "\\addtolength", "\\addvspace", "\\AE", "\\ae", "\\aleph", "\\alph", "\\angle",
    "\\appendix",
    "\\approx", "\\arabic", "\\arccos", "\\arcsin", "\\arctan", "\\ast", "\\author",
    #B
    "\\b", "\\backmatter", "\\backslash", "\\bar", "\\baselineskip", "\\baselinestretch",
    "\\begin", "\\beta", "\\bezier", "\\bf", "\\bfseries", "\\bibitem", "\\bigcap",
    "\\bigcup", "\\bigodot", "\\bigoplus", "\\bigotimes", "\\bigskip", "\\biguplus",
    "\\bigvee", "\\bigwedge", "\\bmod", "\\boldmath", "\\Box", "\\breve", "\\bullet",
    #C
    "\\c", "\\cal", "\\caption", "\\cdot", "\\cdots", "\\centering", "\\chapter",
    "\\check", "\\chi", "\\circ", "\\circle", "\\cite", "\\cleardoublepage", "\\clearpage",
    "\\cline", "\\closing", "\\clubsuit", "\\coprod", "\\copywright", "\\cos", "\\cosh",
    "\\cot", "\\coth", "\\csc",
    #D
    "\\d", "\\dag", "\\dashbox", "\\date", "\\ddag", "\\ddot", "\\ddots", "\\decl",
    "\\deg", "\\Delta",
    "\\delta", "\\depthits", "\\det",
    "\\DH", "\\dh", "\\Diamond", "\\diamondsuit", "\\dim", "\\div", "\\DJ", "\\dj",
    "\\documentclass", "\\documentstyle",
    "\\dot", "\\dotfil", "\\downarrow",
    #E
    "\\ell", "\\em", "\\emph", "\\end", "\\enlargethispage", "\\ensuremath",
    "\\enumi", "\\enuii", "\\enumiii", "\\enuiv", "\\epsilon", "\\equation", "\\equiv",
    "\\eta", "\\example", "\\exists", "\\exp",
    #F
    "\\fbox", "\\figure", "\\flat", "\\flushbottom", "\\fnsymbol", "\\footnote",
    "\\footnotemark", "\\fotenotesize",
    "\\footnotetext", "\\forall", "\\frac", "\\frame", "\\framebox", "\\frenchspacing",
    "\\frontmatter",
    #G
    "\\Gamma", "\\gamma", "\\gcd", "\\geq", "\\gg", "\\grave", "\\guillemotleft",
    "\\guillemotright", "\\guilsinglleft", "\\guilsinglright",
    #H
    "\\H", "\\hat", "\\hbar", "\\heartsuit", "\\heightits", "\\hfill", "\\hline", "\\hom",
    "\\hrulefill", "\\hspace", "\\huge", "\\Huge", "\\hyphenation",
    #I
    "\\Im", "\\imath", "\\include", "\\includeonly", "\\indent", "\\index", "\\inf", "\\infty",
    "\\input", "\\int", "\\iota", "\\it", "\\item", "\\itshape",
    #J
    "\\jmath", "\\Join",
    #K
    "\\k", "\\kappa", "\\ker", "\\kill",
    #L
    "\\label", "\\Lambda", "\\lambda", "\\langle", "\\large", "\\Large", "\\LARGE",
    "\\LaTeX", "\\LaTeXe",
    "\\ldots", "\\leadsto", "\\left", "\\Leftarrow", "\\leftarrow", "\\lefteqn", "\\leq",
    "\\lg", "\\lhd", "\\lim", "\\liminf", "\\limsup", "\\line", "\\linebreak",
    "\\linethickness", "\\linewidth", "\\listfiles",
    "\\ll", "\\ln", "\\location", "\\log", "\\Longleftarrow", "\\longleftarrow",
    "\\Longrightarrow", "\\longrightarrow",
    #M
    "\\mainmatter", "\\makebox", "\\makeglossary", "\\makeindex", "\\maketitle", "\\markboth", "\\markright",
    "\\mathbf", "\\mathcal", "\\mathit", "\\mathnormal", "\\mathop",
    "\\mathrm", "\\mathsf", "\\mathtt", "\\max", "\\mbox", "\\mdseries", "\\medskip",
    "\\mho", "\\min", "\\mp", "\\mpfootnote", "\\mu", "\\multicolumn", "\\multiput",
    #N
    "\\nabla", "\\natural", "\\nearrow", "\\neq", "\\newcommand", "\\newcounter",
    "\\newenvironment", "\\newfont",
    "\\newlength", "\\newline", "\\newpage", "\\newsavebox", "\\newtheorem", "\\NG", "\\ng",
    "\\nocite", "\\noindent", "\\nolinbreak", "\\nopagebreak", "\\normalsize",
    "\\not", "\\nu", "\\nwarrow",
    #O
    "\\Omega", "\\omega", "\\onecolumn", "\\oint", "\\opening", "\\oval",
    "\\overbrace", "\\overline",
    #P
    "\\P", "\\page", "\\pagebreak", "\\pagenumbering", "\\pageref", "\\pagestyle",
    "\\par", "\\parbox", "\\paragraph", "\\parindent", "\\parskip", "\\part",
    "\\partial", "\\per", "\\Phi", "\\phi", "\\Pi", "\\pi", "\\pm",
    "\\pmod", "\\pounds", "\\prime", "\\printindex", "\\prod", "\\propto", "\\protext",
    "\\providecomamnd", "\\Psi", "\\psi", "\\put",
    #Q
    "\\qbezier", "\\quoteblbase", "\\quotesinglbase",
    #R
    "\\r", "\\raggedbottom", "\\raggedleft", "\\raggedright", "\\raisebox", "\\rangle",
    "\\Re", "\\ref", "\\renewcommand", "\\renewenvironment", "\\rhd", "\\rho", "\\right",
    "\\Rightarrow", "\\rightarrow", "\\rm", "\\rmfamily",
    "\\Roman", "\\roman", "\\rule",
    #S
    "\\s", "\\samepage", "\\savebox", "\\sbox", "\\sc", "\\scriptsize", "\\scshape",
    "\\searrow", "\\sec", "\\section",
    "\\setcounter", "\\setlength", "\\settowidth", "\\settodepth", "\\settoheight",
    "\\sf", "\\sffamily", "\\sharp", "\\shortstack", "\\Sigma", "\\sigma",
    "\\signature", "\\sim", "\\simeq", "\\sin", "\\sinh", "\\sl", "\\SLiTeX",
    "\\slshape", "\\small", "\\smallskip", "\\spadesuit", "\\sqrt", "\\sqsubset",
    "\\sqsupset", "\\SS",
    "\\stackrel", "\\star", "\\subsection", "\\subset",
    "\\subsubsection", "\\sum", "\\sup", "\\supressfloats", "\\surd", "\\swarrow",
    #T
    "\\t", "\\table", "\\tableofcontents", "\\tabularnewline", "\\tan", "\\tanh",
    "\\tau", "\\telephone", "\\TeX", "\\textbf",
    "\\textbullet", "\\textcircled", "\\textcompworkmark", "\\textemdash",
    "\\textendash", "\\textexclamdown", "\\textheight", "\\textquestiondown",
    "\\textquoteblleft", "\\textquoteblright", "\\textquoteleft",
    "\\textperiod", "\\textquotebl", "\\textquoteright", "\\textmd", "\\textit", "\\textrm",
    "\\textsc", "\\textsl", "\\textsf", "\\textsuperscript", "\\texttt", "\\textup",
    "\\textvisiblespace", "\\textwidth", "\\TH", "\\th", "\\thanks", "\\thebibligraphy",
    "\\Theta", "\\theta",
    "\\tilde", "\\thinlines",
    "\\thispagestyle", "\\times", "\\tiny", "\\title", "\\today", "\\totalheightits",
    "\\triangle", "\\tt",
    "\\ttfamily", "\\twocoloumn", "\\typeout", "\\typein",
    #U
    "\\u", "\\underbrace", "\\underline", "\\unitlength", "\\unlhd", "\\unrhd", "\\Uparrow",
    "\\uparrow", "\\updownarrow", "\\upshape", "\\Upsilon", "\\upsilon", "\\usebox",
    "\\usecounter", "\\usepackage",
    #V
    "\\v", "\\value", "\\varepsilon", "\\varphi", "\\varpi", "\\varrho", "\\varsigma",
    "\\vartheta", "\\vdots", "\\vec", "\\vector", "\\verb", "\\Vert", "\\vert", "\\vfill",
    "\\vline", "\\vphantom", "\\vspace",
    #W
    "\\widehat", "\\widetilde", "\\widthits", "\\wp",
    #X
    "\\Xi", "\\xi",
    #Z
    "\\zeta" ]

# A test at the very end.  This is jolly.

# LaTeX commands recognized by the colorizer (third copy of the list).
# Fixed: the comma missing after "\\hyphenation" (it silently concatenated
# with "\\Im", producing the bogus entry "\\hyphenation\\Im" and losing both
# keywords); the missing leading backslashes on \csc, \includeonly, \indent,
# \longrightarrow, \nwarrow and \theta; and a duplicated "\\settowidth".
# NOTE(review): several entries look misspelled (e.g. "\\copywright",
# "\\fotenotesize", "\\providecomamnd", "\\thebibligraphy", "\\twocoloumn")
# -- left unchanged pending confirmation they are not intentional.
latex_keywords = [
    #special keywords.
    "\\%", # 11/9/03
    "\\@", "\\(", "\\)", "\\{", "\\}",
    #A
    "\\acute", "\\addcontentsline", "\\addtocontents", "\\addtocounter", "\\address",
    "\\addtolength", "\\addvspace", "\\AE", "\\ae", "\\aleph", "\\alph", "\\angle",
    "\\appendix",
    "\\approx", "\\arabic", "\\arccos", "\\arcsin", "\\arctan", "\\ast", "\\author",
    #B
    "\\b", "\\backmatter", "\\backslash", "\\bar", "\\baselineskip", "\\baselinestretch",
    "\\begin", "\\beta", "\\bezier", "\\bf", "\\bfseries", "\\bibitem", "\\bigcap",
    "\\bigcup", "\\bigodot", "\\bigoplus", "\\bigotimes", "\\bigskip", "\\biguplus",
    "\\bigvee", "\\bigwedge", "\\bmod", "\\boldmath", "\\Box", "\\breve", "\\bullet",
    #C
    "\\c", "\\cal", "\\caption", "\\cdot", "\\cdots", "\\centering", "\\chapter",
    "\\check", "\\chi", "\\circ", "\\circle", "\\cite", "\\cleardoublepage", "\\clearpage",
    "\\cline", "\\closing", "\\clubsuit", "\\coprod", "\\copywright", "\\cos", "\\cosh",
    "\\cot", "\\coth", "\\csc",
    #D
    "\\d", "\\dag", "\\dashbox", "\\date", "\\ddag", "\\ddot", "\\ddots", "\\decl",
    "\\deg", "\\Delta",
    "\\delta", "\\depthits", "\\det",
    "\\DH", "\\dh", "\\Diamond", "\\diamondsuit", "\\dim", "\\div", "\\DJ", "\\dj",
    "\\documentclass", "\\documentstyle",
    "\\dot", "\\dotfil", "\\downarrow",
    #E
    "\\ell", "\\em", "\\emph", "\\end", "\\enlargethispage", "\\ensuremath",
    "\\enumi", "\\enuii", "\\enumiii", "\\enuiv", "\\epsilon", "\\equation", "\\equiv",
    "\\eta", "\\example", "\\exists", "\\exp",
    #F
    "\\fbox", "\\figure", "\\flat", "\\flushbottom", "\\fnsymbol", "\\footnote",
    "\\footnotemark", "\\fotenotesize",
    "\\footnotetext", "\\forall", "\\frac", "\\frame", "\\framebox", "\\frenchspacing",
    "\\frontmatter",
    #G
    "\\Gamma", "\\gamma", "\\gcd", "\\geq", "\\gg", "\\grave", "\\guillemotleft",
    "\\guillemotright", "\\guilsinglleft", "\\guilsinglright",
    #H
    "\\H", "\\hat", "\\hbar", "\\heartsuit", "\\heightits", "\\hfill", "\\hline", "\\hom",
    "\\hrulefill", "\\hspace", "\\huge", "\\Huge", "\\hyphenation",
    #I
    "\\Im", "\\imath", "\\include", "\\includeonly", "\\indent", "\\index", "\\inf", "\\infty",
    "\\input", "\\int", "\\iota", "\\it", "\\item", "\\itshape",
    #J
    "\\jmath", "\\Join",
    #K
    "\\k", "\\kappa", "\\ker", "\\kill",
    #L
    "\\label", "\\Lambda", "\\lambda", "\\langle", "\\large", "\\Large", "\\LARGE",
    "\\LaTeX", "\\LaTeXe",
    "\\ldots", "\\leadsto", "\\left", "\\Leftarrow", "\\leftarrow", "\\lefteqn", "\\leq",
    "\\lg", "\\lhd", "\\lim", "\\liminf", "\\limsup", "\\line", "\\linebreak",
    "\\linethickness", "\\linewidth", "\\listfiles",
    "\\ll", "\\ln", "\\location", "\\log", "\\Longleftarrow", "\\longleftarrow",
    "\\Longrightarrow", "\\longrightarrow",
    #M
    "\\mainmatter", "\\makebox", "\\makeglossary", "\\makeindex", "\\maketitle", "\\markboth", "\\markright",
    "\\mathbf", "\\mathcal", "\\mathit", "\\mathnormal", "\\mathop",
    "\\mathrm", "\\mathsf", "\\mathtt", "\\max", "\\mbox", "\\mdseries", "\\medskip",
    "\\mho", "\\min", "\\mp", "\\mpfootnote", "\\mu", "\\multicolumn", "\\multiput",
    #N
    "\\nabla", "\\natural", "\\nearrow", "\\neq", "\\newcommand", "\\newcounter",
    "\\newenvironment", "\\newfont",
    "\\newlength", "\\newline", "\\newpage", "\\newsavebox", "\\newtheorem", "\\NG", "\\ng",
    "\\nocite", "\\noindent", "\\nolinbreak", "\\nopagebreak", "\\normalsize",
    "\\not", "\\nu", "\\nwarrow",
    #O
    "\\Omega", "\\omega", "\\onecolumn", "\\oint", "\\opening", "\\oval",
    "\\overbrace", "\\overline",
    #P
    "\\P", "\\page", "\\pagebreak", "\\pagenumbering", "\\pageref", "\\pagestyle",
    "\\par", "\\parbox", "\\paragraph", "\\parindent", "\\parskip", "\\part",
    "\\partial", "\\per", "\\Phi", "\\phi", "\\Pi", "\\pi", "\\pm",
    "\\pmod", "\\pounds", "\\prime", "\\printindex", "\\prod", "\\propto", "\\protext",
    "\\providecomamnd", "\\Psi", "\\psi", "\\put",
    #Q
    "\\qbezier", "\\quoteblbase", "\\quotesinglbase",
    #R
    "\\r", "\\raggedbottom", "\\raggedleft", "\\raggedright", "\\raisebox", "\\rangle",
    "\\Re", "\\ref", "\\renewcommand", "\\renewenvironment", "\\rhd", "\\rho", "\\right",
    "\\Rightarrow", "\\rightarrow", "\\rm", "\\rmfamily",
    "\\Roman", "\\roman", "\\rule",
    #S
    "\\s", "\\samepage", "\\savebox", "\\sbox", "\\sc", "\\scriptsize", "\\scshape",
    "\\searrow", "\\sec", "\\section",
    "\\setcounter", "\\setlength", "\\settowidth", "\\settodepth", "\\settoheight",
    "\\sf", "\\sffamily", "\\sharp", "\\shortstack", "\\Sigma", "\\sigma",
    "\\signature", "\\sim", "\\simeq", "\\sin", "\\sinh", "\\sl", "\\SLiTeX",
    "\\slshape", "\\small", "\\smallskip", "\\spadesuit", "\\sqrt", "\\sqsubset",
    "\\sqsupset", "\\SS",
    "\\stackrel", "\\star", "\\subsection", "\\subset",
    "\\subsubsection", "\\sum", "\\sup", "\\supressfloats", "\\surd", "\\swarrow",
    #T
    "\\t", "\\table", "\\tableofcontents", "\\tabularnewline", "\\tan", "\\tanh",
    "\\tau", "\\telephone", "\\TeX", "\\textbf",
    "\\textbullet", "\\textcircled", "\\textcompworkmark", "\\textemdash",
    "\\textendash", "\\textexclamdown", "\\textheight", "\\textquestiondown",
    "\\textquoteblleft", "\\textquoteblright", "\\textquoteleft",
    "\\textperiod", "\\textquotebl", "\\textquoteright", "\\textmd", "\\textit", "\\textrm",
    "\\textsc", "\\textsl", "\\textsf", "\\textsuperscript", "\\texttt", "\\textup",
    "\\textvisiblespace", "\\textwidth", "\\TH", "\\th", "\\thanks", "\\thebibligraphy",
    "\\Theta", "\\theta",
    "\\tilde", "\\thinlines",
    "\\thispagestyle", "\\times", "\\tiny", "\\title", "\\today", "\\totalheightits",
    "\\triangle", "\\tt",
    "\\ttfamily", "\\twocoloumn", "\\typeout", "\\typein",
    #U
    "\\u", "\\underbrace", "\\underline", "\\unitlength", "\\unlhd", "\\unrhd", "\\Uparrow",
    "\\uparrow", "\\updownarrow", "\\upshape", "\\Upsilon", "\\upsilon", "\\usebox",
    "\\usecounter", "\\usepackage",
    #V
    "\\v", "\\value", "\\varepsilon", "\\varphi", "\\varpi", "\\varrho", "\\varsigma",
    "\\vartheta", "\\vdots", "\\vec", "\\vector", "\\verb", "\\Vert", "\\vert", "\\vfill",
    "\\vline", "\\vphantom", "\\vspace",
    #W
    "\\widehat", "\\widetilde", "\\widthits", "\\wp", # This is a test.
    #X
    "\\Xi", "\\xi",
    #Z
    "\\zeta" ]

# A test at the very end.  This is jolly.
#@nonl
#@-node:latex keywords
#@-node:Colorizing test scripts
#@+node:Colorizer tests
#@+node:C
@color
@language c

#include abc

// tests

/* test
end of test. */

for (i = 1; i < 6; i++) {
    continue
#@nonl
#@-node:C
#@+node:Python
@color

<< test >>

@doc test
another line

@c

'test'

'''test'''

# @nocolor

for i = 1;
    yield
#@nonl
#@-node:Python
#@+node:php.py
# Leo colorizer control file for php mode.
# This file is in the public domain.

# Properties for php mode: HTML-style comment delimiters and
# brace-based indentation hints for the editor.
properties = dict(
    commentStart="<!--",
    commentEnd="-->",
    indentOpenBrackets="{",
    indentCloseBrackets="}",
    lineUpClosingBracket="true",
)

# Attribute dictionaries for the php-mode rulesets.  All rulesets share the
# same boilerplate (no custom digit regexp, digits highlighted,
# case-insensitive, no extra word separators); only "default" -- the token
# type assigned to unmatched text -- differs per ruleset.

# Attributes dict for php_main ruleset.
php_main_attributes_dict = dict(
    default="null", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_tags ruleset.
php_tags_attributes_dict = dict(
    default="MARKUP", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_tags_literal ruleset.
php_tags_literal_attributes_dict = dict(
    default="LITERAL1", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_php ruleset.
# NOTE(review): default "LITERAL1" looks copied from the literal rulesets --
# confirm against the original jEdit php mode file before changing.
php_php_attributes_dict = dict(
    default="LITERAL1", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_php_literal ruleset.
php_php_literal_attributes_dict = dict(
    default="LITERAL1", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_javascript ruleset.
php_javascript_attributes_dict = dict(
    default="MARKUP", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_javascript_php ruleset.
php_javascript_php_attributes_dict = dict(
    default="MARKUP", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Attributes dict for php_phpdoc ruleset.
php_phpdoc_attributes_dict = dict(
    default="COMMENT3", digit_re="",
    highlight_digits="true", ignore_case="true", no_word_sep="",
)

# Dictionary of attributes dictionaries for php mode, keyed by ruleset name.
attributesDictDict = dict(
    php_javascript=php_javascript_attributes_dict,
    php_javascript_php=php_javascript_php_attributes_dict,
    php_main=php_main_attributes_dict,
    php_php=php_php_attributes_dict,
    php_php_literal=php_php_literal_attributes_dict,
    php_phpdoc=php_phpdoc_attributes_dict,
    php_tags=php_tags_attributes_dict,
    php_tags_literal=php_tags_literal_attributes_dict,
)

# Keyword dictionaries for the rulesets that define no keywords at all.

# Keywords dict for php_main ruleset.
php_main_keywords_dict = dict()

# Keywords dict for php_tags ruleset.
php_tags_keywords_dict = dict()

# Keywords dict for php_tags_literal ruleset.
php_tags_literal_keywords_dict = dict()

# Keywords dict for php_php ruleset.
php_php_keywords_dict = {
	"COM_invoke": "keyword2",
	"COM_load": "keyword2",
	"__CLASS__": "keyword3",
	"__FILE__": "keyword3",
	"__FUNCTION__": "keyword3",
	"__LINE__": "keyword3",
	"__METHOD__": "keyword3",
	"abs": "keyword2",
	"abstract": "keyword1",
	"accept_connect": "keyword2",
	"acos": "keyword2",
	"add": "keyword2",
	"add_iovec": "keyword2",
	"addaction": "keyword2",
	"addcolor": "keyword2",
	"addcslashes": "keyword2",
	"addentry": "keyword2",
	"addfill": "keyword2",
	"addshape": "keyword2",
	"addslashes": "keyword2",
	"addstring": "keyword2",
	"align": "keyword2",
	"and": "operator",
	"apache_child_terminate": "keyword2",
	"apache_lookup_uri": "keyword2",
	"apache_note": "keyword2",
	"apache_sub_req": "keyword2",
	"array": "keyword1",
	"array_combine": "keyword2",
	"array_count_values": "keyword2",
	"array_diff": "keyword2",
	"array_diff_assoc": "keyword2",
	"array_diff_uassoc": "keyword2",
	"array_filter": "keyword2",
	"array_flip": "keyword2",
	"array_intersect": "keyword2",
	"array_intersect_assoc": "keyword2",
	"array_keys": "keyword2",
	"array_map": "keyword2",
	"array_merge": "keyword2",
	"array_merge_recursive": "keyword2",
	"array_multisort": "keyword2",
	"array_pad": "keyword2",
	"array_pop": "keyword2",
	"array_push": "keyword2",
	"array_rand": "keyword2",
	"array_reduce": "keyword2",
	"array_reverse": "keyword2",
	"array_search": "keyword2",
	"array_shift": "keyword2",
	"array_slice": "keyword2",
	"array_splice": "keyword2",
	"array_sum": "keyword2",
	"array_udiff": "keyword2",
	"array_udiff_assoc": "keyword2",
	"array_udiff_uassoc": "keyword2",
	"array_unique": "keyword2",
	"array_unshift": "keyword2",
	"array_values": "keyword2",
	"array_walk": "keyword2",
	"array_walk_recursive": "keyword2",
	"arsort": "keyword2",
	"as": "keyword1",
	"asin": "keyword2",
	"asort": "keyword2",
	"aspell_check": "keyword2",
	"aspell_check_raw": "keyword2",
	"aspell_new": "keyword2",
	"aspell_suggest": "keyword2",
	"assert": "keyword2",
	"assert_options": "keyword2",
	"atan": "keyword2",
	"atan2": "keyword2",
	"base64_decode": "keyword2",
	"base64_encode": "keyword2",
	"base_convert": "keyword2",
	"basename": "keyword2",
	"bcadd": "keyword2",
	"bccomp": "keyword2",
	"bcdiv": "keyword2",
	"bcmod": "keyword2",
	"bcmul": "keyword2",
	"bcpow": "keyword2",
	"bcscale": "keyword2",
	"bcsqrt": "keyword2",
	"bcsub": "keyword2",
	"bin2hex": "keyword2",
	"bind": "keyword2",
	"bindec": "keyword2",
	"bindtextdomain": "keyword2",
	"break": "keyword1",
	"build_iovec": "keyword2",
	"bzclose": "keyword2",
	"bzcompress": "keyword2",
	"bzdecompress": "keyword2",
	"bzerrno": "keyword2",
	"bzerror": "keyword2",
	"bzerrstr": "keyword2",
	"bzflush": "keyword2",
	"bzopen": "keyword2",
	"bzread": "keyword2",
	"bzwrite": "keyword2",
	"call_user_func": "keyword2",
	"call_user_func_array": "keyword2",
	"call_user_method": "keyword2",
	"call_user_method_array": "keyword2",
	"case": "keyword1",
	"catch": "keyword1",
	"ccvs_add": "keyword2",
	"ccvs_auth": "keyword2",
	"ccvs_command": "keyword2",
	"ccvs_count": "keyword2",
	"ccvs_delete": "keyword2",
	"ccvs_done": "keyword2",
	"ccvs_init": "keyword2",
	"ccvs_lookup": "keyword2",
	"ccvs_new": "keyword2",
	"ccvs_report": "keyword2",
	"ccvs_return": "keyword2",
	"ccvs_reverse": "keyword2",
	"ccvs_sale": "keyword2",
	"ccvs_status": "keyword2",
	"ccvs_textvalue": "keyword2",
	"ccvs_void": "keyword2",
	"ceil": "keyword2",
	"chdir": "keyword2",
	"checkdate": "keyword2",
	"checkdnsrr": "keyword2",
	"chgrp": "keyword2",
	"chmod": "keyword2",
	"chop": "keyword2",
	"chown": "keyword2",
	"chr": "keyword2",
	"chroot": "keyword2",
	"chunk_split": "keyword2",
	"class": "keyword1",
	"class_exists": "keyword2",
	"clearstatcache": "keyword2",
	"clone": "keyword1",
	"close": "keyword2",
	"closedir": "keyword2",
	"closelog": "keyword2",
	"com_get": "keyword2",
	"com_propget": "keyword2",
	"com_propput": "keyword2",
	"com_propset": "keyword2",
	"com_set": "keyword2",
	"compact": "keyword2",
	"confirm_cybermut_compiled": "keyword2",
	"confirm_extname_compiled": "keyword2",
	"connect": "keyword2",
	"connection_aborted": "keyword2",
	"connection_status": "keyword2",
	"const": "keyword1",
	"constant": "keyword2",
	"continue": "keyword1",
	"convert_cyr_string": "keyword2",
	"convert_uudecode": "keyword2",
	"convert_uuencode": "keyword2",
	"copy": "keyword2",
	"cos": "keyword2",
	"count": "keyword2",
	"count_chars": "keyword2",
	"cpdf_add_annotation": "keyword2",
	"cpdf_add_outline": "keyword2",
	"cpdf_arc": "keyword2",
	"cpdf_begin_text": "keyword2",
	"cpdf_circle": "keyword2",
	"cpdf_clip": "keyword2",
	"cpdf_close": "keyword2",
	"cpdf_closepath": "keyword2",
	"cpdf_closepath_fill_stroke": "keyword2",
	"cpdf_closepath_stroke": "keyword2",
	"cpdf_continue_text": "keyword2",
	"cpdf_curveto": "keyword2",
	"cpdf_end_text": "keyword2",
	"cpdf_fill": "keyword2",
	"cpdf_fill_stroke": "keyword2",
	"cpdf_finalize": "keyword2",
	"cpdf_finalize_page": "keyword2",
	"cpdf_global_set_document_limits": "keyword2",
	"cpdf_import_jpeg": "keyword2",
	"cpdf_lineto": "keyword2",
	"cpdf_moveto": "keyword2",
	"cpdf_newpath": "keyword2",
	"cpdf_open": "keyword2",
	"cpdf_output_buffer": "keyword2",
	"cpdf_page_init": "keyword2",
	"cpdf_place_inline_image": "keyword2",
	"cpdf_rect": "keyword2",
	"cpdf_restore": "keyword2",
	"cpdf_rlineto": "keyword2",
	"cpdf_rmoveto": "keyword2",
	"cpdf_rotate": "keyword2",
	"cpdf_rotate_text": "keyword2",
	"cpdf_save": "keyword2",
	"cpdf_save_to_file": "keyword2",
	"cpdf_scale": "keyword2",
	"cpdf_set_action_url": "keyword2",
	"cpdf_set_char_spacing": "keyword2",
	"cpdf_set_creator": "keyword2",
	"cpdf_set_current_page": "keyword2",
	"cpdf_set_font": "keyword2",
	"cpdf_set_font_directories": "keyword2",
	"cpdf_set_font_map_file": "keyword2",
	"cpdf_set_horiz_scaling": "keyword2",
	"cpdf_set_keywords": "keyword2",
	"cpdf_set_leading": "keyword2",
	"cpdf_set_page_animation": "keyword2",
	"cpdf_set_subject": "keyword2",
	"cpdf_set_text_matrix": "keyword2",
	"cpdf_set_text_pos": "keyword2",
	"cpdf_set_text_rendering": "keyword2",
	"cpdf_set_text_rise": "keyword2",
	"cpdf_set_title": "keyword2",
	"cpdf_set_viewer_preferences": "keyword2",
	"cpdf_set_word_spacing": "keyword2",
	"cpdf_setdash": "keyword2",
	"cpdf_setflat": "keyword2",
	"cpdf_setgray": "keyword2",
	"cpdf_setgray_fill": "keyword2",
	"cpdf_setgray_stroke": "keyword2",
	"cpdf_setlinecap": "keyword2",
	"cpdf_setlinejoin": "keyword2",
	"cpdf_setlinewidth": "keyword2",
	"cpdf_setmiterlimit": "keyword2",
	"cpdf_setrgbcolor": "keyword2",
	"cpdf_setrgbcolor_fill": "keyword2",
	"cpdf_setrgbcolor_stroke": "keyword2",
	"cpdf_show": "keyword2",
	"cpdf_show_xy": "keyword2",
	"cpdf_stringwidth": "keyword2",
	"cpdf_stroke": "keyword2",
	"cpdf_text": "keyword2",
	"cpdf_translate": "keyword2",
	"crack_check": "keyword2",
	"crack_closedict": "keyword2",
	"crack_getlastmessage": "keyword2",
	"crack_opendict": "keyword2",
	"crash": "keyword2",
	"crc32": "keyword2",
	"create_function": "keyword2",
	"crypt": "keyword2",
	"ctype_alnum": "keyword2",
	"ctype_alpha": "keyword2",
	"ctype_cntrl": "keyword2",
	"ctype_digit": "keyword2",
	"ctype_graph": "keyword2",
	"ctype_lower": "keyword2",
	"ctype_print": "keyword2",
	"ctype_punct": "keyword2",
	"ctype_space": "keyword2",
	"ctype_upper": "keyword2",
	"ctype_xdigit": "keyword2",
	"curl_close": "keyword2",
	"curl_errno": "keyword2",
	"curl_error": "keyword2",
	"curl_exec": "keyword2",
	"curl_getinfo": "keyword2",
	"curl_init": "keyword2",
	"curl_setopt": "keyword2",
	"curl_version": "keyword2",
	"current": "keyword2",
	"cv_add": "keyword2",
	"cv_auth": "keyword2",
	"cv_command": "keyword2",
	"cv_count": "keyword2",
	"cv_delete": "keyword2",
	"cv_done": "keyword2",
	"cv_init": "keyword2",
	"cv_lookup": "keyword2",
	"cv_new": "keyword2",
	"cv_report": "keyword2",
	"cv_return": "keyword2",
	"cv_reverse": "keyword2",
	"cv_sale": "keyword2",
	"cv_status": "keyword2",
	"cv_textvalue": "keyword2",
	"cv_void": "keyword2",
	"cybercash_base64_decode": "keyword2",
	"cybercash_base64_encode": "keyword2",
	"cybercash_decr": "keyword2",
	"cybercash_encr": "keyword2",
	"cybermut_creerformulairecm": "keyword2",
	"cybermut_creerreponsecm": "keyword2",
	"cybermut_testmac": "keyword2",
	"date": "keyword2",
	"dba_close": "keyword2",
	"dba_delete": "keyword2",
	"dba_exists": "keyword2",
	"dba_fetch": "keyword2",
	"dba_firstkey": "keyword2",
	"dba_insert": "keyword2",
	"dba_nextkey": "keyword2",
	"dba_open": "keyword2",
	"dba_optimize": "keyword2",
	"dba_popen": "keyword2",
	"dba_replace": "keyword2",
	"dba_sync": "keyword2",
	"dbase_add_record": "keyword2",
	"dbase_close": "keyword2",
	"dbase_create": "keyword2",
	"dbase_delete_record": "keyword2",
	"dbase_get_record": "keyword2",
	"dbase_get_record_with_names": "keyword2",
	"dbase_numfields": "keyword2",
	"dbase_numrecords": "keyword2",
	"dbase_open": "keyword2",
	"dbase_pack": "keyword2",
	"dbase_replace_record": "keyword2",
	"dblist": "keyword2",
	"dbmclose": "keyword2",
	"dbmdelete": "keyword2",
	"dbmexists": "keyword2",
	"dbmfetch": "keyword2",
	"dbmfirstkey": "keyword2",
	"dbminsert": "keyword2",
	"dbmnextkey": "keyword2",
	"dbmopen": "keyword2",
	"dbmreplace": "keyword2",
	"dbx_close": "keyword2",
	"dbx_cmp_asc": "keyword2",
	"dbx_cmp_desc": "keyword2",
	"dbx_connect": "keyword2",
	"dbx_error": "keyword2",
	"dbx_query": "keyword2",
	"dbx_sort": "keyword2",
	"dcgettext": "keyword2",
	"debug_backtrace": "keyword2",
	"debug_print_backtrace": "keyword2",
	"decbin": "keyword2",
	"dechex": "keyword2",
	"declare": "keyword1",
	"decoct": "keyword2",
	"default": "keyword1",
	"define": "keyword2",
	"define_syslog_variables": "keyword2",
	"defined": "keyword2",
	"deg2rad": "keyword2",
	"delete_iovec": "keyword2",
	"dgettext": "keyword2",
	"die": "keyword2",
	"dir": "keyword2",
	"dirname": "keyword2",
	"diskfreespace": "keyword2",
	"display_disabled_function": "keyword2",
	"dl": "keyword2",
	"do": "keyword1",
	"domxml_add_root": "keyword2",
	"domxml_attributes": "keyword2",
	"domxml_children": "keyword2",
	"domxml_dumpmem": "keyword2",
	"domxml_elem_get_attribute": "keyword2",
	"domxml_elem_set_attribute": "keyword2",
	"domxml_get_attribute": "keyword2",
	"domxml_getattr": "keyword2",
	"domxml_new_child": "keyword2",
	"domxml_new_xmldoc": "keyword2",
	"domxml_node": "keyword2",
	"domxml_node_attributes": "keyword2",
	"domxml_node_children": "keyword2",
	"domxml_node_new_child": "keyword2",
	"domxml_node_set_content": "keyword2",
	"domxml_node_unlink_node": "keyword2",
	"domxml_root": "keyword2",
	"domxml_set_attribute": "keyword2",
	"domxml_setattr": "keyword2",
	"domxml_unlink_node": "keyword2",
	"domxml_version": "keyword2",
	"doubleval": "keyword2",
	"drawarc": "keyword2",
	"drawcircle": "keyword2",
	"drawcubic": "keyword2",
	"drawcubicto": "keyword2",
	"drawcurve": "keyword2",
	"drawcurveto": "keyword2",
	"drawglyph": "keyword2",
	"drawline": "keyword2",
	"drawlineto": "keyword2",
	"each": "keyword2",
	"easter_date": "keyword2",
	"easter_days": "keyword2",
	"echo": "keyword1",
	"else": "keyword1",
	"elseif": "keyword1",
	"empty": "keyword1",
	"end": "keyword2",
	"endfor": "keyword1",
	"endforeach": "keyword1",
	"endif": "keyword1",
	"endswitch": "keyword1",
	"endwhile": "keyword1",
	"ereg": "keyword2",
	"ereg_replace": "keyword2",
	"eregi": "keyword2",
	"eregi_replace": "keyword2",
	"error_log": "keyword2",
	"error_reporting": "keyword2",
	"escapeshellarg": "keyword2",
	"escapeshellcmd": "keyword2",
	"exec": "keyword2",
	"exit": "keyword2",
	"exp": "keyword2",
	"explode": "keyword2",
	"extends": "keyword1",
	"extension_loaded": "keyword2",
	"extract": "keyword2",
	"ezmlm_hash": "keyword2",
	"false": "keyword3",
	"fbsql": "keyword2",
	"fbsql_affected_rows": "keyword2",
	"fbsql_autocommit": "keyword2",
	"fbsql_close": "keyword2",
	"fbsql_commit": "keyword2",
	"fbsql_connect": "keyword2",
	"fbsql_create_db": "keyword2",
	"fbsql_data_seek": "keyword2",
	"fbsql_database": "keyword2",
	"fbsql_database_password": "keyword2",
	"fbsql_db_query": "keyword2",
	"fbsql_drop_db": "keyword2",
	"fbsql_errno": "keyword2",
	"fbsql_error": "keyword2",
	"fbsql_fetch_array": "keyword2",
	"fbsql_fetch_assoc": "keyword2",
	"fbsql_fetch_field": "keyword2",
	"fbsql_fetch_lengths": "keyword2",
	"fbsql_fetch_object": "keyword2",
	"fbsql_fetch_row": "keyword2",
	"fbsql_field_flags": "keyword2",
	"fbsql_field_len": "keyword2",
	"fbsql_field_name": "keyword2",
	"fbsql_field_seek": "keyword2",
	"fbsql_field_table": "keyword2",
	"fbsql_field_type": "keyword2",
	"fbsql_free_result": "keyword2",
	"fbsql_hostname": "keyword2",
	"fbsql_insert_id": "keyword2",
	"fbsql_list_dbs": "keyword2",
	"fbsql_list_fields": "keyword2",
	"fbsql_list_tables": "keyword2",
	"fbsql_next_result": "keyword2",
	"fbsql_num_fields": "keyword2",
	"fbsql_num_rows": "keyword2",
	"fbsql_password": "keyword2",
	"fbsql_pconnect": "keyword2",
	"fbsql_query": "keyword2",
	"fbsql_result": "keyword2",
	"fbsql_rollback": "keyword2",
	"fbsql_select_db": "keyword2",
	"fbsql_start_db": "keyword2",
	"fbsql_stop_db": "keyword2",
	"fbsql_username": "keyword2",
	"fbsql_warnings": "keyword2",
	"fclose": "keyword2",
	"fd_alloc": "keyword2",
	"fd_clear": "keyword2",
	"fd_dealloc": "keyword2",
	"fd_isset": "keyword2",
	"fd_set": "keyword2",
	"fd_zero": "keyword2",
	"fdf_add_template": "keyword2",
	"fdf_close": "keyword2",
	"fdf_create": "keyword2",
	"fdf_get_file": "keyword2",
	"fdf_get_status": "keyword2",
	"fdf_get_value": "keyword2",
	"fdf_next_field_name": "keyword2",
	"fdf_open": "keyword2",
	"fdf_save": "keyword2",
	"fdf_set_ap": "keyword2",
	"fdf_set_file": "keyword2",
	"fdf_set_flags": "keyword2",
	"fdf_set_javascript_action": "keyword2",
	"fdf_set_opt": "keyword2",
	"fdf_set_status": "keyword2",
	"fdf_set_submit_form_action": "keyword2",
	"fdf_set_value": "keyword2",
	"feof": "keyword2",
	"fetch_iovec": "keyword2",
	"fflush": "keyword2",
	"fgetc": "keyword2",
	"fgetcsv": "keyword2",
	"fgets": "keyword2",
	"fgetss": "keyword2",
	"file": "keyword2",
	"file_exists": "keyword2",
	"file_get_contents": "keyword2",
	"file_put_contents": "keyword2",
	"fileatime": "keyword2",
	"filectime": "keyword2",
	"filegroup": "keyword2",
	"fileinode": "keyword2",
	"filemtime": "keyword2",
	"fileowner": "keyword2",
	"fileperms": "keyword2",
	"filepro": "keyword2",
	"filepro_fieldcount": "keyword2",
	"filepro_fieldname": "keyword2",
	"filepro_fieldtype": "keyword2",
	"filepro_fieldwidth": "keyword2",
	"filepro_retrieve": "keyword2",
	"filepro_rowcount": "keyword2",
	"filesize": "keyword2",
	"filetype": "keyword2",
	"final": "keyword1",
	"floatval": "keyword2",
	"flock": "keyword2",
	"floor": "keyword2",
	"flush": "keyword2",
	"fopen": "keyword2",
	"fopenstream": "keyword2",
	"for": "keyword1",
	"foreach": "keyword1",
	"fpassthru": "keyword2",
	"fputs": "keyword2",
	"fread": "keyword2",
	"free_iovec": "keyword2",
	"frenchtojd": "keyword2",
	"fribidi_log2vis": "keyword2",
	"fscanf": "keyword2",
	"fseek": "keyword2",
	"fsockopen": "keyword2",
	"fstat": "keyword2",
	"ftell": "keyword2",
	"ftp_alloc": "keyword2",
	"ftp_cdup": "keyword2",
	"ftp_chdir": "keyword2",
	"ftp_connect": "keyword2",
	"ftp_delete": "keyword2",
	"ftp_exec": "keyword2",
	"ftp_fget": "keyword2",
	"ftp_fput": "keyword2",
	"ftp_get": "keyword2",
	"ftp_login": "keyword2",
	"ftp_mdtm": "keyword2",
	"ftp_mkdir": "keyword2",
	"ftp_nlist": "keyword2",
	"ftp_pasv": "keyword2",
	"ftp_put": "keyword2",
	"ftp_pwd": "keyword2",
	"ftp_quit": "keyword2",
	"ftp_rawlist": "keyword2",
	"ftp_rename": "keyword2",
	"ftp_rmdir": "keyword2",
	"ftp_site": "keyword2",
	"ftp_size": "keyword2",
	"ftp_ssl_connect": "keyword2",
	"ftp_systype": "keyword2",
	"ftruncate": "keyword2",
	"func_get_arg": "keyword2",
	"func_get_args": "keyword2",
	"func_num_args": "keyword2",
	"function": "keyword1",
	"function_exists": "keyword2",
	"fwrite": "keyword2",
	"gd_info": "keyword2",
	"get_all_headers": "keyword2",
	"get_browser": "keyword2",
	"get_cfg_var": "keyword2",
	"get_class": "keyword2",
	"get_class_methods": "keyword2",
	"get_class_vars": "keyword2",
	"get_current_user": "keyword2",
	"get_declared_classes": "keyword2",
	"get_declared_interfaces": "keyword2",
	"get_defined_functions": "keyword2",
	"get_defined_vars": "keyword2",
	"get_extension_funcs": "keyword2",
	"get_headers": "keyword2",
	"get_html_translation_table": "keyword2",
	"get_included_files": "keyword2",
	"get_loaded_extensions": "keyword2",
	"get_magic_quotes_gpc": "keyword2",
	"get_magic_quotes_runtime": "keyword2",
	"get_meta_tags": "keyword2",
	"get_object_vars": "keyword2",
	"get_parent_class": "keyword2",
	"get_required_files": "keyword2",
	"get_resource_type": "keyword2",
	"getallheaders": "keyword2",
	"getascent": "keyword2",
	"getcwd": "keyword2",
	"getdate": "keyword2",
	"getdescent": "keyword2",
	"getenv": "keyword2",
	"getheight": "keyword2",
	"gethostbyaddr": "keyword2",
	"gethostbyname": "keyword2",
	"gethostbynamel": "keyword2",
	"getimagesize": "keyword2",
	"getlastmod": "keyword2",
	"getleading": "keyword2",
	"getmxrr": "keyword2",
	"getmyinode": "keyword2",
	"getmypid": "keyword2",
	"getmyuid": "keyword2",
	"getopt": "keyword2",
	"getpeername": "keyword2",
	"getprotobyname": "keyword2",
	"getprotobynumber": "keyword2",
	"getrandmax": "keyword2",
	"getrusage": "keyword2",
	"getservbyname": "keyword2",
	"getservbyport": "keyword2",
	"getshape1": "keyword2",
	"getshape2": "keyword2",
	"getsockname": "keyword2",
	"getsockopt": "keyword2",
	"gettext": "keyword2",
	"gettimeofday": "keyword2",
	"gettype": "keyword2",
	"getwidth": "keyword2",
	"global": "keyword1",
	"gmdate": "keyword2",
	"gmmktime": "keyword2",
	"gmp_abs": "keyword2",
	"gmp_add": "keyword2",
	"gmp_and": "keyword2",
	"gmp_clrbit": "keyword2",
	"gmp_cmp": "keyword2",
	"gmp_com": "keyword2",
	"gmp_div": "keyword2",
	"gmp_div_q": "keyword2",
	"gmp_div_qr": "keyword2",
	"gmp_div_r": "keyword2",
	"gmp_divexact": "keyword2",
	"gmp_fact": "keyword2",
	"gmp_gcd": "keyword2",
	"gmp_gcdext": "keyword2",
	"gmp_hamdist": "keyword2",
	"gmp_init": "keyword2",
	"gmp_intval": "keyword2",
	"gmp_invert": "keyword2",
	"gmp_jacobi": "keyword2",
	"gmp_legendre": "keyword2",
	"gmp_mod": "keyword2",
	"gmp_mul": "keyword2",
	"gmp_neg": "keyword2",
	"gmp_or": "keyword2",
	"gmp_perfect_square": "keyword2",
	"gmp_popcount": "keyword2",
	"gmp_pow": "keyword2",
	"gmp_powm": "keyword2",
	"gmp_prob_prime": "keyword2",
	"gmp_random": "keyword2",
	"gmp_scan0": "keyword2",
	"gmp_scan1": "keyword2",
	"gmp_setbit": "keyword2",
	"gmp_sign": "keyword2",
	"gmp_sqrt": "keyword2",
	"gmp_sqrtrem": "keyword2",
	"gmp_strval": "keyword2",
	"gmp_sub": "keyword2",
	"gmp_xor": "keyword2",
	"gmstrftime": "keyword2",
	"gregoriantojd": "keyword2",
	"gzclose": "keyword2",
	"gzcompress": "keyword2",
	"gzdeflate": "keyword2",
	"gzencode": "keyword2",
	"gzeof": "keyword2",
	"gzfile": "keyword2",
	"gzgetc": "keyword2",
	"gzgets": "keyword2",
	"gzgetss": "keyword2",
	"gzinflate": "keyword2",
	"gzopen": "keyword2",
	"gzpassthru": "keyword2",
	"gzputs": "keyword2",
	"gzread": "keyword2",
	"gzrewind": "keyword2",
	"gzseek": "keyword2",
	"gztell": "keyword2",
	"gzuncompress": "keyword2",
	"gzwrite": "keyword2",
	"header": "keyword2",
	"headers_list": "keyword2",
	"headers_sent": "keyword2",
	"hebrev": "keyword2",
	"hebrevc": "keyword2",
	"hexdec": "keyword2",
	"highlight_file": "keyword2",
	"highlight_string": "keyword2",
	"htmlentities": "keyword2",
	"htmlspecialchars": "keyword2",
	"http_build_query": "keyword2",
	"hw_array2objrec": "keyword2",
	"hw_changeobject": "keyword2",
	"hw_children": "keyword2",
	"hw_childrenobj": "keyword2",
	"hw_close": "keyword2",
	"hw_connect": "keyword2",
	"hw_connection_info": "keyword2",
	"hw_cp": "keyword2",
	"hw_deleteobject": "keyword2",
	"hw_docbyanchor": "keyword2",
	"hw_docbyanchorobj": "keyword2",
	"hw_document_attributes": "keyword2",
	"hw_document_bodytag": "keyword2",
	"hw_document_content": "keyword2",
	"hw_document_setcontent": "keyword2",
	"hw_document_size": "keyword2",
	"hw_dummy": "keyword2",
	"hw_edittext": "keyword2",
	"hw_error": "keyword2",
	"hw_errormsg": "keyword2",
	"hw_free_document": "keyword2",
	"hw_getanchors": "keyword2",
	"hw_getanchorsobj": "keyword2",
	"hw_getandlock": "keyword2",
	"hw_getcgi": "keyword2",
	"hw_getchildcoll": "keyword2",
	"hw_getchildcollobj": "keyword2",
	"hw_getchilddoccoll": "keyword2",
	"hw_getchilddoccollobj": "keyword2",
	"hw_getobject": "keyword2",
	"hw_getobjectbyftquery": "keyword2",
	"hw_getobjectbyftquerycoll": "keyword2",
	"hw_getobjectbyftquerycollobj": "keyword2",
	"hw_getobjectbyftqueryobj": "keyword2",
	"hw_getobjectbyquery": "keyword2",
	"hw_getobjectbyquerycoll": "keyword2",
	"hw_getobjectbyquerycollobj": "keyword2",
	"hw_getobjectbyqueryobj": "keyword2",
	"hw_getparents": "keyword2",
	"hw_getparentsobj": "keyword2",
	"hw_getrellink": "keyword2",
	"hw_getremote": "keyword2",
	"hw_getremotechildren": "keyword2",
	"hw_getsrcbydestobj": "keyword2",
	"hw_gettext": "keyword2",
	"hw_getusername": "keyword2",
	"hw_identify": "keyword2",
	"hw_incollections": "keyword2",
	"hw_info": "keyword2",
	"hw_inscoll": "keyword2",
	"hw_insdoc": "keyword2",
	"hw_insertanchors": "keyword2",
	"hw_insertdocument": "keyword2",
	"hw_insertobject": "keyword2",
	"hw_mapid": "keyword2",
	"hw_modifyobject": "keyword2",
	"hw_mv": "keyword2",
	"hw_new_document": "keyword2",
	"hw_new_document_from_file": "keyword2",
	"hw_objrec2array": "keyword2",
	"hw_output_document": "keyword2",
	"hw_pconnect": "keyword2",
	"hw_pipecgi": "keyword2",
	"hw_pipedocument": "keyword2",
	"hw_root": "keyword2",
	"hw_setlinkroot": "keyword2",
	"hw_stat": "keyword2",
	"hw_unlock": "keyword2",
	"hw_who": "keyword2",
	"ibase_blob_add": "keyword2",
	"ibase_blob_cancel": "keyword2",
	"ibase_blob_close": "keyword2",
	"ibase_blob_create": "keyword2",
	"ibase_blob_echo": "keyword2",
	"ibase_blob_get": "keyword2",
	"ibase_blob_import": "keyword2",
	"ibase_blob_info": "keyword2",
	"ibase_blob_open": "keyword2",
	"ibase_close": "keyword2",
	"ibase_commit": "keyword2",
	"ibase_connect": "keyword2",
	"ibase_errmsg": "keyword2",
	"ibase_execute": "keyword2",
	"ibase_fetch_object": "keyword2",
	"ibase_fetch_row": "keyword2",
	"ibase_field_info": "keyword2",
	"ibase_free_query": "keyword2",
	"ibase_free_result": "keyword2",
	"ibase_num_fields": "keyword2",
	"ibase_pconnect": "keyword2",
	"ibase_prepare": "keyword2",
	"ibase_query": "keyword2",
	"ibase_rollback": "keyword2",
	"ibase_timefmt": "keyword2",
	"ibase_trans": "keyword2",
	"icap_create_calendar": "keyword2",
	"icap_delete_calendar": "keyword2",
	"icap_delete_event": "keyword2",
	"icap_fetch_event": "keyword2",
	"icap_list_alarms": "keyword2",
	"icap_list_events": "keyword2",
	"icap_open": "keyword2",
	"icap_popen": "keyword2",
	"icap_rename_calendar": "keyword2",
	"icap_reopen": "keyword2",
	"icap_snooze": "keyword2",
	"icap_store_event": "keyword2",
	"iconv": "keyword2",
	"iconv_get_encoding": "keyword2",
	"iconv_set_encoding": "keyword2",
	"idate": "keyword2",
	"if": "keyword1",
	"ifx_affected_rows": "keyword2",
	"ifx_blobinfile_mode": "keyword2",
	"ifx_byteasvarchar": "keyword2",
	"ifx_close": "keyword2",
	"ifx_connect": "keyword2",
	"ifx_copy_blob": "keyword2",
	"ifx_create_blob": "keyword2",
	"ifx_create_char": "keyword2",
	"ifx_do": "keyword2",
	"ifx_error": "keyword2",
	"ifx_errormsg": "keyword2",
	"ifx_fetch_row": "keyword2",
	"ifx_fieldproperties": "keyword2",
	"ifx_fieldtypes": "keyword2",
	"ifx_free_blob": "keyword2",
	"ifx_free_char": "keyword2",
	"ifx_free_result": "keyword2",
	"ifx_get_blob": "keyword2",
	"ifx_get_char": "keyword2",
	"ifx_getsqlca": "keyword2",
	"ifx_htmltbl_result": "keyword2",
	"ifx_nullformat": "keyword2",
	"ifx_num_fields": "keyword2",
	"ifx_num_rows": "keyword2",
	"ifx_pconnect": "keyword2",
	"ifx_prepare": "keyword2",
	"ifx_query": "keyword2",
	"ifx_textasvarchar": "keyword2",
	"ifx_update_blob": "keyword2",
	"ifx_update_char": "keyword2",
	"ifxus_close_slob": "keyword2",
	"ifxus_create_slob": "keyword2",
	"ifxus_free_slob": "keyword2",
	"ifxus_open_slob": "keyword2",
	"ifxus_read_slob": "keyword2",
	"ifxus_seek_slob": "keyword2",
	"ifxus_tell_slob": "keyword2",
	"ifxus_write_slob": "keyword2",
	"ignore_user_abort": "keyword2",
	"iis_addserver": "keyword2",
	"iis_getdirsecurity": "keyword2",
	"iis_getscriptmap": "keyword2",
	"iis_getserverbycomment": "keyword2",
	"iis_getserverbypath": "keyword2",
	"iis_getserverright": "keyword2",
	"iis_getservicestate": "keyword2",
	"iis_removeserver": "keyword2",
	"iis_setappsettings": "keyword2",
	"iis_setdirsecurity": "keyword2",
	"iis_setscriptmap": "keyword2",
	"iis_setserverright": "keyword2",
	"iis_startserver": "keyword2",
	"iis_startservice": "keyword2",
	"iis_stopserver": "keyword2",
	"iis_stopservice": "keyword2",
	"image2wbmp": "keyword2",
	"image_type_to_extension": "keyword2",
	"imagealphablending": "keyword2",
	"imagearc": "keyword2",
	"imagechar": "keyword2",
	"imagecharup": "keyword2",
	"imagecolorallocate": "keyword2",
	"imagecolorat": "keyword2",
	"imagecolorclosest": "keyword2",
	"imagecolorclosestalpha": "keyword2",
	"imagecolorclosesthwb": "keyword2",
	"imagecolordeallocate": "keyword2",
	"imagecolorexact": "keyword2",
	"imagecolorexactalpha": "keyword2",
	"imagecolormatch": "keyword2",
	"imagecolorresolve": "keyword2",
	"imagecolorresolvealpha": "keyword2",
	"imagecolorset": "keyword2",
	"imagecolorsforindex": "keyword2",
	"imagecolorstotal": "keyword2",
	"imagecolortransparent": "keyword2",
	"imagecopy": "keyword2",
	"imagecopymerge": "keyword2",
	"imagecopymergegray": "keyword2",
	"imagecopyresampled": "keyword2",
	"imagecopyresized": "keyword2",
	"imagecreate": "keyword2",
	"imagecreatefromgif": "keyword2",
	"imagecreatefromjpeg": "keyword2",
	"imagecreatefrompng": "keyword2",
	"imagecreatefromstring": "keyword2",
	"imagecreatefromwbmp": "keyword2",
	"imagecreatefromxbm": "keyword2",
	"imagecreatefromxpm": "keyword2",
	"imagecreatetruecolor": "keyword2",
	"imagedashedline": "keyword2",
	"imagedestroy": "keyword2",
	"imageellipse": "keyword2",
	"imagefill": "keyword2",
	"imagefilledarc": "keyword2",
	"imagefilledellipse": "keyword2",
	"imagefilledpolygon": "keyword2",
	"imagefilledrectangle": "keyword2",
	"imagefilltoborder": "keyword2",
	"imagefilter": "keyword2",
	"imagefontheight": "keyword2",
	"imagefontwidth": "keyword2",
	"imagegammacorrect": "keyword2",
	"imagegif": "keyword2",
	"imageinterlace": "keyword2",
	"imagejpeg": "keyword2",
	"imagelayereffect": "keyword2",
	"imageline": "keyword2",
	"imageloadfont": "keyword2",
	"imagepalettecopy": "keyword2",
	"imagepng": "keyword2",
	"imagepolygon": "keyword2",
	"imagepsbbox": "keyword2",
	"imagepscopyfont": "keyword2",
	"imagepsencodefont": "keyword2",
	"imagepsextendfont": "keyword2",
	"imagepsfreefont": "keyword2",
	"imagepsloadfont": "keyword2",
	"imagepsslantfont": "keyword2",
	"imagepstext": "keyword2",
	"imagerectangle": "keyword2",
	"imagerotate": "keyword2",
	"imagesetbrush": "keyword2",
	"imagesetpixel": "keyword2",
	"imagesetstyle": "keyword2",
	"imagesetthickness": "keyword2",
	"imagesettile": "keyword2",
	"imagestring": "keyword2",
	"imagestringup": "keyword2",
	"imagesx": "keyword2",
	"imagesy": "keyword2",
	"imagetruecolortopalette": "keyword2",
	"imagettfbbox": "keyword2",
	"imagettftext": "keyword2",
	"imagetypes": "keyword2",
	"imagewbmp": "keyword2",
	"imap_8bit": "keyword2",
	"imap_alerts": "keyword2",
	"imap_append": "keyword2",
	"imap_base64": "keyword2",
	"imap_binary": "keyword2",
	"imap_body": "keyword2",
	"imap_bodystruct": "keyword2",
	"imap_check": "keyword2",
	"imap_clearflag_full": "keyword2",
	"imap_close": "keyword2",
	"imap_create": "keyword2",
	"imap_createmailbox": "keyword2",
	"imap_delete": "keyword2",
	"imap_deletemailbox": "keyword2",
	"imap_errors": "keyword2",
	"imap_expunge": "keyword2",
	"imap_fetch_overview": "keyword2",
	"imap_fetchbody": "keyword2",
	"imap_fetchheader": "keyword2",
	"imap_fetchstructure": "keyword2",
	"imap_fetchtext": "keyword2",
	"imap_get_quota": "keyword2",
	"imap_getmailboxes": "keyword2",
	"imap_getsubscribed": "keyword2",
	"imap_header": "keyword2",
	"imap_headerinfo": "keyword2",
	"imap_headers": "keyword2",
	"imap_last_error": "keyword2",
	"imap_list": "keyword2",
	"imap_listmailbox": "keyword2",
	"imap_listsubscribed": "keyword2",
	"imap_lsub": "keyword2",
	"imap_mail": "keyword2",
	"imap_mail_compose": "keyword2",
	"imap_mail_copy": "keyword2",
	"imap_mail_move": "keyword2",
	"imap_mailboxmsginfo": "keyword2",
	"imap_mime_header_decode": "keyword2",
	"imap_msgno": "keyword2",
	"imap_num_msg": "keyword2",
	"imap_num_recent": "keyword2",
	"imap_open": "keyword2",
	"imap_ping": "keyword2",
	"imap_popen": "keyword2",
	"imap_qprint": "keyword2",
	"imap_rename": "keyword2",
	"imap_renamemailbox": "keyword2",
	"imap_reopen": "keyword2",
	"imap_rfc822_parse_adrlist": "keyword2",
	"imap_rfc822_parse_headers": "keyword2",
	"imap_rfc822_write_address": "keyword2",
	"imap_scan": "keyword2",
	"imap_scanmailbox": "keyword2",
	"imap_search": "keyword2",
	"imap_set_quota": "keyword2",
	"imap_setflag_full": "keyword2",
	"imap_sort": "keyword2",
	"imap_status": "keyword2",
	"imap_subscribe": "keyword2",
	"imap_uid": "keyword2",
	"imap_undelete": "keyword2",
	"imap_unsubscribe": "keyword2",
	"imap_utf7_decode": "keyword2",
	"imap_utf7_encode": "keyword2",
	"imap_utf8": "keyword2",
	"implements": "keyword1",
	"implode": "keyword2",
	"in_array": "keyword2",
	"include": "keyword1",
	"include_once": "keyword1",
	"ingres_autocommit": "keyword2",
	"ingres_close": "keyword2",
	"ingres_commit": "keyword2",
	"ingres_connect": "keyword2",
	"ingres_fetch_array": "keyword2",
	"ingres_fetch_object": "keyword2",
	"ingres_fetch_row": "keyword2",
	"ingres_field_length": "keyword2",
	"ingres_field_name": "keyword2",
	"ingres_field_nullable": "keyword2",
	"ingres_field_precision": "keyword2",
	"ingres_field_scale": "keyword2",
	"ingres_field_type": "keyword2",
	"ingres_num_fields": "keyword2",
	"ingres_num_rows": "keyword2",
	"ingres_pconnect": "keyword2",
	"ingres_query": "keyword2",
	"ingres_rollback": "keyword2",
	"ini_alter": "keyword2",
	"ini_get": "keyword2",
	"ini_restore": "keyword2",
	"ini_set": "keyword2",
	"instanceof": "operator",
	"interface": "keyword1",
	"intval": "keyword2",
	"ip2long": "keyword2",
	"iptcembed": "keyword2",
	"iptcparse": "keyword2",
	"ircg_channel_mode": "keyword2",
	"ircg_disconnect": "keyword2",
	"ircg_html_encode": "keyword2",
	"ircg_ignore_add": "keyword2",
	"ircg_ignore_del": "keyword2",
	"ircg_is_conn_alive": "keyword2",
	"ircg_join": "keyword2",
	"ircg_kick": "keyword2",
	"ircg_lookup_format_messages": "keyword2",
	"ircg_msg": "keyword2",
	"ircg_nick": "keyword2",
	"ircg_nickname_escape": "keyword2",
	"ircg_nickname_unescape": "keyword2",
	"ircg_notice": "keyword2",
	"ircg_part": "keyword2",
	"ircg_pconnect": "keyword2",
	"ircg_register_format_messages": "keyword2",
	"ircg_set_current": "keyword2",
	"ircg_topic": "keyword2",
	"ircg_whois": "keyword2",
	"is_array": "keyword2",
	"is_bool": "keyword2",
	"is_dir": "keyword2",
	"is_double": "keyword2",
	"is_executable": "keyword2",
	"is_file": "keyword2",
	"is_float": "keyword2",
	"is_int": "keyword2",
	"is_integer": "keyword2",
	"is_link": "keyword2",
	"is_long": "keyword2",
	"is_null": "keyword2",
	"is_numeric": "keyword2",
	"is_object": "keyword2",
	"is_readable": "keyword2",
	"is_real": "keyword2",
	"is_resource": "keyword2",
	"is_scalar": "keyword2",
	"is_string": "keyword2",
	"is_subclass_of": "keyword2",
	"is_uploaded_file": "keyword2",
	"is_writable": "keyword2",
	"is_writeable": "keyword2",
	"isset": "keyword1",
	"java_last_exception_clear": "keyword2",
	"java_last_exception_get": "keyword2",
	"jddayofweek": "keyword2",
	"jdmonthname": "keyword2",
	"jdtofrench": "keyword2",
	"jdtogregorian": "keyword2",
	"jdtojewish": "keyword2",
	"jdtojulian": "keyword2",
	"jdtounix": "keyword2",
	"jewishtojd": "keyword2",
	"join": "keyword2",
	"jpeg2wbmp": "keyword2",
	"juliantojd": "keyword2",
	"key": "keyword2",
	"krsort": "keyword2",
	"ksort": "keyword2",
	"labelframe": "keyword2",
	"lcg_value": "keyword2",
	"ldap_8859_to_t61": "keyword2",
	"ldap_add": "keyword2",
	"ldap_bind": "keyword2",
	"ldap_close": "keyword2",
	"ldap_compare": "keyword2",
	"ldap_connect": "keyword2",
	"ldap_count_entries": "keyword2",
	"ldap_delete": "keyword2",
	"ldap_dn2ufn": "keyword2",
	"ldap_err2str": "keyword2",
	"ldap_errno": "keyword2",
	"ldap_error": "keyword2",
	"ldap_explode_dn": "keyword2",
	"ldap_first_attribute": "keyword2",
	"ldap_first_entry": "keyword2",
	"ldap_first_reference": "keyword2",
	"ldap_free_result": "keyword2",
	"ldap_get_attributes": "keyword2",
	"ldap_get_dn": "keyword2",
	"ldap_get_entries": "keyword2",
	"ldap_get_option": "keyword2",
	"ldap_get_values": "keyword2",
	"ldap_get_values_len": "keyword2",
	"ldap_list": "keyword2",
	"ldap_mod_add": "keyword2",
	"ldap_mod_del": "keyword2",
	"ldap_mod_replace": "keyword2",
	"ldap_modify": "keyword2",
	"ldap_next_attribute": "keyword2",
	"ldap_next_entry": "keyword2",
	"ldap_next_reference": "keyword2",
	"ldap_parse_reference": "keyword2",
	"ldap_parse_result": "keyword2",
	"ldap_read": "keyword2",
	"ldap_rename": "keyword2",
	"ldap_search": "keyword2",
	"ldap_set_option": "keyword2",
	"ldap_t61_to_8859": "keyword2",
	"ldap_unbind": "keyword2",
	"leak": "keyword2",
	"levenshtein": "keyword2",
	"link": "keyword2",
	"linkinfo": "keyword2",
	"list": "keyword1",
	"listen": "keyword2",
	"localeconv": "keyword2",
	"localtime": "keyword2",
	"log": "keyword2",
	"log10": "keyword2",
	"long2ip": "keyword2",
	"lstat": "keyword2",
	"ltrim": "keyword2",
	"magic_quotes_runtime": "keyword2",
	"mail": "keyword2",
	"max": "keyword2",
	"mb_convert_case": "keyword2",
	"mb_strtolower": "keyword2",
	"mb_strtoupper": "keyword2",
	"mcal_append_event": "keyword2",
	"mcal_close": "keyword2",
	"mcal_create_calendar": "keyword2",
	"mcal_date_compare": "keyword2",
	"mcal_date_valid": "keyword2",
	"mcal_day_of_week": "keyword2",
	"mcal_day_of_year": "keyword2",
	"mcal_days_in_month": "keyword2",
	"mcal_delete_calendar": "keyword2",
	"mcal_delete_event": "keyword2",
	"mcal_event_add_attribute": "keyword2",
	"mcal_event_init": "keyword2",
	"mcal_event_set_alarm": "keyword2",
	"mcal_event_set_category": "keyword2",
	"mcal_event_set_class": "keyword2",
	"mcal_event_set_description": "keyword2",
	"mcal_event_set_end": "keyword2",
	"mcal_event_set_recur_daily": "keyword2",
	"mcal_event_set_recur_monthly_mday": "keyword2",
	"mcal_event_set_recur_monthly_wday": "keyword2",
	"mcal_event_set_recur_none": "keyword2",
	"mcal_event_set_recur_weekly": "keyword2",
	"mcal_event_set_recur_yearly": "keyword2",
	"mcal_event_set_start": "keyword2",
	"mcal_event_set_title": "keyword2",
	"mcal_fetch_current_stream_event": "keyword2",
	"mcal_fetch_event": "keyword2",
	"mcal_is_leap_year": "keyword2",
	"mcal_list_alarms": "keyword2",
	"mcal_list_events": "keyword2",
	"mcal_next_recurrence": "keyword2",
	"mcal_open": "keyword2",
	"mcal_popen": "keyword2",
	"mcal_rename_calendar": "keyword2",
	"mcal_reopen": "keyword2",
	"mcal_snooze": "keyword2",
	"mcal_store_event": "keyword2",
	"mcal_time_valid": "keyword2",
	"mcal_week_of_year": "keyword2",
	"mcrypt_cbc": "keyword2",
	"mcrypt_cfb": "keyword2",
	"mcrypt_create_iv": "keyword2",
	"mcrypt_decrypt": "keyword2",
	"mcrypt_ecb": "keyword2",
	"mcrypt_enc_get_algorithms_name": "keyword2",
	"mcrypt_enc_get_block_size": "keyword2",
	"mcrypt_enc_get_iv_size": "keyword2",
	"mcrypt_enc_get_key_size": "keyword2",
	"mcrypt_enc_get_modes_name": "keyword2",
	"mcrypt_enc_get_supported_key_sizes": "keyword2",
	"mcrypt_enc_is_block_algorithm": "keyword2",
	"mcrypt_enc_is_block_algorithm_mode": "keyword2",
	"mcrypt_enc_is_block_mode": "keyword2",
	"mcrypt_enc_self_test": "keyword2",
	"mcrypt_encrypt": "keyword2",
	"mcrypt_generic": "keyword2",
	"mcrypt_generic_deinit": "keyword2",
	"mcrypt_generic_end": "keyword2",
	"mcrypt_generic_init": "keyword2",
	"mcrypt_get_block_size": "keyword2",
	"mcrypt_get_cipher_name": "keyword2",
	"mcrypt_get_iv_size": "keyword2",
	"mcrypt_get_key_size": "keyword2",
	"mcrypt_list_algorithms": "keyword2",
	"mcrypt_list_modes": "keyword2",
	"mcrypt_module_close": "keyword2",
	"mcrypt_module_get_algo_block_size": "keyword2",
	"mcrypt_module_get_algo_key_size": "keyword2",
	"mcrypt_module_get_supported_key_sizes": "keyword2",
	"mcrypt_module_is_block_algorithm": "keyword2",
	"mcrypt_module_is_block_algorithm_mode": "keyword2",
	"mcrypt_module_is_block_mode": "keyword2",
	"mcrypt_module_open": "keyword2",
	"mcrypt_module_self_test": "keyword2",
	"mcrypt_ofb": "keyword2",
	"md5": "keyword2",
	"md5_file": "keyword2",
	"mdecrypt_generic": "keyword2",
	"metaphone": "keyword2",
	"method_exists": "keyword2",
	"mhash": "keyword2",
	"mhash_count": "keyword2",
	"mhash_get_block_size": "keyword2",
	"mhash_get_hash_name": "keyword2",
	"mhash_keygen_s2k": "keyword2",
	"microtime": "keyword2",
	"min": "keyword2",
	"ming_setcubicthreshold": "keyword2",
	"ming_setscale": "keyword2",
	"mkdir": "keyword2",
	"mktime": "keyword2",
	"move": "keyword2",
	"move_uploaded_file": "keyword2",
	"movepen": "keyword2",
	"movepento": "keyword2",
	"moveto": "keyword2",
	"msql": "keyword2",
	"msql_affected_rows": "keyword2",
	"msql_close": "keyword2",
	"msql_connect": "keyword2",
	"msql_create_db": "keyword2",
	"msql_createdb": "keyword2",
	"msql_data_seek": "keyword2",
	"msql_db_query": "keyword2",
	"msql_dbname": "keyword2",
	"msql_drop_db": "keyword2",
	"msql_dropdb": "keyword2",
	"msql_error": "keyword2",
	"msql_fetch_array": "keyword2",
	"msql_fetch_field": "keyword2",
	"msql_fetch_object": "keyword2",
	"msql_fetch_row": "keyword2",
	"msql_field_flags": "keyword2",
	"msql_field_len": "keyword2",
	"msql_field_name": "keyword2",
	"msql_field_seek": "keyword2",
	"msql_field_table": "keyword2",
	"msql_field_type": "keyword2",
	"msql_fieldflags": "keyword2",
	"msql_fieldlen": "keyword2",
	"msql_fieldname": "keyword2",
	"msql_fieldtable": "keyword2",
	"msql_fieldtype": "keyword2",
	"msql_free_result": "keyword2",
	"msql_freeresult": "keyword2",
	"msql_list_dbs": "keyword2",
	"msql_list_fields": "keyword2",
	"msql_list_tables": "keyword2",
	"msql_listdbs": "keyword2",
	"msql_listfields": "keyword2",
	"msql_listtables": "keyword2",
	"msql_num_fields": "keyword2",
	"msql_num_rows": "keyword2",
	"msql_numfields": "keyword2",
	"msql_numrows": "keyword2",
	"msql_pconnect": "keyword2",
	"msql_query": "keyword2",
	"msql_regcase": "keyword2",
	"msql_result": "keyword2",
	"msql_select_db": "keyword2",
	"msql_selectdb": "keyword2",
	"msql_tablename": "keyword2",
	"mssql_affected_rows": "keyword2",
	"mssql_close": "keyword2",
	"mssql_connect": "keyword2",
	"mssql_data_seek": "keyword2",
	"mssql_fetch_array": "keyword2",
	"mssql_fetch_batch": "keyword2",
	"mssql_fetch_field": "keyword2",
	"mssql_fetch_object": "keyword2",
	"mssql_fetch_row": "keyword2",
	"mssql_field_length": "keyword2",
	"mssql_field_name": "keyword2",
	"mssql_field_seek": "keyword2",
	"mssql_field_type": "keyword2",
	"mssql_free_result": "keyword2",
	"mssql_get_last_message": "keyword2",
	"mssql_min_client_severity": "keyword2",
	"mssql_min_error_severity": "keyword2",
	"mssql_min_message_severity": "keyword2",
	"mssql_min_server_severity": "keyword2",
	"mssql_next_result": "keyword2",
	"mssql_num_fields": "keyword2",
	"mssql_num_rows": "keyword2",
	"mssql_pconnect": "keyword2",
	"mssql_query": "keyword2",
	"mssql_result": "keyword2",
	"mssql_rows_affected": "keyword2",
	"mssql_select_db": "keyword2",
	"mt_getrandmax": "keyword2",
	"mt_rand": "keyword2",
	"mt_srand": "keyword2",
	"multcolor": "keyword2",
	"muscat_close": "keyword2",
	"muscat_get": "keyword2",
	"muscat_give": "keyword2",
	"muscat_setup": "keyword2",
	"muscat_setup_net": "keyword2",
	"mysql": "keyword2",
	"mysql_affected_rows": "keyword2",
	"mysql_close": "keyword2",
	"mysql_connect": "keyword2",
	"mysql_create_db": "keyword2",
	"mysql_createdb": "keyword2",
	"mysql_data_seek": "keyword2",
	"mysql_db_name": "keyword2",
	"mysql_db_query": "keyword2",
	"mysql_dbname": "keyword2",
	"mysql_drop_db": "keyword2",
	"mysql_dropdb": "keyword2",
	"mysql_errno": "keyword2",
	"mysql_error": "keyword2",
	"mysql_escape_string": "keyword2",
	"mysql_fetch_array": "keyword2",
	"mysql_fetch_assoc": "keyword2",
	"mysql_fetch_field": "keyword2",
	"mysql_fetch_lengths": "keyword2",
	"mysql_fetch_object": "keyword2",
	"mysql_fetch_row": "keyword2",
	"mysql_field_flags": "keyword2",
	"mysql_field_len": "keyword2",
	"mysql_field_name": "keyword2",
	"mysql_field_seek": "keyword2",
	"mysql_field_table": "keyword2",
	"mysql_field_type": "keyword2",
	"mysql_fieldflags": "keyword2",
	"mysql_fieldlen": "keyword2",
	"mysql_fieldname": "keyword2",
	"mysql_fieldtable": "keyword2",
	"mysql_fieldtype": "keyword2",
	"mysql_free_result": "keyword2",
	"mysql_freeresult": "keyword2",
	"mysql_get_client_info": "keyword2",
	"mysql_get_host_info": "keyword2",
	"mysql_get_proto_info": "keyword2",
	"mysql_get_server_info": "keyword2",
	"mysql_insert_id": "keyword2",
	"mysql_list_dbs": "keyword2",
	"mysql_list_fields": "keyword2",
	"mysql_list_tables": "keyword2",
	"mysql_listdbs": "keyword2",
	"mysql_listfields": "keyword2",
	"mysql_listtables": "keyword2",
	"mysql_num_fields": "keyword2",
	"mysql_num_rows": "keyword2",
	"mysql_numfields": "keyword2",
	"mysql_numrows": "keyword2",
	"mysql_pconnect": "keyword2",
	"mysql_query": "keyword2",
	"mysql_result": "keyword2",
	"mysql_select_db": "keyword2",
	"mysql_selectdb": "keyword2",
	"mysql_tablename": "keyword2",
	"mysql_unbuffered_query": "keyword2",
	"natcasesort": "keyword2",
	"natsort": "keyword2",
	"new": "keyword1",
	"new_xmldoc": "keyword2",
	"next": "keyword2",
	"nextframe": "keyword2",
	"nl2br": "keyword2",
	"notes_body": "keyword2",
	"notes_copy_db": "keyword2",
	"notes_create_db": "keyword2",
	"notes_create_note": "keyword2",
	"notes_drop_db": "keyword2",
	"notes_find_note": "keyword2",
	"notes_header_info": "keyword2",
	"notes_list_msgs": "keyword2",
	"notes_mark_read": "keyword2",
	"notes_mark_unread": "keyword2",
	"notes_nav_create": "keyword2",
	"notes_search": "keyword2",
	"notes_unread": "keyword2",
	"notes_version": "keyword2",
	"null": "keyword3",
	"number_format": "keyword2",
	"ob_end_clean": "keyword2",
	"ob_end_flush": "keyword2",
	"ob_get_clean": "keyword2",
	"ob_get_contents": "keyword2",
	"ob_get_flush": "keyword2",
	"ob_get_length": "keyword2",
	"ob_gzhandler": "keyword2",
	"ob_iconv_handler": "keyword2",
	"ob_implicit_flush": "keyword2",
	"ob_list_handlers": "keyword2",
	"ob_start": "keyword2",
	"ocibindbyname": "keyword2",
	"ocicancel": "keyword2",
	"ocicollappend": "keyword2",
	"ocicollassign": "keyword2",
	"ocicollassignelem": "keyword2",
	"ocicolldateappendelem": "keyword2",
	"ocicolldateassignelem": "keyword2",
	"ocicolldategetelem": "keyword2",
	"ocicollgetelem": "keyword2",
	"ocicollmax": "keyword2",
	"ocicollsize": "keyword2",
	"ocicolltrim": "keyword2",
	"ocicolumnisnull": "keyword2",
	"ocicolumnname": "keyword2",
	"ocicolumnprecision": "keyword2",
	"ocicolumnscale": "keyword2",
	"ocicolumnsize": "keyword2",
	"ocicolumntype": "keyword2",
	"ocicolumntyperaw": "keyword2",
	"ocicommit": "keyword2",
	"ocidefinebyname": "keyword2",
	"ocierror": "keyword2",
	"ociexecute": "keyword2",
	"ocifetch": "keyword2",
	"ocifetchinto": "keyword2",
	"ocifetchstatement": "keyword2",
	"ocifreecoll": "keyword2",
	"ocifreecursor": "keyword2",
	"ocifreedesc": "keyword2",
	"ocifreestatement": "keyword2",
	"ociinternaldebug": "keyword2",
	"ociloadlob": "keyword2",
	"ocilogoff": "keyword2",
	"ocilogon": "keyword2",
	"ocinewcollection": "keyword2",
	"ocinewcursor": "keyword2",
	"ocinewdescriptor": "keyword2",
	"ocinlogon": "keyword2",
	"ocinumcols": "keyword2",
	"ociparse": "keyword2",
	"ociplogon": "keyword2",
	"ociresult": "keyword2",
	"ocirollback": "keyword2",
	"ocirowcount": "keyword2",
	"ocisavelob": "keyword2",
	"ocisavelobfile": "keyword2",
	"ociserverversion": "keyword2",
	"ocisetprefetch": "keyword2",
	"ocistatementtype": "keyword2",
	"ociwritelobtofile": "keyword2",
	"octdec": "keyword2",
	"odbc_autocommit": "keyword2",
	"odbc_binmode": "keyword2",
	"odbc_close": "keyword2",
	"odbc_close_all": "keyword2",
	"odbc_columnprivileges": "keyword2",
	"odbc_columns": "keyword2",
	"odbc_commit": "keyword2",
	"odbc_connect": "keyword2",
	"odbc_cursor": "keyword2",
	"odbc_data_source": "keyword2",
	"odbc_do": "keyword2",
	"odbc_error": "keyword2",
	"odbc_errormsg": "keyword2",
	"odbc_exec": "keyword2",
	"odbc_execute": "keyword2",
	"odbc_fetch_array": "keyword2",
	"odbc_fetch_into": "keyword2",
	"odbc_fetch_object": "keyword2",
	"odbc_fetch_row": "keyword2",
	"odbc_field_len": "keyword2",
	"odbc_field_name": "keyword2",
	"odbc_field_num": "keyword2",
	"odbc_field_precision": "keyword2",
	"odbc_field_scale": "keyword2",
	"odbc_field_type": "keyword2",
	"odbc_foreignkeys": "keyword2",
	"odbc_free_result": "keyword2",
	"odbc_gettypeinfo": "keyword2",
	"odbc_longreadlen": "keyword2",
	"odbc_next_result": "keyword2",
	"odbc_num_fields": "keyword2",
	"odbc_num_rows": "keyword2",
	"odbc_pconnect": "keyword2",
	"odbc_prepare": "keyword2",
	"odbc_primarykeys": "keyword2",
	"odbc_procedurecolumns": "keyword2",
	"odbc_procedures": "keyword2",
	"odbc_result": "keyword2",
	"odbc_result_all": "keyword2",
	"odbc_rollback": "keyword2",
	"odbc_setoption": "keyword2",
	"odbc_specialcolumns": "keyword2",
	"odbc_statistics": "keyword2",
	"odbc_tableprivileges": "keyword2",
	"odbc_tables": "keyword2",
	"old_function": "keyword1",
	"open_listen_sock": "keyword2",
	"opendir": "keyword2",
	"openlog": "keyword2",
	"openssl_error_string": "keyword2",
	"openssl_free_key": "keyword2",
	"openssl_get_privatekey": "keyword2",
	"openssl_get_publickey": "keyword2",
	"openssl_open": "keyword2",
	"openssl_pkcs7_decrypt": "keyword2",
	"openssl_pkcs7_encrypt": "keyword2",
	"openssl_pkcs7_sign": "keyword2",
	"openssl_pkcs7_verify": "keyword2",
	"openssl_seal": "keyword2",
	"openssl_sign": "keyword2",
	"openssl_verify": "keyword2",
	"openssl_x509_checkpurpose": "keyword2",
	"openssl_x509_free": "keyword2",
	"openssl_x509_parse": "keyword2",
	"openssl_x509_read": "keyword2",
	"or": "operator",
	"ora_bind": "keyword2",
	"ora_close": "keyword2",
	"ora_columnname": "keyword2",
	"ora_columnsize": "keyword2",
	"ora_columntype": "keyword2",
	"ora_commit": "keyword2",
	"ora_commitoff": "keyword2",
	"ora_commiton": "keyword2",
	"ora_do": "keyword2",
	"ora_error": "keyword2",
	"ora_errorcode": "keyword2",
	"ora_exec": "keyword2",
	"ora_fetch": "keyword2",
	"ora_fetch_into": "keyword2",
	"ora_getcolumn": "keyword2",
	"ora_logoff": "keyword2",
	"ora_logon": "keyword2",
	"ora_numcols": "keyword2",
	"ora_numrows": "keyword2",
	"ora_open": "keyword2",
	"ora_parse": "keyword2",
	"ora_plogon": "keyword2",
	"ora_rollback": "keyword2",
	"orbit_caught_exception": "keyword2",
	"orbit_exception_id": "keyword2",
	"orbit_exception_value": "keyword2",
	"orbit_get_repository_id": "keyword2",
	"orbit_load_idl": "keyword2",
	"ord": "keyword2",
	"output": "keyword2",
	"ovrimos_close": "keyword2",
	"ovrimos_close_all": "keyword2",
	"ovrimos_commit": "keyword2",
	"ovrimos_connect": "keyword2",
	"ovrimos_cursor": "keyword2",
	"ovrimos_exec": "keyword2",
	"ovrimos_execute": "keyword2",
	"ovrimos_fetch_into": "keyword2",
	"ovrimos_fetch_row": "keyword2",
	"ovrimos_field_len": "keyword2",
	"ovrimos_field_name": "keyword2",
	"ovrimos_field_num": "keyword2",
	"ovrimos_field_type": "keyword2",
	"ovrimos_free_result": "keyword2",
	"ovrimos_longreadlen": "keyword2",
	"ovrimos_num_fields": "keyword2",
	"ovrimos_num_rows": "keyword2",
	"ovrimos_prepare": "keyword2",
	"ovrimos_result": "keyword2",
	"ovrimos_result_all": "keyword2",
	"ovrimos_rollback": "keyword2",
	"pack": "keyword2",
	"parse_ini_file": "keyword2",
	"parse_str": "keyword2",
	"parse_url": "keyword2",
	"passthru": "keyword2",
	"pathinfo": "keyword2",
	"pclose": "keyword2",
	"pdf_add_annotation": "keyword2",
	"pdf_add_bookmark": "keyword2",
	"pdf_add_launchlink": "keyword2",
	"pdf_add_locallink": "keyword2",
	"pdf_add_note": "keyword2",
	"pdf_add_outline": "keyword2",
	"pdf_add_pdflink": "keyword2",
	"pdf_add_thumbnail": "keyword2",
	"pdf_add_weblink": "keyword2",
	"pdf_arc": "keyword2",
	"pdf_arcn": "keyword2",
	"pdf_attach_file": "keyword2",
	"pdf_begin_page": "keyword2",
	"pdf_begin_pattern": "keyword2",
	"pdf_begin_template": "keyword2",
	"pdf_circle": "keyword2",
	"pdf_clip": "keyword2",
	"pdf_close": "keyword2",
	"pdf_close_image": "keyword2",
	"pdf_close_pdi": "keyword2",
	"pdf_close_pdi_page": "keyword2",
	"pdf_closepath": "keyword2",
	"pdf_closepath_fill_stroke": "keyword2",
	"pdf_closepath_stroke": "keyword2",
	"pdf_concat": "keyword2",
	"pdf_continue_text": "keyword2",
	"pdf_curveto": "keyword2",
	"pdf_delete": "keyword2",
	"pdf_end_page": "keyword2",
	"pdf_end_pattern": "keyword2",
	"pdf_end_template": "keyword2",
	"pdf_endpath": "keyword2",
	"pdf_fill": "keyword2",
	"pdf_fill_stroke": "keyword2",
	"pdf_findfont": "keyword2",
	"pdf_get_buffer": "keyword2",
	"pdf_get_font": "keyword2",
	"pdf_get_fontname": "keyword2",
	"pdf_get_fontsize": "keyword2",
	"pdf_get_image_height": "keyword2",
	"pdf_get_image_width": "keyword2",
	"pdf_get_parameter": "keyword2",
	"pdf_get_pdi_parameter": "keyword2",
	"pdf_get_pdi_value": "keyword2",
	"pdf_get_value": "keyword2",
	"pdf_initgraphics": "keyword2",
	"pdf_lineto": "keyword2",
	"pdf_makespotcolor": "keyword2",
	"pdf_moveto": "keyword2",
	"pdf_new": "keyword2",
	"pdf_open": "keyword2",
	"pdf_open_ccitt": "keyword2",
	"pdf_open_file": "keyword2",
	"pdf_open_gif": "keyword2",
	"pdf_open_image": "keyword2",
	"pdf_open_image_file": "keyword2",
	"pdf_open_jpeg": "keyword2",
	"pdf_open_memory_image": "keyword2",
	"pdf_open_pdi": "keyword2",
	"pdf_open_pdi_page": "keyword2",
	"pdf_open_png": "keyword2",
	"pdf_open_tiff": "keyword2",
	"pdf_place_image": "keyword2",
	"pdf_place_pdi_page": "keyword2",
	"pdf_rect": "keyword2",
	"pdf_restore": "keyword2",
	"pdf_rotate": "keyword2",
	"pdf_save": "keyword2",
	"pdf_scale": "keyword2",
	"pdf_set_border_color": "keyword2",
	"pdf_set_border_dash": "keyword2",
	"pdf_set_border_style": "keyword2",
	"pdf_set_char_spacing": "keyword2",
	"pdf_set_duration": "keyword2",
	"pdf_set_font": "keyword2",
	"pdf_set_horiz_scaling": "keyword2",
	"pdf_set_info": "keyword2",
	"pdf_set_info_author": "keyword2",
	"pdf_set_info_creator": "keyword2",
	"pdf_set_info_keywords": "keyword2",
	"pdf_set_info_subject": "keyword2",
	"pdf_set_info_title": "keyword2",
	"pdf_set_leading": "keyword2",
	"pdf_set_parameter": "keyword2",
	"pdf_set_text_pos": "keyword2",
	"pdf_set_text_rendering": "keyword2",
	"pdf_set_text_rise": "keyword2",
	"pdf_set_transition": "keyword2",
	"pdf_set_value": "keyword2",
	"pdf_set_word_spacing": "keyword2",
	"pdf_setcolor": "keyword2",
	"pdf_setdash": "keyword2",
	"pdf_setflat": "keyword2",
	"pdf_setfont": "keyword2",
	"pdf_setgray": "keyword2",
	"pdf_setgray_fill": "keyword2",
	"pdf_setgray_stroke": "keyword2",
	"pdf_setlinecap": "keyword2",
	"pdf_setlinejoin": "keyword2",
	"pdf_setlinewidth": "keyword2",
	"pdf_setmatrix": "keyword2",
	"pdf_setmiterlimit": "keyword2",
	"pdf_setpolydash": "keyword2",
	"pdf_setrgbcolor": "keyword2",
	"pdf_setrgbcolor_fill": "keyword2",
	"pdf_setrgbcolor_stroke": "keyword2",
	"pdf_show": "keyword2",
	"pdf_show_boxed": "keyword2",
	"pdf_show_xy": "keyword2",
	"pdf_skew": "keyword2",
	"pdf_stringwidth": "keyword2",
	"pdf_stroke": "keyword2",
	"pdf_translate": "keyword2",
	"pfpro_cleanup": "keyword2",
	"pfpro_init": "keyword2",
	"pfpro_process": "keyword2",
	"pfpro_process_raw": "keyword2",
	"pfpro_version": "keyword2",
	"pfsockopen": "keyword2",
	"pg_client_encoding": "keyword2",
	"pg_clientencoding": "keyword2",
	"pg_close": "keyword2",
	"pg_cmdtuples": "keyword2",
	"pg_connect": "keyword2",
	"pg_convert": "keyword2",
	"pg_dbname": "keyword2",
	"pg_delete": "keyword2",
	"pg_end_copy": "keyword2",
	"pg_errormessage": "keyword2",
	"pg_exec": "keyword2",
	"pg_fetch_all": "keyword2",
	"pg_fetch_array": "keyword2",
	"pg_fetch_assoc": "keyword2",
	"pg_fetch_object": "keyword2",
	"pg_fetch_row": "keyword2",
	"pg_fieldisnull": "keyword2",
	"pg_fieldname": "keyword2",
	"pg_fieldnum": "keyword2",
	"pg_fieldprtlen": "keyword2",
	"pg_fieldsize": "keyword2",
	"pg_fieldtype": "keyword2",
	"pg_freeresult": "keyword2",
	"pg_get_notify": "keyword2",
	"pg_get_pid": "keyword2",
	"pg_getlastoid": "keyword2",
	"pg_host": "keyword2",
	"pg_insert": "keyword2",
	"pg_loclose": "keyword2",
	"pg_locreate": "keyword2",
	"pg_loexport": "keyword2",
	"pg_loimport": "keyword2",
	"pg_loopen": "keyword2",
	"pg_loread": "keyword2",
	"pg_loreadall": "keyword2",
	"pg_lounlink": "keyword2",
	"pg_lowrite": "keyword2",
	"pg_meta_data": "keyword2",
	"pg_numfields": "keyword2",
	"pg_numrows": "keyword2",
	"pg_options": "keyword2",
	"pg_pconnect": "keyword2",
	"pg_ping": "keyword2",
	"pg_port": "keyword2",
	"pg_put_line": "keyword2",
	"pg_result": "keyword2",
	"pg_result_seek": "keyword2",
	"pg_select": "keyword2",
	"pg_set_client_encoding": "keyword2",
	"pg_setclientencoding": "keyword2",
	"pg_trace": "keyword2",
	"pg_tty": "keyword2",
	"pg_unescape_bytea": "keyword2",
	"pg_untrace": "keyword2",
	"pg_update": "keyword2",
	"php_logo_guid": "keyword2",
	"php_sapi_name": "keyword2",
	"php_uname": "keyword2",
	"phpcredits": "keyword2",
	"phpinfo": "keyword2",
	"phpversion": "keyword2",
	"pi": "keyword2",
	"png2wbmp": "keyword2",
	"popen": "keyword2",
	"pos": "keyword2",
	"posix_ctermid": "keyword2",
	"posix_getcwd": "keyword2",
	"posix_getegid": "keyword2",
	"posix_geteuid": "keyword2",
	"posix_getgid": "keyword2",
	"posix_getgrgid": "keyword2",
	"posix_getgrnam": "keyword2",
	"posix_getgroups": "keyword2",
	"posix_getlogin": "keyword2",
	"posix_getpgid": "keyword2",
	"posix_getpgrp": "keyword2",
	"posix_getpid": "keyword2",
	"posix_getppid": "keyword2",
	"posix_getpwnam": "keyword2",
	"posix_getpwuid": "keyword2",
	"posix_getrlimit": "keyword2",
	"posix_getsid": "keyword2",
	"posix_getuid": "keyword2",
	"posix_isatty": "keyword2",
	"posix_kill": "keyword2",
	"posix_mkfifo": "keyword2",
	"posix_setegid": "keyword2",
	"posix_seteuid": "keyword2",
	"posix_setgid": "keyword2",
	"posix_setpgid": "keyword2",
	"posix_setsid": "keyword2",
	"posix_setuid": "keyword2",
	"posix_times": "keyword2",
	"posix_ttyname": "keyword2",
	"posix_uname": "keyword2",
	"pow": "keyword2",
	"preg_grep": "keyword2",
	"preg_match": "keyword2",
	"preg_match_all": "keyword2",
	"preg_quote": "keyword2",
	"preg_replace": "keyword2",
	"preg_replace_callback": "keyword2",
	"preg_split": "keyword2",
	"prev": "keyword2",
	"print_r": "keyword2",
	"printer_abort": "keyword2",
	"printer_close": "keyword2",
	"printer_create_brush": "keyword2",
	"printer_create_dc": "keyword2",
	"printer_create_font": "keyword2",
	"printer_create_pen": "keyword2",
	"printer_delete_brush": "keyword2",
	"printer_delete_dc": "keyword2",
	"printer_delete_font": "keyword2",
	"printer_delete_pen": "keyword2",
	"printer_draw_bmp": "keyword2",
	"printer_draw_chord": "keyword2",
	"printer_draw_elipse": "keyword2",
	"printer_draw_line": "keyword2",
	"printer_draw_pie": "keyword2",
	"printer_draw_rectangle": "keyword2",
	"printer_draw_roundrect": "keyword2",
	"printer_draw_text": "keyword2",
	"printer_end_doc": "keyword2",
	"printer_end_page": "keyword2",
	"printer_get_option": "keyword2",
	"printer_list": "keyword2",
	"printer_logical_fontheight": "keyword2",
	"printer_open": "keyword2",
	"printer_select_brush": "keyword2",
	"printer_select_font": "keyword2",
	"printer_select_pen": "keyword2",
	"printer_set_option": "keyword2",
	"printer_start_doc": "keyword2",
	"printer_start_page": "keyword2",
	"printer_write": "keyword2",
	"printf": "keyword2",
	"private": "keyword1",
	"protected": "keyword1",
	"pspell_add_to_personal": "keyword2",
	"pspell_add_to_session": "keyword2",
	"pspell_check": "keyword2",
	"pspell_clear_session": "keyword2",
	"pspell_config_create": "keyword2",
	"pspell_config_ignore": "keyword2",
	"pspell_config_mode": "keyword2",
	"pspell_config_personal": "keyword2",
	"pspell_config_repl": "keyword2",
	"pspell_config_runtogether": "keyword2",
	"pspell_config_save_repl": "keyword2",
	"pspell_new": "keyword2",
	"pspell_new_config": "keyword2",
	"pspell_new_personal": "keyword2",
	"pspell_save_wordlist": "keyword2",
	"pspell_store_replacement": "keyword2",
	"pspell_suggest": "keyword2",
	"public": "keyword1",
	"putenv": "keyword2",
	"qdom_error": "keyword2",
	"qdom_tree": "keyword2",
	"quoted_printable_decode": "keyword2",
	"quotemeta": "keyword2",
	"rad2deg": "keyword2",
	"rand": "keyword2",
	"range": "keyword2",
	"rawurldecode": "keyword2",
	"rawurlencode": "keyword2",
	"read": "keyword2",
	"read_exif_data": "keyword2",
	"readdir": "keyword2",
	"readfile": "keyword2",
	"readgzfile": "keyword2",
	"readline": "keyword2",
	"readline_add_history": "keyword2",
	"readline_clear_history": "keyword2",
	"readline_completion_function": "keyword2",
	"readline_info": "keyword2",
	"readline_list_history": "keyword2",
	"readline_read_history": "keyword2",
	"readline_write_history": "keyword2",
	"readlink": "keyword2",
	"readv": "keyword2",
	"realpath": "keyword2",
	"recode": "keyword2",
	"recode_file": "keyword2",
	"recode_string": "keyword2",
	"recv": "keyword2",
	"recvfrom": "keyword2",
	"recvmsg": "keyword2",
	"register_shutdown_function": "keyword2",
	"register_tick_function": "keyword2",
	"remove": "keyword2",
	"rename": "keyword2",
	"require": "keyword1",
	"require_once": "keyword1",
	"reset": "keyword2",
	"restore_error_handler": "keyword2",
	"return": "keyword1",
	"rewind": "keyword2",
	"rewinddir": "keyword2",
	"rmdir": "keyword2",
	"rotate": "keyword2",
	"rotateto": "keyword2",
	"round": "keyword2",
	"rsort": "keyword2",
	"rtrim": "keyword2",
	"satellite_caught_exception": "keyword2",
	"satellite_exception_id": "keyword2",
	"satellite_exception_value": "keyword2",
	"satellite_get_repository_id": "keyword2",
	"satellite_load_idl": "keyword2",
	"save": "keyword2",
	"savetofile": "keyword2",
	"scale": "keyword2",
	"scaleto": "keyword2",
	"scandir": "keyword2",
	"select": "keyword2",
	"sem_acquire": "keyword2",
	"sem_get": "keyword2",
	"sem_release": "keyword2",
	"send": "keyword2",
	"sendmsg": "keyword2",
	"sendto": "keyword2",
	"serialize": "keyword2",
	"session_cache_limiter": "keyword2",
	"session_decode": "keyword2",
	"session_destroy": "keyword2",
	"session_encode": "keyword2",
	"session_get_cookie_params": "keyword2",
	"session_id": "keyword2",
	"session_is_registered": "keyword2",
	"session_module_name": "keyword2",
	"session_name": "keyword2",
	"session_register": "keyword2",
	"session_save_path": "keyword2",
	"session_set_cookie_params": "keyword2",
	"session_set_save_handler": "keyword2",
	"session_start": "keyword2",
	"session_unregister": "keyword2",
	"session_unset": "keyword2",
	"session_write_close": "keyword2",
	"set_content": "keyword2",
	"set_error_handler": "keyword2",
	"set_file_buffer": "keyword2",
	"set_iovec": "keyword2",
	"set_magic_quotes_runtime": "keyword2",
	"set_nonblock": "keyword2",
	"set_socket_blocking": "keyword2",
	"set_time_limit": "keyword2",
	"setaction": "keyword2",
	"setbackground": "keyword2",
	"setbounds": "keyword2",
	"setcolor": "keyword2",
	"setcookie": "keyword2",
	"setdepth": "keyword2",
	"setdimension": "keyword2",
	"setdown": "keyword2",
	"setfont": "keyword2",
	"setframes": "keyword2",
	"setheight": "keyword2",
	"sethit": "keyword2",
	"setindentation": "keyword2",
	"setleftfill": "keyword2",
	"setleftmargin": "keyword2",
	"setline": "keyword2",
	"setlinespacing": "keyword2",
	"setlocale": "keyword2",
	"setmargins": "keyword2",
	"setmatrix": "keyword2",
	"setname": "keyword2",
	"setover": "keyword2",
	"setrate": "keyword2",
	"setratio": "keyword2",
	"setrightfill": "keyword2",
	"setrightmargin": "keyword2",
	"setsockopt": "keyword2",
	"setspacing": "keyword2",
	"settype": "keyword2",
	"setup": "keyword2",
	"sha1": "keyword2",
	"sha1_file": "keyword2",
	"shell_exec": "keyword2",
	"shm_attach": "keyword2",
	"shm_detach": "keyword2",
	"shm_get_var": "keyword2",
	"shm_put_var": "keyword2",
	"shm_remove": "keyword2",
	"shm_remove_var": "keyword2",
	"shmop_close": "keyword2",
	"shmop_delete": "keyword2",
	"shmop_open": "keyword2",
	"shmop_read": "keyword2",
	"shmop_size": "keyword2",
	"shmop_write": "keyword2",
	"show_source": "keyword2",
	"shuffle": "keyword2",
	"shutdown": "keyword2",
	"signal": "keyword2",
	"similar_text": "keyword2",
	"sin": "keyword2",
	"sizeof": "keyword2",
	"skewx": "keyword2",
	"skewxto": "keyword2",
	"skewy": "keyword2",
	"skewyto": "keyword2",
	"sleep": "keyword2",
	"snmp_get_quick_print": "keyword2",
	"snmp_set_quick_print": "keyword2",
	"snmpget": "keyword2",
	"snmprealwalk": "keyword2",
	"snmpset": "keyword2",
	"snmpwalk": "keyword2",
	"snmpwalkoid": "keyword2",
	"socket": "keyword2",
	"socket_get_status": "keyword2",
	"socket_set_blocking": "keyword2",
	"socket_set_timeout": "keyword2",
	"socketpair": "keyword2",
	"sort": "keyword2",
	"soundex": "keyword2",
	"split": "keyword2",
	"spliti": "keyword2",
	"sprintf": "keyword2",
	"sql_regcase": "keyword2",
	"sqrt": "keyword2",
	"srand": "keyword2",
	"sscanf": "keyword2",
	"stat": "keyword2",
	"static": "keyword1",
	"str_pad": "keyword2",
	"str_repeat": "keyword2",
	"str_replace": "keyword2",
	"str_rot13": "keyword2",
	"str_split": "keyword2",
	"str_word_count": "keyword2",
	"strcasecmp": "keyword2",
	"strchr": "keyword2",
	"strcmp": "keyword2",
	"strcoll": "keyword2",
	"strcspn": "keyword2",
	"stream_context_create": "keyword2",
	"stream_context_set_option": "keyword2",
	"stream_context_set_params": "keyword2",
	"stream_filter_append": "keyword2",
	"stream_filter_prepend": "keyword2",
	"stream_get_status": "keyword2",
	"stream_select": "keyword2",
	"stream_set_blocking": "keyword2",
	"stream_set_timeout": "keyword2",
	"streammp3": "keyword2",
	"strerror": "keyword2",
	"strftime": "keyword2",
	"strip_tags": "keyword2",
	"stripcslashes": "keyword2",
	"stripos": "keyword2",
	"stripslashes": "keyword2",
	"stristr": "keyword2",
	"strlen": "keyword2",
	"strnatcasecmp": "keyword2",
	"strnatcmp": "keyword2",
	"strncasecmp": "keyword2",
	"strncmp": "keyword2",
	"strpbrk": "keyword2",
	"strpos": "keyword2",
	"strrchr": "keyword2",
	"strrev": "keyword2",
	"strrpos": "keyword2",
	"strspn": "keyword2",
	"strstr": "keyword2",
	"strtok": "keyword2",
	"strtolower": "keyword2",
	"strtotime": "keyword2",
	"strtoupper": "keyword2",
	"strtr": "keyword2",
	"strval": "keyword2",
	"substr": "keyword2",
	"substr_compare": "keyword2",
	"substr_count": "keyword2",
	"substr_replace": "keyword2",
	"swf_actiongeturl": "keyword2",
	"swf_actiongotoframe": "keyword2",
	"swf_actiongotolabel": "keyword2",
	"swf_actionnextframe": "keyword2",
	"swf_actionplay": "keyword2",
	"swf_actionprevframe": "keyword2",
	"swf_actionsettarget": "keyword2",
	"swf_actionstop": "keyword2",
	"swf_actiontogglequality": "keyword2",
	"swf_actionwaitforframe": "keyword2",
	"swf_addbuttonrecord": "keyword2",
	"swf_addcolor": "keyword2",
	"swf_closefile": "keyword2",
	"swf_definebitmap": "keyword2",
	"swf_definefont": "keyword2",
	"swf_defineline": "keyword2",
	"swf_definepoly": "keyword2",
	"swf_definerect": "keyword2",
	"swf_definetext": "keyword2",
	"swf_endbutton": "keyword2",
	"swf_enddoaction": "keyword2",
	"swf_endshape": "keyword2",
	"swf_endsymbol": "keyword2",
	"swf_fontsize": "keyword2",
	"swf_fontslant": "keyword2",
	"swf_fonttracking": "keyword2",
	"swf_getbitmapinfo": "keyword2",
	"swf_getfontinfo": "keyword2",
	"swf_getframe": "keyword2",
	"swf_labelframe": "keyword2",
	"swf_lookat": "keyword2",
	"swf_modifyobject": "keyword2",
	"swf_mulcolor": "keyword2",
	"swf_nextid": "keyword2",
	"swf_oncondition": "keyword2",
	"swf_openfile": "keyword2",
	"swf_ortho": "keyword2",
	"swf_ortho2": "keyword2",
	"swf_perspective": "keyword2",
	"swf_placeobject": "keyword2",
	"swf_polarview": "keyword2",
	"swf_popmatrix": "keyword2",
	"swf_posround": "keyword2",
	"swf_pushmatrix": "keyword2",
	"swf_removeobject": "keyword2",
	"swf_rotate": "keyword2",
	"swf_scale": "keyword2",
	"swf_setfont": "keyword2",
	"swf_setframe": "keyword2",
	"swf_shapearc": "keyword2",
	"swf_shapecurveto": "keyword2",
	"swf_shapecurveto3": "keyword2",
	"swf_shapefillbitmapclip": "keyword2",
	"swf_shapefillbitmaptile": "keyword2",
	"swf_shapefilloff": "keyword2",
	"swf_shapefillsolid": "keyword2",
	"swf_shapelinesolid": "keyword2",
	"swf_shapelineto": "keyword2",
	"swf_shapemoveto": "keyword2",
	"swf_showframe": "keyword2",
	"swf_startbutton": "keyword2",
	"swf_startdoaction": "keyword2",
	"swf_startshape": "keyword2",
	"swf_startsymbol": "keyword2",
	"swf_textwidth": "keyword2",
	"swf_translate": "keyword2",
	"swf_viewport": "keyword2",
	"swfaction": "keyword2",
	"swfbitmap": "keyword2",
	"swfbutton": "keyword2",
	"swfbutton_keypress": "keyword2",
	"swffill": "keyword2",
	"swffont": "keyword2",
	"swfgradient": "keyword2",
	"swfmorph": "keyword2",
	"swfmovie": "keyword2",
	"swfshape": "keyword2",
	"swfsprite": "keyword2",
	"swftext": "keyword2",
	"swftextfield": "keyword2",
	"switch": "keyword1",
	"sybase_affected_rows": "keyword2",
	"sybase_close": "keyword2",
	"sybase_connect": "keyword2",
	"sybase_data_seek": "keyword2",
	"sybase_fetch_array": "keyword2",
	"sybase_fetch_field": "keyword2",
	"sybase_fetch_object": "keyword2",
	"sybase_fetch_row": "keyword2",
	"sybase_field_seek": "keyword2",
	"sybase_free_result": "keyword2",
	"sybase_get_last_message": "keyword2",
	"sybase_min_client_severity": "keyword2",
	"sybase_min_error_severity": "keyword2",
	"sybase_min_message_severity": "keyword2",
	"sybase_min_server_severity": "keyword2",
	"sybase_num_fields": "keyword2",
	"sybase_num_rows": "keyword2",
	"sybase_pconnect": "keyword2",
	"sybase_query": "keyword2",
	"sybase_result": "keyword2",
	"sybase_select_db": "keyword2",
	"symlink": "keyword2",
	"syslog": "keyword2",
	"system": "keyword2",
	"tan": "keyword2",
	"tempnam": "keyword2",
	"textdomain": "keyword2",
	"throw": "keyword1",
	"time": "keyword2",
	"time_nanosleep": "keyword2",
	"tmpfile": "keyword2",
	"touch": "keyword2",
	"trigger_error": "keyword2",
	"trim": "keyword2",
	"true": "keyword3",
	"try": "keyword1",
	"uasort": "keyword2",
	"ucfirst": "keyword2",
	"ucwords": "keyword2",
	"udm_add_search_limit": "keyword2",
	"udm_alloc_agent": "keyword2",
	"udm_api_version": "keyword2",
	"udm_clear_search_limits": "keyword2",
	"udm_errno": "keyword2",
	"udm_error": "keyword2",
	"udm_find": "keyword2",
	"udm_free_agent": "keyword2",
	"udm_free_ispell_data": "keyword2",
	"udm_free_res": "keyword2",
	"udm_get_doc_count": "keyword2",
	"udm_get_res_field": "keyword2",
	"udm_get_res_param": "keyword2",
	"udm_load_ispell_data": "keyword2",
	"udm_set_agent_param": "keyword2",
	"uksort": "keyword2",
	"umask": "keyword2",
	"uniqid": "keyword2",
	"unixtojd": "keyword2",
	"unlink": "keyword2",
	"unpack": "keyword2",
	"unregister_tick_function": "keyword2",
	"unserialize": "keyword2",
	"unset": "keyword2",
	"urldecode": "keyword2",
	"urlencode": "keyword2",
	"user_error": "keyword2",
	"usleep": "keyword2",
	"usort": "keyword2",
	"utf8_decode": "keyword2",
	"utf8_encode": "keyword2",
	"var": "keyword1",
	"var_dump": "keyword2",
	"velocis_autocommit": "keyword2",
	"velocis_close": "keyword2",
	"velocis_commit": "keyword2",
	"velocis_connect": "keyword2",
	"velocis_exec": "keyword2",
	"velocis_fetch": "keyword2",
	"velocis_fieldname": "keyword2",
	"velocis_fieldnum": "keyword2",
	"velocis_freeresult": "keyword2",
	"velocis_off_autocommit": "keyword2",
	"velocis_result": "keyword2",
	"velocis_rollback": "keyword2",
	"virtual": "keyword2",
	"vpopmail_add_alias_domain": "keyword2",
	"vpopmail_add_alias_domain_ex": "keyword2",
	"vpopmail_add_domain": "keyword2",
	"vpopmail_add_domain_ex": "keyword2",
	"vpopmail_add_user": "keyword2",
	"vpopmail_auth_user": "keyword2",
	"vpopmail_del_domain": "keyword2",
	"vpopmail_del_domain_ex": "keyword2",
	"vpopmail_del_user": "keyword2",
	"vpopmail_error": "keyword2",
	"vpopmail_passwd": "keyword2",
	"vpopmail_set_user_quota": "keyword2",
	"wddx_add_vars": "keyword2",
	"wddx_deserialize": "keyword2",
	"wddx_packet_end": "keyword2",
	"wddx_packet_start": "keyword2",
	"wddx_serialize_value": "keyword2",
	"wddx_serialize_vars": "keyword2",
	"while": "keyword1",
	"wordwrap": "keyword2",
	"write": "keyword2",
	"writev": "keyword2",
	"xml_error_string": "keyword2",
	"xml_get_current_byte_index": "keyword2",
	"xml_get_current_column_number": "keyword2",
	"xml_get_current_line_number": "keyword2",
	"xml_get_error_code": "keyword2",
	"xml_parse": "keyword2",
	"xml_parse_into_struct": "keyword2",
	"xml_parser_create": "keyword2",
	"xml_parser_create_ns": "keyword2",
	"xml_parser_free": "keyword2",
	"xml_parser_get_option": "keyword2",
	"xml_parser_set_option": "keyword2",
	"xml_set_character_data_handler": "keyword2",
	"xml_set_default_handler": "keyword2",
	"xml_set_element_handler": "keyword2",
	"xml_set_end_namespace_decl_handler": "keyword2",
	"xml_set_external_entity_ref_handler": "keyword2",
	"xml_set_notation_decl_handler": "keyword2",
	"xml_set_object": "keyword2",
	"xml_set_processing_instruction_handler": "keyword2",
	"xml_set_start_namespace_decl_handler": "keyword2",
	"xml_set_unparsed_entity_decl_handler": "keyword2",
	"xmldoc": "keyword2",
	"xmldocfile": "keyword2",
	"xmltree": "keyword2",
	"xpath_eval": "keyword2",
	"xpath_eval_expression": "keyword2",
	"xptr_eval": "keyword2",
	"xslt_closelog": "keyword2",
	"xslt_create": "keyword2",
	"xslt_errno": "keyword2",
	"xslt_error": "keyword2",
	"xslt_fetch_result": "keyword2",
	"xslt_free": "keyword2",
	"xslt_openlog": "keyword2",
	"xslt_output_begintransform": "keyword2",
	"xslt_output_endtransform": "keyword2",
	"xslt_process": "keyword2",
	"xslt_run": "keyword2",
	"xslt_set_base": "keyword2",
	"xslt_set_encoding": "keyword2",
	"xslt_set_error_handler": "keyword2",
	"xslt_set_sax_handler": "keyword2",
	"xslt_set_scheme_handler": "keyword2",
	"xslt_transform": "keyword2",
	"yaz_addinfo": "keyword2",
	"yaz_ccl_conf": "keyword2",
	"yaz_ccl_parse": "keyword2",
	"yaz_close": "keyword2",
	"yaz_connect": "keyword2",
	"yaz_database": "keyword2",
	"yaz_element": "keyword2",
	"yaz_errno": "keyword2",
	"yaz_error": "keyword2",
	"yaz_hits": "keyword2",
	"yaz_itemorder": "keyword2",
	"yaz_present": "keyword2",
	"yaz_range": "keyword2",
	"yaz_record": "keyword2",
	"yaz_scan": "keyword2",
	"yaz_scan_result": "keyword2",
	"yaz_search": "keyword2",
	"yaz_syntax": "keyword2",
	"yaz_wait": "keyword2",
	"yp_all": "keyword2",
	"yp_cat": "keyword2",
	"yp_err_string": "keyword2",
	"yp_errno": "keyword2",
	"yp_first": "keyword2",
	"yp_get_default_domain": "keyword2",
	"yp_master": "keyword2",
	"yp_match": "keyword2",
	"yp_next": "keyword2",
	"yp_order": "keyword2",
	"zend_logo_guid": "keyword2",
	"zend_test_func": "keyword2",
	"zend_version": "keyword2",
	"zzip_close": "keyword2",
	"zzip_closedir": "keyword2",
	"zzip_entry_compressedsize": "keyword2",
	"zzip_entry_compressionmethod": "keyword2",
	"zzip_entry_filesize": "keyword2",
	"zzip_entry_name": "keyword2",
	"zzip_open": "keyword2",
	"zzip_opendir": "keyword2",
	"zzip_read": "keyword2",
	"zzip_readdir": "keyword2",
}

# Empty keyword tables.  The php_php_literal, php_javascript and
# php_javascript_php rulesets define no keywords of their own, but the
# mode loader still expects a dict object for each ruleset.
php_php_literal_keywords_dict = dict()

php_javascript_keywords_dict = dict()

php_javascript_php_keywords_dict = dict()

# Keywords dict for php_phpdoc ruleset.
# Every phpDocumentor tag colors as the same token type ("label"), so
# build the table with dict.fromkeys rather than repeating the value
# per entry.  Key order matches the original literal.
php_phpdoc_keywords_dict = dict.fromkeys((
    "@abstract", "@access", "@author", "@category", "@copyright",
    "@deprecated", "@example", "@filesource", "@final", "@global",
    "@id", "@ignore", "@inheritdoc", "@internal", "@license",
    "@link", "@name", "@package", "@param", "@return",
    "@see", "@since", "@source", "@static", "@staticvar",
    "@subpackage", "@toc", "@todo", "@tutorial", "@uses",
    "@var", "@version",
), "label")

# Dictionary of keywords dictionaries for php mode.
# Maps each ruleset name to its keyword table; the colorizer looks up
# the active ruleset's keywords here.  The per-ruleset dicts
# (php_main_keywords_dict, php_php_keywords_dict, ...) are defined
# earlier in this generated file.
keywordsDictDict = {
	"php_javascript": php_javascript_keywords_dict,
	"php_javascript_php": php_javascript_php_keywords_dict,
	"php_main": php_main_keywords_dict,
	"php_php": php_php_keywords_dict,
	"php_php_literal": php_php_literal_keywords_dict,
	"php_phpdoc": php_phpdoc_keywords_dict,
	"php_tags": php_tags_keywords_dict,
	"php_tags_literal": php_tags_literal_keywords_dict,
}

# Rules for php_main ruleset.

def rule0(colorer, s, i):
    """Match a <?php ... ?> span and delegate its interior to the PHP ruleset."""
    return colorer.match_span(
        s, i,
        kind="markup",
        begin="<?php",
        end="?>",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="PHP",
        exclude_match=False,
        no_escape=False,
        no_line_break=False,
        no_word_break=False,
    )

def rule1(colorer, s, i):
    """Match a short-open <? ... ?> span and delegate it to the PHP ruleset."""
    return colorer.match_span(
        s, i,
        kind="markup",
        begin="<?",
        end="?>",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="PHP",
        exclude_match=False,
        no_escape=False,
        no_line_break=False,
        no_word_break=False,
    )

def rule2(colorer, s, i):
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule3(colorer, s, i):
    return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule4(colorer, s, i):
    return colorer.match_span_regexp(s, i, kind="markup", begin="<SCRIPT\\s+LANGUAGE=\"?PHP\"?>", end="</SCRIPT>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="PHP",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule5(colorer, s, i):
    return colorer.match_span(s, i, kind="markup", begin="<SCRIPT", end="</SCRIPT>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="JAVASCRIPT",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule6(colorer, s, i):
    return colorer.match_span(s, i, kind="markup", begin="<STYLE", end="</STYLE>",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="html::CSS",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule7(colorer, s, i):
    return colorer.match_span(s, i, kind="keyword2", begin="<!", end=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="xml::DTD-TAGS",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule8(colorer, s, i):
    return colorer.match_span(s, i, kind="markup", begin="<", end=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="TAGS",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule9(colorer, s, i):
    return colorer.match_span(s, i, kind="literal2", begin="&", end=";",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=True)

# Rules dict for main ruleset.
rulesDict1 = {
	"&": [rule9,],
	"<": [rule0,rule1,rule2,rule3,rule4,rule5,rule6,rule7,rule8,],
}

# Rules for php_tags ruleset.

def rule10(colorer, s, i):
    # "<?php" ... "?>" inside a tag -> markup, PHP ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        delegate="PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule11(colorer, s, i):
    # "<?" ... "?>" inside a tag -> markup, PHP ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        delegate="PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule12(colorer, s, i):
    # "<%=" ... "%>" inside a tag -> markup, PHP ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        delegate="PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule13(colorer, s, i):
    # Double-quoted attribute value -> literal1, TAGS_LITERAL ruleset.
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        delegate="TAGS_LITERAL", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule14(colorer, s, i):
    # Single-quoted attribute value -> literal1, TAGS_LITERAL ruleset.
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        delegate="TAGS_LITERAL", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule15(colorer, s, i):
    # Attribute "=" -> operator.
    return colorer.match_seq(s, i, kind="operator", seq="=", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

# Rules dict for tags ruleset.
rulesDict2 = {
    "\"": [rule13],
    "'": [rule14],
    "<": [rule10, rule11, rule12],
    "=": [rule15],
}

# Rules for php_tags_literal ruleset.

def rule16(colorer, s, i):
    # "<?php" ... "?>" inside a tag literal -> markup, PHP ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        delegate="PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule17(colorer, s, i):
    # "<?" ... "?>" inside a tag literal -> markup, PHP ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        delegate="PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule18(colorer, s, i):
    # "<%=" ... "%>" inside a tag literal -> markup, PHP ruleset.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        delegate="PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

# Rules dict for tags_literal ruleset.
rulesDict3 = {
    "<": [rule16, rule17, rule18],
}

# Rules for php_php ruleset.

def rule19(colorer, s, i):
    # Doc comment "/**" ... "*/" -> comment3, colored by the PHPDOC ruleset.
    return colorer.match_span(s, i, kind="comment3", begin="/**", end="*/",
        delegate="PHPDOC", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule20(colorer, s, i):
    # Block comment "/*" ... "*/" -> comment1.
    return colorer.match_span(s, i, kind="comment1", begin="/*", end="*/",
        delegate="", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule21(colorer, s, i):
    # Double-quoted string -> literal1, PHP_LITERAL ruleset (interpolation).
    return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
        delegate="PHP_LITERAL", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule22(colorer, s, i):
    # Single-quoted string -> literal1 (no delegation).
    return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
        delegate="", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule23(colorer, s, i):
    # Backtick (shell-exec) string -> literal1, PHP_LITERAL ruleset.
    return colorer.match_span(s, i, kind="literal1", begin="`", end="`",
        delegate="PHP_LITERAL", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule24(colorer, s, i):
    # "//" line comment -> comment2 (to end of line).
    return colorer.match_eol_span(s, i, kind="comment2", seq="//",
        delegate="", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False)

def rule25(colorer, s, i):
    # "#" line comment -> comment1 (to end of line).
    return colorer.match_eol_span(s, i, kind="comment1", seq="#",
        delegate="", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False)

def rule26(colorer, s, i):
    # Heredoc "<<<NAME" ... "NAME" -> literal1, PHP_LITERAL ruleset.
    return colorer.match_span_regexp(s, i, kind="literal1",
        begin="<<<[[:space:]'\"]*([[:alnum:]_]+)[[:space:]'\"]*", end="$1",
        delegate="PHP_LITERAL", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule27(colorer, s, i):
    # "$variable" -> keyword3 (text following "$" up to next token).
    return colorer.match_mark_following(s, i, kind="keyword3", pattern="$",
        exclude_match=False, at_line_start=False, at_whitespace_end=False,
        at_word_start=False)

# rule28 .. rule54: fixed operator/punctuation sequences, all -> "operator".

def rule28(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="=", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule29(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="->", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule30(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="!", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule31(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=">=", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule32(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="<=", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule33(colorer, s, i):
    # NOTE(review): identical to rule28; both kept to preserve the
    # original dispatch table exactly.
    return colorer.match_seq(s, i, kind="operator", seq="=", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule34(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="+", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule35(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="-", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule36(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="/", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule37(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="*", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule38(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=">", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule39(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="<", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule40(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="%", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule41(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="&", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule42(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="|", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule43(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="^", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule44(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="~", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule45(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=".", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule46(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="}", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule47(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="{", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule48(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=",", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule49(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=";", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule50(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="]", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule51(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="[", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule52(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="?", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule53(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq="@", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule54(colorer, s, i):
    return colorer.match_seq(s, i, kind="operator", seq=":", delegate="",
        at_line_start=False, at_whitespace_end=False, at_word_start=False)

def rule55(colorer, s, i):
    # Identifier before "(" -> function name (match itself excluded).
    return colorer.match_mark_previous(s, i, kind="function", pattern="(",
        exclude_match=True, at_line_start=False, at_whitespace_end=False,
        at_word_start=False)

def rule56(colorer, s, i):
    # Keyword lookup in this ruleset's keywords dict.
    return colorer.match_keywords(s, i)

# Rules dict for php ruleset.
# Non-word start characters map to their specific rules; every word
# character (digit, letter, "_") maps to the keyword matcher rule56.
rulesDict4 = {
    "!": [rule30],
    "\"": [rule21],
    "#": [rule25],
    "$": [rule27],
    "%": [rule40],
    "&": [rule41],
    "'": [rule22],
    "(": [rule55],
    "*": [rule37],
    "+": [rule34],
    ",": [rule48],
    "-": [rule29, rule35],
    ".": [rule45],
    "/": [rule19, rule20, rule24, rule36],
    ":": [rule54],
    ";": [rule49],
    "<": [rule26, rule32, rule39],
    "=": [rule28, rule33],
    ">": [rule31, rule38],
    "?": [rule52],
    "@": [rule53, rule56],
    "[": [rule51],
    "]": [rule50],
    "^": [rule43],
    "`": [rule23],
    "{": [rule47],
    "|": [rule42],
    "}": [rule46],
    "~": [rule44],
}
for _ch in "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
    rulesDict4[_ch] = [rule56]
del _ch

# Rules for php_php_literal ruleset.

def rule57(colorer, s, i):
    # "$variable" inside an interpolating string -> keyword3.
    return colorer.match_mark_following(s, i, kind="keyword3", pattern="$",
        exclude_match=False, at_line_start=False, at_whitespace_end=False,
        at_word_start=False)

# Rules dict for php_literal ruleset.
rulesDict5 = {
    "$": [rule57],
}

# Rules for php_javascript ruleset.

def rule58(colorer, s, i):
    # Closing ">" of the <SCRIPT> tag -> markup; switch to JAVASCRIPT+PHP.
    return colorer.match_seq(s, i, kind="markup", seq=">",
        delegate="JAVASCRIPT+PHP", at_line_start=False,
        at_whitespace_end=False, at_word_start=False)

def rule59(colorer, s, i):
    # "SRC=" attribute -> markup; return to the HTML rules.
    return colorer.match_seq(s, i, kind="markup", seq="SRC=",
        delegate="BACK_TO_HTML", at_line_start=False,
        at_whitespace_end=False, at_word_start=False)

# Rules dict for javascript ruleset.
rulesDict6 = {
    ">": [rule58],
    "S": [rule59],
}

# Rules for php_javascript_php ruleset.

def rule60(colorer, s, i):
    # "<?php" ... "?>" -> markup, PHP ruleset from the php mode.
    return colorer.match_span(s, i, kind="markup", begin="<?php", end="?>",
        delegate="php::PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule61(colorer, s, i):
    # "<?" ... "?>" -> markup, PHP ruleset from the php mode.
    return colorer.match_span(s, i, kind="markup", begin="<?", end="?>",
        delegate="php::PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

def rule62(colorer, s, i):
    # "<%=" ... "%>" -> markup, PHP ruleset from the php mode.
    return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
        delegate="php::PHP", exclude_match=False, at_line_start=False,
        at_whitespace_end=False, at_word_start=False, no_escape=False,
        no_line_break=False, no_word_break=False)

# Rules dict for javascript_php ruleset.
rulesDict7 = {
    "<": [rule60, rule61, rule62],
}

# Rules for php_phpdoc ruleset.

def rule63(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="{",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule64(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="*",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule65(colorer, s, i):
    return colorer.match_span(s, i, kind="comment2", begin="<!--", end="-->",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="",exclude_match=False,
        no_escape=False, no_line_break=False, no_word_break=False)

def rule66(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="<<",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule67(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="<=",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule68(colorer, s, i):
    return colorer.match_seq(s, i, kind="comment3", seq="< ",
        at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")

def rule69(colorer, s, i):
    return colorer.match_span(s, i, kind="markup", begin="<", end=">",
        at_line_start=False, at_whitespace_end=False, at_word_start=False,
        delegate="xml::TAGS",exclude_match=False,
        no_escape=False, no_line_break=True, no_word_break=False)

def rule70(colorer, s, i):
    return colorer.match_keywords(s, i)

# Rules dict for phpdoc ruleset.
rulesDict8 = {
	"*": [rule64,],
	"0": [rule70,],
	"1": [rule70,],
	"2": [rule70,],
	"3": [rule70,],
	"4": [rule70,],
	"5": [rule70,],
	"6": [rule70,],
	"7": [rule70,],
	"8": [rule70,],
	"9": [rule70,],
	"<": [rule65,rule66,rule67,rule68,rule69,],
	"@": [rule70,],
	"A": [rule70,],
	"B": [rule70,],
	"C": [rule70,],
	"D": [rule70,],
	"E": [rule70,],
	"F": [rule70,],
	"G": [rule70,],
	"H": [rule70,],
	"I": [rule70,],
	"J": [rule70,],
	"K": [rule70,],
	"L": [rule70,],
	"M": [rule70,],
	"N": [rule70,],
	"O": [rule70,],
	"P": [rule70,],
	"Q": [rule70,],
	"R": [rule70,],
	"S": [rule70,],
	"T": [rule70,],
	"U": [rule70,],
	"V": [rule70,],
	"W": [rule70,],
	"X": [rule70,],
	"Y": [rule70,],
	"Z": [rule70,],
	"_": [rule70,],
	"a": [rule70,],
	"b": [rule70,],
	"c": [rule70,],
	"d": [rule70,],
	"e": [rule70,],
	"f": [rule70,],
	"g": [rule70,],
	"h": [rule70,],
	"i": [rule70,],
	"j": [rule70,],
	"k": [rule70,],
	"l": [rule70,],
	"m": [rule70,],
	"n": [rule70,],
	"o": [rule70,],
	"p": [rule70,],
	"q": [rule70,],
	"r": [rule70,],
	"s": [rule70,],
	"t": [rule70,],
	"u": [rule70,],
	"v": [rule70,],
	"w": [rule70,],
	"x": [rule70,],
	"y": [rule70,],
	"z": [rule70,],
	"{": [rule63,],
}

# x.rulesDictDict for php mode: ruleset name -> rules dispatch dict.
rulesDictDict = {
    "php_javascript":     rulesDict6,
    "php_javascript_php": rulesDict7,
    "php_main":           rulesDict1,
    "php_php":            rulesDict4,
    "php_php_literal":    rulesDict5,
    "php_phpdoc":         rulesDict8,
    "php_tags":           rulesDict2,
    "php_tags_literal":   rulesDict3,
}

# Import dict for php mode: ruleset name -> rulesets whose rules are
# appended to it (jEdit IMPORT semantics: copied to the end of the ruleset).
importDict = {
    "php_javascript_php": ["javascript_main"],
}

#@-node:php.py
#@-node:Colorizer tests
#@+node:jEdit docs...
@nocolor
#@nonl
#@+node:@url http://www.jedit.org/42docs/users-guide/writing-modes-part.html
#@-node:@url http://www.jedit.org/42docs/users-guide/writing-modes-part.html
#@+node:Rule ordering
You might encounter this very common pitfall when writing your own modes.

Since jEdit checks buffer text against parser rules in the order they appear in
the ruleset, more specific rules must be placed before generalized ones,
otherwise the generalized rules will catch everything.
#@nonl
#@-node:Rule ordering
#@+node:Attributes
#@+node: Documentation of attributes
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:AT_CHAR (int)
The number of characters to terminate after.

For terminate only.
#@nonl
#@-node:AT_CHAR (int)
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@+node:NO_xxx
#@+node:NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@nonl
#@-node:NO_WORD_BREAK (bool)
#@+node:NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@nonl
#@-node:NO_LINE_BREAK (bool)
#@+node:NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@nonl
#@-node:NO_ESCAPE (bool)
#@-node:NO_xxx
#@+node: For 'rules' only
#@+node:DEFAULT
The token type for text which doesn't match any specific rule. Default is NULL.
See the section called “Token Types” for a list of token types.
#@nonl
#@-node:DEFAULT
#@+node:DIGIT_RE (re) & HIGHLIGHT_DIGITS  (bool)
If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT token type.

A word that contains other letters in addition to digits will be highlighted with the DIGIT token type only if it matches the regular expression specified in the DIGIT_RE attribute. If this attribute is not specified, it will not be highlighted.

Here is an example DIGIT_RE regular expression that highlights Java-style numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers suffixed with various type indicators, and floating point literals containing an exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"
#@nonl
#@-node:DIGIT_RE (re) & HIGHLIGHT_DIGITS  (bool)
#@+node:IGNORE_CASE (bool)
If set to FALSE, matches will be case sensitive.
Otherwise, case will not matter. Default is TRUE.
#@nonl
#@-node:IGNORE_CASE (bool)
#@+node:SET
The name of this ruleset. All rulesets other than the first must have a name.
#@nonl
#@-node:SET
#@+node:NO_WORD_SEP
Any non-alphanumeric character not in this list is treated as a word separator for the purposes of syntax highlighting.
#@-node:NO_WORD_SEP
#@-node: For 'rules' only
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@+node:HASH_CHAR (char) For 'regx'
Attribute           Elements used in
---------           ----------------
HASH_CHAR           EOL_SPAN_REGEXP,SPAN_REGEXP,SEQ_REGEXP

It must be set to the first character that the regular expression matches. This
rules out using regular expressions which can match more than one character at
the start position. The regular expression match cannot span more than one line,
either.
#@nonl
#@-node:HASH_CHAR (char) For 'regx'
#@+node:TYPE (Token Types)
The token type to highlight the text with.

Parser rules can highlight tokens using any of the following token types:

NULL - no special highlighting
COMMENT1,COMMENT2,COMMENT3,COMMENT4
FUNCTION
KEYWORD1,KEYWORD2,KEYWORD3,KEYWORD4
LABEL
LITERAL1,LITERAL2,LITERAL3,LITERAL4
MARKUP
OPERATOR
#@nonl
#@-node:TYPE (Token Types)
#@-node: Documentation of attributes
#@+node:Attributes for 'begin' & 'end'
Attribute           Elements used in
---------           ----------------
AT_LINE_START       BEGIN,END
AT_WHITESPACE_END   BEGIN,END
AT_WORD_START       BEGIN,END
#@nonl
#@-node:Attributes for 'begin' & 'end'
#@+node:Attributes for 'import': delegate
The only required attribute DELEGATE must be set to the name of a ruleset. To
import a ruleset defined in the current mode, just specify its name. To import a
ruleset defined in another mode, specify a name of the form mode::ruleset. Note
that the first (unnamed) ruleset in a mode is called “MAIN”.

One quirk is that the definition of the imported ruleset is not copied to the
location of the IMPORT tag, but rather to the end of the containing ruleset.
This has implications with rule-ordering; see the section called “Rule Ordering
Requirements”.
#@nonl
#@-node:Attributes for 'import': delegate
#@+node:Attributes for 'mark' 
Attribute           Elements used in
---------           ----------------
AT_LINE_START       MARK_FOLLOWING,MARK_PREVIOUS
AT_WHITESPACE_END   MARK_FOLLOWING,MARK_PREVIOUS
AT_WORD_START       MARK_FOLLOWING,MARK_PREVIOUS 
EXCLUDE_MATCH       MARK_FOLLOWING,MARK_PREVIOUS
TYPE                MARK_FOLLOWING,MARK_PREVIOUS
#@nonl
#@-node:Attributes for 'mark' 
#@+node:Attributes for 'property': name, value
Attribute           Elements used in
---------           ----------------
NAME                PROPERTY
VALUE               PROPERTY
#@nonl
#@-node:Attributes for 'property': name, value
#@+node:Attributes for 'rules'
Attribute           Elements used in
---------           ----------------
DEFAULT             RULES
DIGIT_RE            RULES
HIGHLIGHT_DIGITS    RULES
IGNORE_CASE         RULES
SET                 RULES
NO_WORD_SEP         RULES

The RULES tag supports the following attributes, all of which are optional: 

SET the name of this ruleset. All rulesets other than the first must have a
name.

IGNORE_CASE if set to FALSE, matches will be case sensitive. Otherwise, case
will not matter. Default is TRUE.

NO_WORD_SEP Any non-alphanumeric character not in this list is treated as a word
separator for the purposes of syntax highlighting.

DEFAULT The token type for text which doesn't match any specific rule. Default
is NULL. See the section called “Token Types” for a list of token types.

HIGHLIGHT_DIGITS DIGIT_RE

If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to
highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT
token type. A word that contains other letters in addition to digits will be
highlighted with the DIGIT token type only if it matches the regular expression
specified in the DIGIT_RE attribute. If this attribute is not specified, it will
not be highlighted.
#@nonl
#@-node:Attributes for 'rules'
#@+node:Attributes for 'span'
all_spans   = EOL_SPAN,EOL_SPAN_REGEXP,SPAN,SPAN_REGEXP
plain_spans = SPAN,SPAN_REGEXP
regx_spans  = EOL_SPAN_REGEXP,SPAN_REGEXP

Attribute           Elements used in
---------           ----------------
AT_LINE_START       all_spans
AT_WHITESPACE_END   all_spans
AT_WORD_START       all_spans
DELEGATE            all_spans
EXCLUDE_MATCH       all_spans
HASH_CHAR           regx_spans  (must be specified)
NO_ESCAPE           plain_spans
NO_LINE_BREAK       plain_spans
NO_WORD_BREAK       plain_spans
TYPE                all_spans
#@nonl
#@-node:Attributes for 'span'
#@+node:Attributes for 'seq' 
Attribute           Elements used in
---------           ----------------
AT_LINE_START       SEQ,SEQ_REGEXP
AT_WHITESPACE_END   SEQ,SEQ_REGEXP
AT_WORD_START       SEQ,SEQ_REGEXP
DELEGATE            SEQ,SEQ_REGEXP
HASH_CHAR           SEQ_REGEXP (must be specified)
TYPE                SEQ,SEQ_REGEXP
#@nonl
#@-node:Attributes for 'seq' 
#@-node:Attributes
#@+node:Elements
@language html
@color
#@nonl
#@+node:Spans and seqs...
#@+node:eol_span
An EOL_SPAN is similar to a SPAN except that highlighting stops at the end of
the line, and no end sequence needs to be specified. The text to match is
specified between the opening and closing EOL_SPAN tags.

The following attributes are supported:

TYPE - The token type to highlight the span with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the span will only be highlighted if the
sequence is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a word.

DELEGATE - text inside the span will be highlighted with the specified ruleset.
To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

EXCLUDE_MATCH - If set to TRUE, the start and end sequences will not be
highlighted, only the text between them will.

Here is an EOL_SPAN that highlights C++ comments:

<EOL_SPAN TYPE="COMMENT1">//</EOL_SPAN>
#@nonl
#@-node:eol_span
#@+node:eol_span_regexp
The EOL_SPAN_REGEXP rule is similar to the EOL_SPAN rule except the match
sequence is taken to be a regular expression.

In addition to the attributes supported by the EOL_SPAN tag, the HASH_CHAR
attribute must be specified. It must be set to the first character that the
regular expression matches. This rules out using regular expressions which can
match more than one character at the start position. The regular expression
match cannot span more than one line, either.

Here is an EOL_SPAN_REGEXP that highlights MS-DOS batch file comments, which
start with REM, followed by any whitespace character, and extend until the end
of the line:

<EOL_SPAN_REGEXP AT_WHITESPACE_END="TRUE" HASH_CHAR="R" TYPE="COMMENT1">REM\s</EOL_SPAN_REGEXP>
#@nonl
#@-node:eol_span_regexp
#@+node:mark_following
The MARK_FOLLOWING rule, which must be placed inside a RULES tag, highlights
from the start of the match to the next syntax token. The text to match is
specified between opening and closing MARK_FOLLOWING tags.

The following attributes are supported:

TYPE - The token type to highlight the text with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the sequence will only be highlighted if it
is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a word.

EXCLUDE_MATCH - If set to TRUE, the match will not be highlighted, only the text
after it will.

Here is a rule that highlights variables in Unix shell scripts (“$CLASSPATH”,“$IFS”, etc):

<MARK_FOLLOWING TYPE="KEYWORD2">$</MARK_FOLLOWING>
#@nonl
#@-node:mark_following
#@+node:mark_previous
The MARK_PREVIOUS rule, which must be placed inside a RULES tag, highlights from
the end of the previous syntax token to the matched text. The text to match is
specified between opening and closing MARK_PREVIOUS tags. The following
attributes are supported:

TYPE - The token type to highlight the text with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the sequence will only be highlighted if it
is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a word.

EXCLUDE_MATCH - If set to TRUE, the match will not be highlighted, only the text
before it will.

Here is a rule that highlights labels in Java mode (for example, “XXX:”):

<MARK_PREVIOUS AT_WHITESPACE_END="TRUE" EXCLUDE_MATCH="TRUE">:</MARK_PREVIOUS>
#@nonl
#@-node:mark_previous
#@+node:seq
The SEQ rule, which must be placed inside a RULES tag, highlights fixed
sequences of text. The text to highlight is specified between opening and
closing SEQ tags. The following attributes are supported:

TYPE - the token type to highlight the sequence with. See the section called
“Token Types” for a list of token types.

AT_LINE_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the sequence will only be highlighted if it
is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the sequence will only be highlighted if it
occurs at the beginning of a word.

DELEGATE - if this attribute is specified, all text after the sequence will be
highlighted using this ruleset. To delegate to a ruleset defined in the current
mode, just specify its name. To delegate to a ruleset defined in another mode,
specify a name of the form mode::ruleset. Note that the first (unnamed) ruleset
in a mode is called “MAIN”.

The following rules highlight a few Java operators:

<SEQ TYPE="OPERATOR">+</SEQ>
<SEQ TYPE="OPERATOR">-</SEQ>
<SEQ TYPE="OPERATOR">*</SEQ>
<SEQ TYPE="OPERATOR">/</SEQ>
#@nonl
#@-node:seq
#@+node:seq_regexp
The SEQ_REGEXP rule is similar to the SEQ rule except the match sequence is
taken to be a regular expression.

In addition to the attributes supported by the SEQ tag, the HASH_CHAR attribute
must be specified. It must be set to the first character that the regular
expression matches. This rules out using regular expressions which can match
more than one character at the start position. The regular expression match
cannot span more than one line, either.

Here is an example of a SEQ_REGEXP rule that highlights Perl's matcher
constructions such as m/(.+):(\d+):(.+)/:

<SEQ_REGEXP TYPE="MARKUP"
    HASH_CHAR="m"
    AT_WORD_START="TRUE"
>m([[:punct:]])(?:.*?[^\\])*?\1[sgiexom]*</SEQ_REGEXP>
#@nonl
#@-node:seq_regexp
#@+node:span
The SPAN rule, which must be placed inside a RULES tag, highlights text between
a start and end string. The start and end strings are specified inside child
elements of the SPAN tag. The following attributes are supported:

TYPE - The token type to highlight the span with. See the section called “Token
Types” for a list of token types.

AT_LINE_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a line.

AT_WHITESPACE_END - If set to TRUE, the span will only be highlighted if the
start sequence is the first non-whitespace text in the line.

AT_WORD_START - If set to TRUE, the span will only be highlighted if the start
sequence occurs at the beginning of a word.

DELEGATE - text inside the span will be highlighted with the specified ruleset.
To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

EXCLUDE_MATCH - If set to TRUE, the start and end sequences will not be
highlighted, only the text between them will.

NO_ESCAPE - If set to TRUE, the ruleset's escape character will have no effect
before the span's end string. Otherwise, the presence of the escape character
will cause that occurrence of the end string to be ignored.

NO_LINE_BREAK - If set to TRUE, the span will not cross line breaks.

NO_WORD_BREAK - If set to TRUE, the span will not cross word breaks.

Note that the AT_LINE_START, AT_WHITESPACE_END and AT_WORD_START attributes can
also be used on the BEGIN and END elements. Setting these attributes to the same
value on both elements has the same effect as setting them on the SPAN element.

Here is a SPAN that highlights Java string literals, which cannot include line breaks:

<SPAN TYPE="LITERAL1" NO_LINE_BREAK="TRUE">
  <BEGIN>"</BEGIN>
  <END>"</END>
</SPAN>

Here is a SPAN that highlights Java documentation comments by delegating to the “JAVADOC” ruleset defined elsewhere in the current mode:

<SPAN TYPE="COMMENT2" DELEGATE="JAVADOC">
  <BEGIN>/**</BEGIN>
  <END>*/</END>
</SPAN>

Here is a SPAN that highlights HTML cascading stylesheets inside <STYLE> tags by delegating to the main ruleset in the CSS edit mode:

<SPAN TYPE="MARKUP" DELEGATE="css::MAIN">
  <BEGIN>&lt;style&gt;</BEGIN>
  <END>&lt;/style&gt;</END>
</SPAN>
#@nonl
#@-node:span
#@+node:span_regexp
The SPAN_REGEXP rule is similar to the SPAN rule except the start sequence is
taken to be a regular expression.

In addition to the attributes supported by the SPAN tag, the HASH_CHAR attribute
must be specified. It must be set to the first character that the regular
expression matches. This rules out using regular expressions which can match
more than one character at the start position. The regular expression match
cannot span more than one line, either.

Any text matched by groups in the BEGIN regular expression is substituted in the
END string. See below for an example of where this is useful. 

Here is a SPAN_REGEXP rule that highlights “read-ins” in shell scripts:

<SPAN_REGEXP HASH_CHAR="<" TYPE="LITERAL1" DELEGATE="LITERAL">
    <BEGIN><![CDATA[<<[[:space:]'"]*([[:alnum:]_]+)[[:space:]'"]*]]></BEGIN>
    <END>$1</END>
</SPAN_REGEXP>

Here is a SPAN_REGEXP rule that highlights constructs placed between <#ftl and
>, as long as the <#ftl is followed by a word break:

<SPAN_REGEXP TYPE="KEYWORD1" HASH_CHAR="&lt;" DELEGATE="EXPRESSION">
    <BEGIN>&lt;#ftl\&gt;</BEGIN>
    <END>&gt;</END>
</SPAN_REGEXP>
#@nonl
#@-node:span_regexp
#@-node:Spans and seqs...
#@+node:All others...
#@+node:import
The IMPORT tag, which must be placed inside a RULES tag, loads all rules defined
in a given ruleset into the current ruleset; in other words, it has the same
effect as copying and pasting the imported ruleset.

The only required attribute DELEGATE must be set to the name of a ruleset. To
import a ruleset defined in the current mode, just specify its name. To import a
ruleset defined in another mode, specify a name of the form mode::ruleset. Note
that the first (unnamed) ruleset in a mode is called “MAIN”.

One quirk is that the definition of the imported ruleset is not copied to the
location of the IMPORT tag, but rather to the end of the containing ruleset.
This has implications with rule-ordering; see the section called “Rule Ordering
Requirements”.

Here is an example from the PHP mode, which extends the inline JavaScript
highlighting to support embedded PHP:


   <RULES SET="JAVASCRIPT+PHP">

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?php</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;%=</BEGIN>
       <END>%&gt;</END>
   </SPAN>

   <IMPORT DELEGATE="javascript::MAIN"/>
</RULES>
#@nonl
#@-node:import
#@+node:keywords (done)
The KEYWORDS tag, which must be placed inside a RULES tag and can only appear
once, specifies a list of keywords to highlight. Keywords are similar to SEQs,
except that SEQs match anywhere in the text, whereas keywords only match whole
words. Words are considered to be runs of text separated by non-alphanumeric
characters.

The KEYWORDS tag does not define any attributes.

Each child element of the KEYWORDS tag is an element whose name is a token type,
and whose content is the keyword to highlight.

<KEYWORDS>
  <KEYWORD1>if</KEYWORD1>
  <KEYWORD1>else</KEYWORD1>
  <KEYWORD3>int</KEYWORD3>
  <KEYWORD3>void</KEYWORD3>
</KEYWORDS>
#@nonl
#@-node:keywords (done)
#@+node:mode (done)
Each mode definition must begin with the following:

<?xml version="1.0"?>
<!DOCTYPE MODE SYSTEM "xmode.dtd">

Each mode definition must also contain exactly one MODE tag. All other tags (PROPS, RULES) must be placed inside the MODE tag. The MODE tag does not have any defined attributes. Here is an example:

<MODE>
    ... mode definition goes here ...
</MODE>
#@nonl
#@-node:mode (done)
#@+node:props & property  (for auto-indent)
The PROPS tag and the PROPERTY tags inside it are used to define mode-specific
properties. Each PROPERTY tag must have a NAME attribute set to the property's
name, and a VALUE attribute with the property's value.

All buffer-local properties listed in the section called “Buffer-Local
Properties” may be given values in edit modes.

The following mode properties specify commenting strings:

* commentEnd - the comment end string, used by the Range Comment command.
* commentStart - the comment start string, used by the Range Comment command.
* lineComment - the line comment string, used by the Line Comment command. 

When performing auto indent, a number of mode properties determine the resulting indent level:

* The line and the one before it are scanned for brackets listed in the
indentCloseBrackets and indentOpenBrackets properties. Opening brackets in the
previous line increase indent.

If lineUpClosingBracket is set to true, then closing brackets on the current
line will line up with the line containing the matching opening bracket. For
example, in Java mode lineUpClosingBracket is set to true, resulting in brackets
being indented like so:

{
    // Code
    {
        // More code
    }
}

If lineUpClosingBracket is set to false, the line after a closing bracket will
be lined up with the line containing the matching opening bracket. For example,
in Lisp mode lineUpClosingBracket is set to false, resulting in brackets being
indented like so:

(foo 'a-parameter
    (crazy-p)
    (bar baz ()))
(print "hello world")

* If the previous line contains no opening brackets, or if the
doubleBracketIndent property is set to true, the previous line is checked
against the regular expressions in the indentNextLine and indentNextLines
properties. If the previous line matches the former, the indent of the current
line is increased and the subsequent line is shifted back again. If the previous
line matches the latter, the indent of the current and subsequent lines is
increased.

In Java mode, for example, the indentNextLine property is set to match control
structures such as “if”, “else”, “while”, and so on.

The doubleBracketIndent property, if set to the default of false, results in code indented like so:

while(objects.hasNext())
{
    Object next = objects.hasNext();
    if(next instanceof Paintable)
        next.paint(g);
}

On the other hand, setting this property to “true” will give the following result:

while(objects.hasNext())
    {
        Object next = objects.hasNext();
        if(next instanceof Paintable)
            next.paint(g);
    }

Here is the complete <PROPS> tag for Java mode:

<PROPS>
    <PROPERTY NAME="commentStart" VALUE="/*" />
    <PROPERTY NAME="commentEnd" VALUE="*/" />
    <PROPERTY NAME="lineComment" VALUE="//" />
    <PROPERTY NAME="wordBreakChars" VALUE=",+-=&lt;&gt;/?^&amp;*" />

    <!-- Auto indent -->
    <PROPERTY NAME="indentOpenBrackets" VALUE="{" />
    <PROPERTY NAME="indentCloseBrackets" VALUE="}" />
    <PROPERTY NAME="indentNextLine"
    	VALUE="\s*(((if|while)\s*\(|else\s*|else\s+if\s*\(|for\s*\(.*\))[^{;]*)" />
    <!-- set this to 'true' if you want to use GNU coding style -->
    <PROPERTY NAME="doubleBracketIndent" VALUE="false" />
    <PROPERTY NAME="lineUpClosingBracket" VALUE="true" />
</PROPS>
#@nonl
#@-node:props & property  (for auto-indent)
#@+node:rules
RULES tags must be placed inside the MODE tag. Each RULES tag defines a ruleset.
A ruleset consists of a number of parser rules, with each parser rule specifying
how to highlight a specific syntax token. There must be at least one ruleset in
each edit mode. There can also be more than one, with different rulesets being
used to highlight different parts of a buffer (for example, in HTML mode, one
rule set highlights HTML tags, and another highlights inline JavaScript). For
information about using more than one ruleset, see the section called “The SPAN
Tag”.

The RULES tag supports the following attributes, all of which are optional: 

SET the name of this ruleset. All rulesets other than the first must have a
name.

IGNORE_CASE if set to FALSE, matches will be case sensitive. Otherwise, case
will not matter. Default is TRUE.

NO_WORD_SEP Any non-alphanumeric character not in this list is treated as a word
separator for the purposes of syntax highlighting.

DEFAULT The token type for text which doesn't match any specific rule. Default
is NULL. See the section called “Token Types” for a list of token types.

HIGHLIGHT_DIGITS and DIGIT_RE

If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to
highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT
token type. A word that contains other letters in addition to digits will be
highlighted with the DIGIT token type only if it matches the regular expression
specified in the DIGIT_RE attribute. If this attribute is not specified, it will
not be highlighted.

Here is an example DIGIT_RE regular expression that highlights Java-style
numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers
suffixed with various type indicators, and floating point literals containing an
exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"

Here is an example RULES tag:

<RULES IGNORE_CASE="FALSE" HIGHLIGHT_DIGITS="TRUE">
    ... parser rules go here ...
</RULES>
#@nonl
#@-node:rules
#@+node:terminate
The TERMINATE rule, which must be placed inside a RULES tag, specifies that
parsing should stop after the specified number of characters have been read from
a line.

The number of characters to terminate after should be specified with the AT_CHAR
attribute. Here is an example:

<TERMINATE AT_CHAR="1" />

This rule is used in Patch mode, for example, because only the first character
of each line affects highlighting.
#@nonl
#@-node:terminate
#@-node:All others...
#@-node:Elements
#@-node:jEdit docs...
#@+node:Refactored jEdit docs...
@nocolor
#@nonl
#@+node:@url http://www.jedit.org/42docs/users-guide/writing-modes-part.html
#@-node:@url http://www.jedit.org/42docs/users-guide/writing-modes-part.html
#@+node:Rule ordering
You might encounter this very common pitfall when writing your own modes.

Since jEdit checks buffer text against parser rules in the order they appear in
the ruleset, more specific rules must be placed before generalized ones,
otherwise the generalized rules will catch everything.
#@nonl
#@-node:Rule ordering
#@+node:Attributes
#@+node:AT_CHAR (int)
The number of characters to terminate after.

For terminate only.
#@nonl
#@-node:AT_CHAR (int)
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@+node:For rules only
#@+node:DEFAULT (token type)
The token type for text which doesn't match any specific rule. Default is NULL.
See the section called “Token Types” for a list of token types.

For 'rules' only.
#@nonl
#@-node:DEFAULT (token type)
#@+node:HIGHLIGHT_DIGITS  &DIGITS_RE (bool)
If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT token type.

A word that contains other letters in addition to digits will be highlighted with the DIGIT token type only if it matches the regular expression specified in the DIGIT_RE attribute. If this attribute is not specified, it will not be highlighted.

For 'rules' only.

Example: a DIGIT_RE regular expression that highlights Java-style
numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers
suffixed with various type indicators, and floating point literals containing an
exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"
#@nonl
#@-node:HIGHLIGHT_DIGITS  &DIGITS_RE (bool)
#@+node:IGNORE_CASE (bool)
If set to FALSE, matches will be case sensitive.
Otherwise, case will not matter. Default is TRUE.

For 'rules' only.
#@-node:IGNORE_CASE (bool)
#@+node:SET (string)
The name of this ruleset. All rulesets other than the first must have a name.

For 'rules' only.
#@nonl
#@-node:SET (string)
#@+node:NO_WORD_SEP
Any non-alphanumeric character not in this list is treated as a word separator
for the purposes of syntax highlighting.

For 'rules' only.
#@nonl
#@-node:NO_WORD_SEP
#@-node:For rules only
#@+node:HASH_CHAR (char)
The first character that the regular expression matches.

This rules out using regular expressions which can match more than one character
at the start position. The regular expression match cannot span more than one
line, either.

Required for eol_span_regexp, span_regexp, seq_regexp.
#@nonl
#@-node:HASH_CHAR (char)
#@+node:NAME and VALUE
Each PROPERTY tag must have a NAME attribute set to the property's name, and a
VALUE attribute with the property's value.

For property only.
#@nonl
#@-node:NAME and VALUE
#@+node:NO_xxx
#@+node:NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@nonl
#@-node:NO_WORD_BREAK (bool)
#@+node:NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@nonl
#@-node:NO_LINE_BREAK (bool)
#@+node:NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@nonl
#@-node:NO_ESCAPE (bool)
#@-node:NO_xxx
#@+node:TYPE (Token Types)
The token type to highlight the text with.

Parser rules can highlight tokens using any of the following token types:

NULL - no special highlighting
COMMENT1,COMMENT2,COMMENT3,COMMENT4
FUNCTION
KEYWORD1,KEYWORD2,KEYWORD3,KEYWORD4
LABEL
LITERAL1,LITERAL2,LITERAL3,LITERAL4
MARKUP
OPERATOR
#@nonl
#@-node:TYPE (Token Types)
#@-node:Attributes
#@+node:Elements (children are attributes)
@language html
@color

All rules must be contained in the RULES element.
#@nonl
#@+node:begin & end
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@-node:begin & end
#@+node:eol_span (can use DELEGATE)
An EOL_SPAN is similar to a SPAN except that highlighting stops at the end of
the line, and no end sequence needs to be specified. The text to match is
specified between the opening and closing EOL_SPAN tags.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH

Here is an EOL_SPAN that highlights C++ comments:

<EOL_SPAN TYPE="COMMENT1">//</EOL_SPAN>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@-node:eol_span (can use DELEGATE)
#@+node:eol_span_regexp (can use DELEGATE)
The EOL_SPAN_REGEXP rule is similar to the EOL_SPAN rule except the match
sequence is taken to be a regular expression.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH, HASH_CHAR(required)

An EOL_SPAN_REGEXP that highlights MS-DOS batch file comments, which
start with REM, followed by any whitespace character, and extend until the end
of the line:

<EOL_SPAN_REGEXP AT_WHITESPACE_END="TRUE" HASH_CHAR="R" TYPE="COMMENT1">REM\s</EOL_SPAN_REGEXP>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@+node:HASH_CHAR (char)
The first character that the regular expression matches.

This rules out using regular expressions which can match more than one character
at the start position. The regular expression match cannot span more than one
line, either.

Required for eol_span_regexp, span_regexp, seq_regexp.
#@nonl
#@-node:HASH_CHAR (char)
#@-node:eol_span_regexp (can use DELEGATE)
#@+node:import
The IMPORT tag loads all rules defined in a given ruleset into the current
ruleset; in other words, it has the same effect as copying and pasting the
imported ruleset.

Attributes: DELEGATE (required)

An example from the PHP mode, which extends the inline JavaScript
highlighting to support embedded PHP:

<RULES SET="JAVASCRIPT+PHP">

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?php</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;?</BEGIN>
       <END>?&gt;</END>
   </SPAN>

   <SPAN TYPE="MARKUP" DELEGATE="php::PHP">
       <BEGIN>&lt;%=</BEGIN>
       <END>%&gt;</END>
   </SPAN>

   <IMPORT DELEGATE="javascript::MAIN"/>
</RULES>
#@nonl
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@-node:import
#@+node:keywords
The KEYWORDS tag can only appear once. It specifies a list of keywords to
highlight.

Keywords are similar to SEQs, except that SEQs match anywhere in the
text, whereas keywords only match whole words. Words are considered to be runs
of text separated by non-alphanumeric characters.

Attributes:  None.

Each child element of the KEYWORDS tag is an element whose name is a token type,
and whose content is the keyword to highlight.

Example:

<KEYWORDS>
  <KEYWORD1>if</KEYWORD1>
  <KEYWORD1>else</KEYWORD1>
  <KEYWORD3>int</KEYWORD3>
  <KEYWORD3>void</KEYWORD3>
</KEYWORDS>
#@nonl
#@-node:keywords
#@+node:mark_following & mark_previous
The MARK_FOLLOWING rule highlights from the start of the match to the next syntax
token. The text to match is specified between opening and closing MARK_FOLLOWING
tags.

The MARK_PREVIOUS rule highlights from the end of the previous syntax token to
the matched text. The text to match is specified between opening and closing
MARK_PREVIOUS tags.

Attributes: TYPE, AT_xxx, EXCLUDE_MATCH

Example:

<MARK_FOLLOWING TYPE="KEYWORD2">$</MARK_FOLLOWING>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@-node:mark_following & mark_previous
#@+node:mode
Each mode definition must begin with the following:

<?xml version="1.0"?>
<!DOCTYPE MODE SYSTEM "xmode.dtd">

Each mode definition must also contain exactly one MODE tag.  All other tags (PROPS, RULES) must be placed inside the MODE tag.

Attributes: None

<MODE>
    ... mode definition goes here ...
</MODE>
#@nonl
#@-node:mode
#@+node:props & property
The PROPS tag and the PROPERTY tags define mode-specific properties.

Attributes: NAME, VALUE

All buffer-local properties may be given values in edit modes.

EKR: must support at least commentStart, commentEnd, lineComment, and wordBreakChars attributes.

Here is the complete <PROPS> tag for Java mode:

<PROPS>
    <PROPERTY NAME="commentStart" VALUE="/*" />
    <PROPERTY NAME="commentEnd" VALUE="*/" />
    <PROPERTY NAME="lineComment" VALUE="//" />
    <PROPERTY NAME="wordBreakChars" VALUE=",+-=&lt;&gt;/?^&amp;*" />

    <!-- Auto indent -->
    <PROPERTY NAME="indentOpenBrackets" VALUE="{" />
    <PROPERTY NAME="indentCloseBrackets" VALUE="}" />
    <PROPERTY NAME="indentNextLine"
    	VALUE="\s*(((if|while)\s*\(|else\s*|else\s+if\s*\(|for\s*\(.*\))[^{;]*)" />
    <!-- set this to 'true' if you want to use GNU coding style -->
    <PROPERTY NAME="doubleBracketIndent" VALUE="false" />
    <PROPERTY NAME="lineUpClosingBracket" VALUE="true" />
</PROPS>
#@nonl
#@+node:NAME and VALUE
Each PROPERTY tag must have a NAME attribute set to the property's name, and a
VALUE attribute with the property's value.

For property only.
#@nonl
#@-node:NAME and VALUE
#@+node:Properties for comment strings
The following mode properties specify commenting strings:

commentEnd - the comment end string, used by the Range Comment command.

commentStart - the comment start string, used by the Range Comment command.

lineComment - the line comment string, used by the Line Comment command. 
#@nonl
#@-node:Properties for comment strings
#@+node:Properties for auto-indent
When performing auto indent, a number of mode properties determine the resulting indent level:



#@+node:indentCloseBrackets and indentOpenBrackets
The line and the one before it are scanned for brackets listed in the
indentCloseBrackets and indentOpenBrackets properties. Opening brackets in the
previous line increase indent.

If lineUpClosingBracket is set to true, then closing brackets on the current
line will line up with the line containing the matching opening bracket. For
example, in Java mode lineUpClosingBracket is set to true, resulting in brackets
being indented like so:

{
    // Code
    {
        // More code
    }
}

If lineUpClosingBracket is set to false, the line after a closing bracket will
be lined up with the line containing the matching opening bracket. For example,
in Lisp mode lineUpClosingBracket is set to false, resulting in brackets being
indented like so:

(foo 'a-parameter
    (crazy-p)
    (bar baz ()))
(print "hello world")
#@nonl
#@-node:indentCloseBrackets and indentOpenBrackets
#@+node:doubleBracketIndent
If the previous line contains no opening brackets, or if the
doubleBracketIndent property is set to true, the previous line is checked
against the regular expressions in the indentNextLine and indentNextLines
properties. If the previous line matches the former, the indent of the current
line is increased and the subsequent line is shifted back again. If the previous
line matches the latter, the indent of the current and subsequent lines is
increased.

In Java mode, for example, the indentNextLine property is set to match control
structures such as “if”, “else”, “while”, and so on.

The doubleBracketIndent property, if set to the default of false, results in code indented like so::

    while(objects.hasNext())
    {
        Object next = objects.hasNext();
        if(next instanceof Paintable)
            next.paint(g);
    }

On the other hand, setting this property to “true” will give the following result::

    while(objects.hasNext())
        {
            Object next = objects.hasNext();
            if(next instanceof Paintable)
                next.paint(g);
        }
#@nonl
#@-node:doubleBracketIndent
#@-node:Properties for auto-indent
#@+node:Buffer-Local Properties
Buffer-local properties provide an alternate way to change editor settings on a
per-buffer basis. While changes made in the Buffer Options dialog box are lost
after the buffer is closed, buffer-local properties take effect each time the
file is opened, because they are embedded in the file itself. 

When jEdit loads a file, it checks the first and last 10 lines for
colon-enclosed name/value pairs. For example, placing the following in a buffer
changes the indent width to 4 characters, enables soft tabs, and activates the
Perl edit mode: 

:indentSize=4:noTabs=true:mode=perl:

Adding buffer-local properties to a buffer takes effect after the next time the
buffer is saved. 
#@nonl
#@+node:collapseFolds
Folds with a level of this or higher will be collapsed when the buffer is
opened. If set to zero, all folds will be expanded initially. See the section
called “Folding”.

#@-node:collapseFolds
#@+node:deepIndent
When set to “true”, multiple-line expressions delimited by parentheses are aligned like so::

    retVal.x = (int)(horizontalOffset
        + Chunk.offsetToX(info.chunks,
                          offset));

With this setting disabled, the text would look like so::

    retVal.x = (int)(horizontalOffset
        + Chunk.offsetToX(info.chunks,
        offset));
#@nonl
#@-node:deepIndent
#@+node:folding
The fold mode; one of “none”, “indent”, “explicit”, or the name of a plugin
folding mode. See the section called “Folding”.
#@nonl
#@-node:folding
#@+node:indentSize
The width, in characters, of one indent. Must be an integer greater than 0. See
the section called “Tabbing and Indentation”.
#@nonl
#@-node:indentSize
#@+node:maxLineLen
The maximum line length and wrap column position. Inserting text beyond this
column will automatically insert a line break at the appropriate position. See
the section called “Inserting and Deleting Text”.

#@-node:maxLineLen
#@+node:mode
The default edit mode for the buffer. See the section called “Edit Modes”. 

#@-node:mode
#@+node:noTabs
If set to “true”, soft tabs (multiple space characters) will be used instead of
“real” tabs. See the section called “Tabbing and Indentation”.

#@-node:noTabs
#@+node:noWordSep
A list of non-alphanumeric characters that are not to be treated as word
separators. Global default is “_”.

tabSize - The tab width. Must be an integer
greater than 0. See the section called “Tabbing and Indentation”.

#@-node:noWordSep
#@+node:wordBreakChars
Characters, in addition to spaces and tabs, at which lines may be split when
word wrapping. See the section called “Inserting and Deleting Text”.

#@-node:wordBreakChars
#@+node:wrap
The word wrap mode; one of “none”, “soft”, or “hard”. See the section called
“Wrapping Long Lines”.
#@nonl
#@-node:wrap
#@-node:Buffer-Local Properties
#@-node:props & property
#@+node:rules
For information about using more than one ruleset, see the section called “The SPAN Tag”.

Attributes: SET, IGNORE_CASE, NO_WORD_SEP, DEFAULT, HIGHLIGHT_DIGITS, DIGIT_RE

<RULES IGNORE_CASE="FALSE" HIGHLIGHT_DIGITS="TRUE">
    ... parser rules go here ...
</RULES>
#@nonl
#@+node:For rules only
#@+node:DEFAULT (token type)
The token type for text which doesn't match any specific rule. Default is NULL.
See the section called “Token Types” for a list of token types.

For 'rules' only.
#@nonl
#@-node:DEFAULT (token type)
#@+node:HIGHLIGHT_DIGITS  &DIGITS_RE (bool)
If the HIGHLIGHT_DIGITS attribute is set to TRUE, jEdit will attempt to highlight numbers in this ruleset.

Any word consisting entirely of digits (0-9) will be highlighted with the DIGIT token type.

A word that contains other letters in addition to digits will be highlighted with the DIGIT token type only if it matches the regular expression specified in the DIGIT_RE attribute. If this attribute is not specified, it will not be highlighted.

For 'rules' only.

Example: a DIGIT_RE regular expression that highlights Java-style
numeric literals (normal numbers, hexadecimals prefixed with 0x, numbers
suffixed with various type indicators, and floating point literals containing an
exponent):

DIGIT_RE="(0x[[:xdigit:]]+|[[:digit:]]+(e[[:digit:]]*)?)[lLdDfF]?"
#@nonl
#@-node:HIGHLIGHT_DIGITS  &DIGITS_RE (bool)
#@+node:IGNORE_CASE (bool)
If set to FALSE, matches will be case sensitive.
Otherwise, case will not matter. Default is TRUE.

For 'rules' only.
#@-node:IGNORE_CASE (bool)
#@+node:SET (string)
The name of this ruleset. All rulesets other than the first must have a name.

For 'rules' only.
#@nonl
#@-node:SET (string)
#@+node:NO_WORD_SEP
Any non-alphanumeric character not in this list is treated as a word separator
for the purposes of syntax highlighting.

For 'rules' only.
#@nonl
#@-node:NO_WORD_SEP
#@-node:For rules only
#@-node:rules
#@+node:seq (can use DELEGATE)
The SEQ rule highlights fixed sequences of text. The text to highlight is
specified between opening and closing SEQ tags. The following attributes are
supported:

Attributes: TYPE, AT_xxx, DELEGATE

Examples:

<SEQ TYPE="OPERATOR">+</SEQ>
<SEQ TYPE="OPERATOR">-</SEQ>
<SEQ TYPE="OPERATOR">*</SEQ>
<SEQ TYPE="OPERATOR">/</SEQ>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@-node:seq (can use DELEGATE)
#@+node:seq_regexp (can use DELEGATE)
The SEQ_REGEXP rule is similar to the SEQ rule except the match sequence is
taken to be a regular expression.

Attributes: TYPE, AT_xxx, DELEGATE, HASH_CHAR(required)

Example: a SEQ_REGEXP rule that highlights Perl's matcher
constructions such as m/(.+):(\d+):(.+)/:

<SEQ_REGEXP TYPE="MARKUP"
    HASH_CHAR="m"
    AT_WORD_START="TRUE"
>m([[:punct:]])(?:.*?[^\\])*?\1[sgiexom]*</SEQ_REGEXP>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@-node:seq_regexp (can use DELEGATE)
#@+node:span  (can use DELEGATE)
The SPAN rule highlights text between a start and end string. The start and end
strings are specified inside child elements of the SPAN tag.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH, NO_xxx,

Example: a SPAN that highlights Java string literals, which cannot include line breaks:

<SPAN TYPE="LITERAL1" NO_LINE_BREAK="TRUE">
  <BEGIN>"</BEGIN>
  <END>"</END>
</SPAN>

Example: a SPAN that highlights Java documentation comments by delegating to the
“JAVADOC” ruleset defined elsewhere in the current mode:

<SPAN TYPE="COMMENT2" DELEGATE="JAVADOC">
  <BEGIN>/**</BEGIN>
  <END>*/</END>
</SPAN>

Example: a SPAN that highlights HTML cascading stylesheets inside <STYLE> tags by
delegating to the main ruleset in the CSS edit mode:

<SPAN TYPE="MARKUP" DELEGATE="css::MAIN">
  <BEGIN>&lt;style&gt;</BEGIN>
  <END>&lt;/style&gt;</END>
</SPAN>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@+node:NO_xxx
#@+node:NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@nonl
#@-node:NO_WORD_BREAK (bool)
#@+node:NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@nonl
#@-node:NO_LINE_BREAK (bool)
#@+node:NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@nonl
#@-node:NO_ESCAPE (bool)
#@-node:NO_xxx
#@-node:span  (can use DELEGATE)
#@+node:span_regexp  (can use DELEGATE)
The SPAN_REGEXP rule is similar to the SPAN rule except the start sequence is
a regular expression.

Attributes: TYPE, AT_xxx, DELEGATE, EXCLUDE_MATCH, NO_xxx, HASH_CHAR (required).

Any text matched by groups in the BEGIN regular expression is substituted in the
END string. See below for an example of where this is useful. 

Example: a SPAN_REGEXP rule that highlights “read-ins” in shell scripts:

<SPAN_REGEXP HASH_CHAR="<" TYPE="LITERAL1" DELEGATE="LITERAL">
    <BEGIN><![CDATA[<<[[:space:]'"]*([[:alnum:]_]+)[[:space:]'"]*]]></BEGIN>
    <END>$1</END>
</SPAN_REGEXP>

Example: a SPAN_REGEXP rule that highlights constructs placed between <#ftl and
>, as long as the <#ftl is followed by a word break:

<SPAN_REGEXP TYPE="KEYWORD1" HASH_CHAR="&lt;" DELEGATE="EXPRESSION">
    <BEGIN>&lt;#ftl\&gt;</BEGIN>
    <END>&gt;</END>
</SPAN_REGEXP>
#@nonl
#@+node:AT_xxx
#@+node:AT_LINE_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a line.

For 'seq', 'span', 'begin', 'end'
#@nonl
#@-node:AT_LINE_START (bool)
#@+node:AT_WHITESPACE_END (bool)
If set to TRUE, the sequence will only be highlighted if it is the first non-whitespace text in the line.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WHITESPACE_END (bool)
#@+node:AT_WORD_START (bool)
If set to TRUE, the sequence will only be highlighted if it occurs at the beginning of a word.

For 'seq', 'span', 'begin', 'end'
#@-node:AT_WORD_START (bool)
#@-node:AT_xxx
#@+node:DELEGATE
EKR: This attribute is used in two completely different ways:

1.  In spans:

Text inside the span will be highlighted with the specified ruleset.

EKR: this is essentially a 'recursive' coloring. The delegate ruleset is the
only ruleset used.

2.  In import rules:

The imported ruleset is copied to the **end** of the containing ruleset,
**not** to the location of the IMPORT rule. This has implications with
rule-ordering; see the section called “Rule Ordering Requirements”.

To delegate to a ruleset defined in the current mode, just specify its name. To
delegate to a ruleset defined in another mode, specify a name of the form
mode::ruleset. Note that the first (unnamed) ruleset in a mode is called
“MAIN”.

#@-node:DELEGATE
#@+node:EXCLUDE_MATCH (bool)
If set to TRUE, the match will not be highlighted, only the text before it will.
#@nonl
#@-node:EXCLUDE_MATCH (bool)
#@+node:NO_xxx
#@+node:NO_WORD_BREAK (bool)
If set to TRUE, the span will not cross word breaks.

For 'span' only.
#@nonl
#@-node:NO_WORD_BREAK (bool)
#@+node:NO_LINE_BREAK (bool)
If set to TRUE, the span will not cross line breaks.

For 'span' only.
#@nonl
#@-node:NO_LINE_BREAK (bool)
#@+node:NO_ESCAPE (bool)
If set to TRUE, the ruleset's escape character will have no effect before the
span's end string. Otherwise, the presence of the escape character will cause
that occurrence of the end string to be ignored.

For 'span' only.
#@nonl
#@-node:NO_ESCAPE (bool)
#@-node:NO_xxx
#@+node:HASH_CHAR (char)
The first character that the regular expression matches.

This rules out using regular expressions which can match more than one character
at the start position. The regular expression match cannot span more than one
line, either.

Required for eol_span_regexp, span_regexp, seq_regexp.
#@nonl
#@-node:HASH_CHAR (char)
#@-node:span_regexp  (can use DELEGATE)
#@+node:terminate
The TERMINATE rule specifies that parsing should stop after the specified number
of characters have been read from a line.

The number of characters to terminate after should be specified with the AT_CHAR
attribute. Here is an example:

<TERMINATE AT_CHAR="1" />

This rule is used in Patch mode, for example, because only the first character
of each line affects highlighting.
#@nonl
#@+node:AT_CHAR (int)
The number of characters to terminate after.

For terminate only.
#@nonl
#@-node:AT_CHAR (int)
#@-node:terminate
#@-node:Elements (children are attributes)
#@-node:Refactored jEdit docs...
#@+node:import from modes
import glob

# Smoke-test the mode-conversion pipeline: try to import every generated
# modes/*.py file and report how many import cleanly.
print '-' * 20
path = r'c:\prog\tigris-cvs\leo\modes'

errors = ['cil',] # End keyword not matched by start.

if 0:
    files = ['python','php']
else:
    # Collect the bare mode names (no directory, no extension) of every
    # generated .py file in the modes directory.
    files = glob.glob(r'c:\prog\tigris-cvs\leo\modes\*.py')
    files = [g.os_path_split(f)[1] for f in files]
    files = [g.os_path_splitext(f)[0] for f in files]
    # for f in files: print str(f)

if 1:
    good, bad, skipped = 0,0,0
    for modeName in (files):
        if modeName in errors:
            skipped += 1 ; continue
        # g.importFromPath returns the imported module, or None on failure.
        mode = g.importFromPath (modeName,path)
        if mode:
            good += 1
            if 0:
                # Sanity-check the module-level dicts the converter emits.
                for s in ('properties','rulesDict','importDict'):
                    print hasattr(mode,s),modeName,s
        else: bad += 1
    print 'good: %d, bad: %d, skipped: %d' % (good,bad,skipped)
#@-node:import from modes
#@+node:errors
@nocolor
convert: applescript
SAXParseException: c:\prog\tigris-cvs\leo\modes\applescript.xml:44:23: not well-formed (invalid token)

convert: bibtex
SAXParseException: c:\prog\tigris-cvs\leo\modes\bibtex.xml:1057:48: duplicate attribute

convert: cil
XML error: end keywords not matched by start keywords

convert: pl1
SAXParseException: c:\prog\tigris-cvs\leo\modes\pl1.xml:45:23: not well-formed (invalid token)

convert: shell
SAXParseException: c:\prog\tigris-cvs\leo\modes\shell.xml:61:26: not well-formed (invalid token)

convert: shellscript
SAXParseException: c:\prog\tigris-cvs\leo\modes\shellscript.xml:61:26: not well-formed (invalid token)

convert: ssharp
SAXParseException: c:\prog\tigris-cvs\leo\modes\ssharp.xml:34:19: not well-formed (invalid token)

« (bad character) U+00AB  &#xAB;
» (bad character)       &#xBB;

convert: text
Exception creating c:\prog\tigris-cvs\leo\modes\text..py
Traceback (most recent call last):

  File "<string>", line 1179, in ?

  File "<string>", line 41, in convert

  File "<string>", line 560, in write

  File "<string>", line 541, in putRules

  File "<string>", line 294, in putRule

  File "<string>", line 516, in putTerminate

TypeError: int argument required
#@nonl
#@-node:errors
#@+node:@@button jEdit2Py
'''Convert jEdit language description file to an equivalent .py file.'''

@language python
@tabwidth -4
@pagewidth 80

<< imports >>
<< to do >>
<< version history >>
<< set files to convert >>
# Module-level switches controlling debug output.
opt_print_summary = False # True: print summary in doEndElement.
opt_print_elements = False

@others

# Convert each .xml mode file in `files` to a .py file of the same name,
# skipping files known to be unconvertible.
print '-'*40
for path1 in files:
    if 1:
        if path1 in errors:
            print 'skipping %s' % path1 ; continue
    # Replace the 'xml' extension with 'py'.
    path2 = path1[:-3] + 'py'
    try:
        convert (c,path1,path2)
    except Exception:
        print 'Exception creating', path2
        g.es_exception()

g.es_print('done')
#@nonl
#@+node:<< imports >>
import glob
import string
import xml.sax
import xml.sax.saxutils
#@nonl
#@-node:<< imports >>
#@+node:<< to do >>
@nocolor
@

- AT_LINE_START, AT_WHITESPACE_END and AT_WORD_START attributes can also be used
on the BEGIN and END elements. Setting these attributes to the same value on
both elements has the same effect as setting them on the SPAN element.
#@nonl
#@-node:<< to do >>
#@+node:<< version history >>
@nocolor
@

7/23/06 EKR: Ignore keywords containing whitespace.
7/30/06 EKR: Compute keywordChars properly.  This fixes erlang bug, e.g.
8/24/06 EKR: Lowercase all keywords if ignore_case is true.
8/25/06 EKR: Prefixed all rule names with fileName (w/o extension) so I can makes sense of imported rules.
8/26/06 EKR: No need for hash_char in pattern matchers, because they are called only if the hash_char matches.
9/4/06  EKR: Added support for ESCAPE attribute of RULES element.
             This is required so the new colorizer plugin can support the no_escape argument to pattern matchers.
#@nonl
#@-node:<< version history >>
#@+node:<< set files to convert >>
# theDir = r'c:\prog\tigris-cvs\leo\modes'
# Locate Leo's modes directory relative to Leo's load directory.
theDir = g.os_path_abspath(g.os_path_join(g.app.loadDir,'..','modes'))
# Mode files whose xml has already been repaired by hand.
fixed = ['antlr','applescript','apacheconf','bibtex','cil','pl1','shell','shellscript','ssharp','text']
errors = [] # 'cil',] # End keyword not matched by start.
errors = [r'%s\%s.xml' % (theDir,s) for s in errors]
# for s in errors: print s
if 0: # A list of files.
    files = [r'%s\%s.xml' % (theDir,s) for s in ('perl',)] # 'rest','perl',
elif 1: # translate all files.
    files = glob.glob(g.os_path_abspath(g.os_path_join(theDir,'*.xml')))
elif 1: # error files
    files = [g.os_path_abspath(g.os_path_join(theDir,s)) for s in errors]
else: # translate two representative files.
    files = [g.os_path_abspath(g.os_path_join(theDir,s)) for s in ('python','php')]
# for s in files: print s
#@nonl
#@-node:<< set files to convert >>
#@+node:top-level
#@+node:convert
def convert (c,inputFileName,outputFileName):

    '''Convert the jEdit mode file inputFileName to the Python mode file
    outputFileName.  Does nothing if parsing or file creation fails.'''

    junk, fn = g.os_path_split(inputFileName)
    language, junk = g.os_path_splitext(fn)
    g.es_print('convert: %s' % (language))

    mode = parse_jEdit_file(inputFileName,language)

    if mode:
        # Use open, not the file() builtin (removed in Python 3).
        try:
            f = open(outputFileName,'w')
        except IOError:
            g.trace('can not create %s' % outputFileName)
            return

        try:
            mode.write(f,language)
        finally:
            # Always close the output file, even if write raises.
            f.close()
#@nonl
#@-node:convert
#@+node:parse_jEdit_file
def parse_jEdit_file(inputFileName,language):

    '''Parse the jEdit .xml mode file inputFileName from Leo's modes
    directory.  Return a modeClass instance, or None on any failure.'''

    if not inputFileName:
        return None

    if not inputFileName.endswith('.xml'):
        inputFileName = inputFileName + '.xml'

    path = g.os_path_join(g.app.loadDir,'../','modes',inputFileName)
    path = g.os_path_normpath(path)

    try: f = open(path)
    except IOError:
        g.trace('can not open %s'%path)
        return None

    mode = None
    try:
        try:
            parser = xml.sax.make_parser()
            # Do not include external general entities.
            # The actual feature name is "http://xml.org/sax/features/external-general-entities"
            parser.setFeature(xml.sax.handler.feature_external_ges,0)
            handler = contentHandler(c,inputFileName,language)
            parser.setContentHandler(handler)
            parser.parse(f)
            mode = handler.getMode()
        except Exception:
            # Catch Exception, not everything: a bare except would also
            # swallow KeyboardInterrupt/SystemExit.
            g.es('unexpected exception parsing %s' % (inputFileName),color='red')
            g.es_exception()
    finally:
        f.close()
    # Return *after* the finally clause: a return inside finally would
    # silently discard any in-flight exception.
    return mode
#@nonl
#@-node:parse_jEdit_file
#@+node:munge
def munge(s):

    '''Munge a mode name so that it is a valid python id.

    Valid characters are lowercased; everything else becomes '_'.'''

    valid = string.ascii_letters + string.digits + '_'

    result = []
    for ch in s:
        if ch in valid:
            result.append(ch.lower())
        else:
            # Replace characters that may not appear in a Python identifier.
            result.append('_')
    return ''.join(result)
#@nonl
#@-node:munge
#@-node:top-level
#@+node:class modeClass
class modeClass:

    '''A class representing one jEdit language-description mode.

    Use getters to access the attributes, properties and rules of this mode.'''

    # Leo's @others directive: the method definitions in child nodes go here.
    @others
#@nonl
#@+node: mode.__init__
def __init__ (self,contentHandler,fileName):

    '''Create a mode object describing the jEdit .xml mode file fileName,
    populated later by the given SAX contentHandler.'''

    # g.trace('mode',fileName)

    self.contentHandler = contentHandler
    self.c = contentHandler.c
    self.fileName = g.shortFileName(fileName) # The file from which the mode was imported.
    modeName, junk = g.os_path_splitext(self.fileName)
    self.fileModeName = modeName
    self.modeName = munge(modeName).lower()
    self.outputFile = None # The open output file to which Python statements get written.
    # Use the commander stored above rather than relying on the global c.
    self.tab_width = g.scanDirectives(self.c).get('tab_width')

    # Mode statistics...
    self.numberOfAttributes = 0
    self.numberOfElements = 0
    self.numberOfErrors = 0
    self.numberOfPropertyAttributes = 0
    self.numberOfRuleAttributes = 0

    # List of boolean attributes.
    self.boolAttrs = [
        'at_line_start','at_whitespace_end','at_word_start',
        'exclude_match','highlight_digits','ignore_case',
        'no_escape','no_line_break','no_word_break',]

    # List of elements that start a rule.
    self.ruleElements = [
        'eol_span','eol_span_regexp','import','keywords',
        'mark_following','mark_previous','seq','seq_regexp',
        'span','span_regexp','terminate',]

    if 0: # Not used at present.
        self.seqSpanElements = [
            'eol_span','eol_span_regexp','seq','seq_regexp',
            'span','span_regexp',]

    # Mode semantics.
    self.attributes = {}
    self.handlerCount = 0
    self.importedRules = [] # A bunch describing the imported ruleset.
    self.inProps = False
    self.inRules = False
    self.keywords = None
    self.modeProperties = []
    self.presentProperty = None # A bunch to be assigned to modeProperties or rulesetProperties.
    self.rule = None
    self.rulesets = []
    self.rules = [] # The rules of the present rules element.
    self.rulesetProperties = []
    self.rulesetAttributes = {} # The attributes of the present rules element.
#@nonl
#@-node: mode.__init__
#@+node: mode.__str__ & __repr__
def __str__ (self):

    '''Return a human-readable description of this mode.'''

    return '<modeClass for ' + self.fileName + '>'

__repr__ = __str__
#@nonl
#@-node: mode.__str__ & __repr__
#@+node: Output...
# Similar to printing, but generates the output file.
#@nonl
#@+node:fullDelegate
def fullDelegate (self,delegate):

    '''Return the fully-qualified (mode::ruleset) form of delegate,
    or the empty string if delegate is empty.'''

    if not delegate:
        return ''
    delegate = delegate.lower()
    if delegate.find('::') == -1:
        # Unqualified: the ruleset lives in this mode.
        return '%s::%s' % (self.fileModeName.lower(),delegate)
    return delegate
#@nonl
#@-node:fullDelegate
#@+node:escapeString & quoteString
def escapeString (self,s):

    '''Return string s (or list of strings s, joined) enclosed in double
    quotes, with backslashes, double quotes and tabs escaped.'''

    if isinstance(s, list): s = ''.join(s)

    # Order is important: escape backslashes first.
    return '"%s"' % s.replace('\\','\\\\').replace('"','\\"').replace('\t','\\t')

quoteString = escapeString
#@nonl
#@-node:escapeString & quoteString
#@+node:put, putTripleString
def put (self,s):

    '''Write s to the mode's open output file.'''

    self.outputFile.write(s)

def putTripleString(self,s):

    '''Write s after normalizing its indentation to the mode's tab width.'''

    adjusted = g.adjustTripleString(s,self.tab_width)
    self.put(adjusted)
#@nonl
#@-node:put, putTripleString
#@+node:putAttributes
def putAttributes (self):

    '''Write one attributes dict per ruleset, then the attributesDictDict
    mapping ruleset names to the names of those dicts.'''

    dd = {}
    # (key, default) pairs; the default also encodes how values get normalized.
    data = (
        ('default','null'),
        ('digit_re',''),
        ('escape',''),
        ('highlight_digits',True),
        ('ignore_case',True),
        ('no_word_sep',None), # could be false or ''.
    )

    for ruleset in self.rulesets:
        d = {}
        prefix = '%s_%s' % (self.modeName,ruleset.name)
        self.put('# Attributes dict for %s ruleset.\n' % (prefix))
        for key,default in data:
            val = ruleset.attributes.get(key,default)
            if default is True:
                # Booleans are written as the strings 'true'/'false'.
                if val: val = 'true'
                else: val = 'false'
            elif default is None:
                # no_word_sep: an explicit 'false' or a missing value means empty.
                if val and val.lower() == 'false': val = ''
                elif not val: val = ''
            # if val: g.trace(key,repr(val))
            d [key] = val

        self.putDict('%s_attributes_dict' % (prefix),d)
        dd [ prefix ] = '%s_attributes_dict' % (prefix)

    self.put('# Dictionary of attributes dictionaries for %s mode.\n' % (self.modeName))
    self.putDict('attributesDictDict',dd,escape=False)
#@nonl
#@-node:putAttributes
#@+node:putDict & putDictOfLists
def putDict (self,name,theDict,escape=True):

    '''Write theDict as a Python dict assignment statement named `name`.

    Keys are always escaped; values only when `escape` is True.'''

    esc = self.escapeString
    if escape:
        esc2 = self.escapeString
    else:
        def esc2(a): return a
    # sorted() works for both Python 2 lists and Python 3 dict views;
    # theDict.keys();keys.sort() does not.
    s = ''.join(['\t%s: %s,\n' % (esc(key),esc2(theDict.get(key)))
        for key in sorted(theDict.keys())])
    if s: s = '\n' + s
    self.put('%s = {%s}\n\n' % (name,s))

def putDictOfLists (self,name,theDict,strings=False):

    '''Write theDict (whose values are lists) as a Python dict assignment.

    If `strings` is True, list items are written as quoted strings.'''

    esc = self.escapeString
    theList = []
    for key in sorted(theDict.keys()):
        if strings:
            # Not completely general, but it works for the import dict, and that's good enough.
            s = ''.join(['"%s",' % (item) for item in theDict.get(key)])
        else:
            s = ''.join(['%s,' % (item) for item in theDict.get(key)])
        theList.append('\t%s: [%s],\n' % (esc(key),s))
    s = ''.join(theList)
    if s: s = '\n' + s
    self.put('%s = {%s}\n\n' % (name,s))
#@nonl
#@-node:putDict & putDictOfLists
#@+node:putImportDict
def putImportDict (self):

    '''Write the importDict: for each ruleset, the list of fully-qualified
    ruleset names its 'import' rules delegate to.'''

    d = {}
    for ruleset in self.rulesets:
        prefix = '%s_%s' % (self.modeName,ruleset.name)
        for rule in ruleset.rules:
            if rule.name == 'import':
                delegate = rule.getStrAttrib('delegate').lower()
                if delegate:
                    if delegate.find('::') == -1:
                        # Unqualified name: the delegate is a ruleset of this mode.
                        delegate_name = '%s::%s' % (prefix,delegate) # Can indeed happen.
                    else:
                        delegate_name = delegate
                    # Add each delegate at most once per ruleset.
                    theList = d.get(prefix,[])
                    if delegate_name not in theList:
                        theList.append(delegate_name)
                        d [prefix] = theList

    self.put('# Import dict for %s mode.\n' % (self.modeName))
    self.putDictOfLists('importDict',d,strings=True)
#@nonl
#@-node:putImportDict
#@+node:putKeywordsData
def putKeywordsData (self):

    '''Write one keywords dict per ruleset, then the keywordsDictDict
    mapping ruleset names to the names of those dicts.'''

    dd = {}

    for ruleset in self.rulesets:

        prefix = '%s_%s' % (self.modeName,ruleset.name)

        ignore_case = ruleset.attributes.get('ignore_case',False)
        # g.trace(ignore_case)

        self.put('# Keywords dict for %s ruleset.\n' % (prefix))

        # Use the first 'keywords' rule of the ruleset, if any.
        for rule in ruleset.rules:
            if rule.name == 'keywords':
                d = rule.keywordsDict
                # NOTE(review): deleting entries while iterating d.keys() relies on
                # Python 2 returning a list copy; under Python 3 this would raise.
                for key in d.keys():
                    if not key.strip():
                        # Drop empty/whitespace-only keys.
                        del d [key]
                    elif key.find(' ') != -1 or key.find('\t') != -1:
                        # jEdit keywords may not contain whitespace.
                        del d [key]
                        g.es_print('Warning: ignoring keyword containing whitespace: %s' % (
                            repr(key)))
                    elif ignore_case: # New in 4.4.1 final.
                        # Downcase all keys.
                        val = d.get(key)
                        key2 = key.lower()
                        if key2 != key:
                            del d[key]
                        d[key2] = val
                break
        else: d = {}

        self.putDict('%s_keywords_dict' % (prefix),d)
        dd [ prefix ] = '%s_keywords_dict' % (prefix)

    self.put('# Dictionary of keywords dictionaries for %s mode.\n' % (self.modeName))
    self.putDict('keywordsDictDict',dd,escape=False)
#@nonl
#@-node:putKeywordsData
#@+node:putList
def putList (self,name,theList,escape=True,lineCount=0):

    '''Write "name = [...]" containing the items of theList.

    escape: if True, pass each item through self.escapeString.
    lineCount: items per output line (0 means one item per line).'''

    def comma(i):
        # Always emit a trailing comma, even after the last item.
        return ',' # return g.choose(i==len(theList)-1,'',',')
    def nl(i):
        # Newline after every item when lineCount is 0, otherwise
        # after each group of lineCount items.
        return g.choose(lineCount == 0 or ((i%lineCount)==lineCount-1),'\n',' ')
    def tab(i,n):
        # Indent items that begin a new output line, except the very first.
        # return g.choose(i == 0 or nl(i-1) != ' ','\t','')
        return g.choose(i > 0 and nl(i-1) != ' ','\t','')
    esc = g.choose(escape,self.escapeString,lambda a:a)

    result = []
    n = len(theList)
    for i in xrange(n): # Python 2: xrange.
        result.append('%s%s%s%s' % (tab(i,n),esc(theList[i]),comma(i),nl(i)))

    vals = ''.join(result)
    # if n > 1: vals = '\n' + vals
    self.put('%s = [%s]\n\n' % (name,vals))
#@-node:putList
#@+node:putModeProperties
def putModeProperties (self,language):

    '''Emit the "properties" dict for the mode, built from the
    name/value attribute pairs in self.modeProperties.'''

    self.put('# Properties for %s mode.\n' % (language))

    props = {}
    for bunch in self.modeProperties:
        attrs = bunch.attributes
        props[attrs.get('name')] = attrs.get('value')

    self.putDict('properties', props)
#@nonl
#@-node:putModeProperties
#@+node:putRule & rule creators
def putRule (self, rule):

    '''Dispatch to the output creator for rule, falling back to
    putBadRule for unknown rule names.

    Returns the creator's value: the character(s) used to key this
    rule in the generated rules dict.'''

    creators = {
        'eol_span':         self.putEolSpan,
        'eol_span_regexp':  self.putEolSpanRegexp,
        'import':           self.putImport,
        'keywords':         self.putKeywords,
        'mark_following':   self.putMarkFollowing,
        'mark_previous':    self.putMarkPrevious,
        'seq':              self.putSeq,
        'seq_regexp':       self.putSeqRegexp,
        'span':             self.putSpan,
        'span_regexp':      self.putSpanRegexp,
        'terminate':        self.putTerminate,
    }

    creator = creators.get(rule.name,self.putBadRule)
    result = creator(rule)
    self.handlerCount += 1
    return result
#@nonl
#@+node:putBadRule
def putBadRule (self,rule):

    '''Write a warning comment for a rule that has no output creator.'''
    self.put('\n\n# *****no output creator for %s*****' % rule.name)
#@nonl
#@-node:putBadRule
#@+node:putEolSpan
def putEolSpan (self,rule):

    '''Generate a match_eol_span rule function for rule.

    Returns seq[0], the character keying this rule in the rules dict.'''

    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    # NOTE(review): the at_word_start keyword is filled from the
    # 'at_word_end' attribute; confirm this is intentional (the same
    # pattern appears in all the other rule creators).
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_eol_span(s, i, kind=%s, seq=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
    )

    self.putTripleString(s)
    return seq[0]
#@nonl
#@-node:putEolSpan
#@+node:putEolSpanRegexp
def putEolSpanRegexp (self,rule):

    '''Generate a match_eol_span_regexp rule function for rule.

    Returns hash_char, the character keying this rule in the rules dict
    (the explicit hash_char attribute, or the first char of the regexp).'''

    quote = self.quoteString
    seq = rule.getSeq(rule.name)
    hash_char = rule.getStrAttrib('hash_char') or seq[0]
    # g.trace('hash_char',hash_char)

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_eol_span_regexp(s, i, kind=%s, regexp=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        # quote(rule.getStrAttrib('hash_char')),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
    )

    self.putTripleString(s)
    return hash_char  # Bug fix: was seq[0]
#@nonl
#@-node:putEolSpanRegexp
#@+node:putImport
# Do nothing here: putImportDict creates x.importDict.

def putImport (self,rule):

    '''Handle an import rule.  No output is generated here, so undo
    the handler-count increment that putRule will apply afterwards.'''
    self.handlerCount -= 1
    return ''
#@nonl
#@-node:putImport
#@+node:putKeywords
def putKeywords (self,rule):

    '''Generate the match_keywords rule function.

    Returns the string 'keywords' so putRules can key this rule under
    every possible keyword character.'''

    fmt = '\ndef %s_rule%d(colorer, s, i):\n    return colorer.match_keywords(s, i)'
    self.putTripleString(fmt % (self.fileModeName,self.handlerCount))
    return 'keywords'
#@nonl
#@-node:putKeywords
#@+node:putMarkFollowing
def putMarkFollowing (self,rule):

    '''Generate a match_mark_following rule function for rule.

    Returns seq[0], the character keying this rule in the rules dict.'''

    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_mark_following(s, i, kind=%s, pattern=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        rule.getBoolAttrib('exclude_match'),
    )

    self.putTripleString(s)
    return seq[0]
#@nonl
#@-node:putMarkFollowing
#@+node:putMarkPrevious
def putMarkPrevious (self,rule):

    '''Generate a match_mark_previous rule function for rule.

    Returns seq[0], the character keying this rule in the rules dict.'''

    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_mark_previous(s, i, kind=%s, pattern=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, exclude_match=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        rule.getBoolAttrib('exclude_match'),
    )

    self.putTripleString(s)
    return seq[0]
#@nonl
#@-node:putMarkPrevious
#@+node:putSeq
def putSeq (self,rule):

    '''Generate a match_seq rule function for rule.

    Returns seq[0], the character keying this rule in the rules dict.'''

    quote = self.quoteString
    seq = rule.getSeq(rule.name)

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_seq(s, i, kind=%s, seq=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, delegate=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
    )

    self.putTripleString(s)
    return seq[0]
#@nonl
#@-node:putSeq
#@+node:putSeqRegexp
def putSeqRegexp (self,rule):

    '''Generate a match_seq_regexp rule function for rule.

    Returns hash_char, the character keying this rule in the rules dict
    (the explicit hash_char attribute, or the first char of the regexp).'''

    quote = self.quoteString
    seq = rule.getSeq(rule.name)
    hash_char = rule.getStrAttrib('hash_char') or seq[0]
    # g.trace('hash_char',hash_char)

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_seq_regexp(s, i, kind=%s, regexp=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s, delegate=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(seq),
        # quote(rule.getStrAttrib('hash_char')),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
    )

    self.putTripleString(s)
    return hash_char # Bug fix: was seq[0]
#@nonl
#@-node:putSeqRegexp
#@+node:putSpan
def putSpan (self,rule):

    '''Generate a match_span rule function for rule.

    Returns begin[0], the character keying this rule in the rules dict.'''

    quote = self.quoteString
    begin,end = rule.getSpan()

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_span(s, i, kind=%s, begin=%s, end=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s,exclude_match=%s,
        no_escape=%s, no_line_break=%s, no_word_break=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(begin),quote(end),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
        rule.getBoolAttrib('no_escape'),
        rule.getBoolAttrib('no_line_break'),
        rule.getBoolAttrib('no_word_break'),
    )

    self.putTripleString(s)
    return begin[0]
#@nonl
#@-node:putSpan
#@+node:putSpanRegexp
def putSpanRegexp (self,rule):

    '''Generate a match_span_regexp rule function for rule.

    Returns hash_char, the character keying this rule in the rules dict
    (the explicit hash_char attribute, or the first char of begin).'''

    quote = self.quoteString
    begin,end = rule.getSpan()
    hash_char = rule.getStrAttrib('hash_char') or begin[0]

    # NOTE(review): at_word_start is filled from the 'at_word_end'
    # attribute, as in the other rule creators -- confirm.
    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_span_regexp(s, i, kind=%s, begin=%s, end=%s,
        at_line_start=%s, at_whitespace_end=%s, at_word_start=%s,
        delegate=%s,exclude_match=%s,
        no_escape=%s, no_line_break=%s, no_word_break=%s)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        quote(begin),quote(end),
        # quote(rule.getStrAttrib('hash_char')),
        rule.getBoolAttrib('at_line_start'),
        rule.getBoolAttrib('at_whitespace_end'),
        rule.getBoolAttrib('at_word_end'),
        quote(self.fullDelegate(rule.getStrAttrib('delegate'))),
        rule.getBoolAttrib('exclude_match'),
        rule.getBoolAttrib('no_escape'),
        rule.getBoolAttrib('no_line_break'),
        rule.getBoolAttrib('no_word_break'),
    )

    self.putTripleString(s)
    return hash_char # Bug fix: was begin[0]
#@nonl
#@-node:putSpanRegexp
#@+node:putTerminate
def putTerminate (self,rule):

    '''Generate a match_terminate rule function for rule.

    Returns None (so putRules registers no entry) when the rule has no
    valid at_char attribute; otherwise emits the rule source.'''

    quote = self.quoteString

    n = rule.getIntAttrib('at_char')
    if n is None: return # Fix: identity comparison, not '== None'.

    s = '''\n\
def %s_rule%d(colorer, s, i):
    return colorer.match_terminate(s, i, kind=%s, at_char=%d)''' % (
        self.fileModeName,
        self.handlerCount,
        quote(rule.getStrAttrib('type').lower()),
        n)

    self.putTripleString(s)
#@nonl
#@-node:putTerminate
#@-node:putRule & rule creators
#@+node:putRules
def putRules (self):

    '''Create all rule matchers, a rules dict for each ruleset and x.rulesDictDict.'''

    d = {} ; d2Count = 0
    for ruleset in self.rulesets:
        d2 = {} # Maps a leading character to the list of rule names for it.
        start_count = self.handlerCount
        # prefix = '%s::%s' % (self.modeName,ruleset.name)
        prefix2 = '%s_%s_rules' % (self.modeName,ruleset.name)
        prefix3 = '%s_%s'% (self.modeName,ruleset.name)
        self.put('# Rules for %s ruleset.\n' % (prefix3))
        for rule in ruleset.rules:
            # ch is the char(s) keying the rule, 'keywords', or '' for no rule.
            ch = self.putRule(rule)
            self.put('\n')
            if ch == 'keywords':
                # A keywords rule is keyed under every possible keyword character.
                for ch in self.keywordChars:
                    theList = d2.get(ch,[])
                    theList.append('%s_rule%d' % (self.fileModeName,self.handlerCount-1))
                    d2 [ch] = theList
            elif ch:
                theList = d2.get(ch,[])
                theList.append('%s_rule%d' % (self.fileModeName,self.handlerCount-1))
                d2 [ch] = theList
        # Create the rules dict for the ruleset.
        self.put('\n# Rules dict for %s ruleset.\n' % (prefix3))
        d2Count += 1 ; name = 'rulesDict%d' % d2Count
        self.putDictOfLists(name,d2)
        d [prefix3] = name
    # Create rulesDictDict.
    self.put('# x.rulesDictDict for %s mode.\n' % (self.modeName))
    self.putDict('rulesDictDict',d,escape=False)
#@nonl
#@-node:putRules
#@+node:write
def write (self,theFile,language):

    '''Write the complete colorizer control file for language to theFile.'''

    # Compute all the letters that can occur in a keyword.
    # NOTE: string.letters is Python 2 only (string.ascii_letters in Python 3).
    self.keywordChars = [ch for ch in string.letters + string.digits + '@']
    for ruleset in self.rulesets:
        for rule in ruleset.rules:
            d = rule.keywordsDict
            for key in d.keys():
                for ch in key:
                    if ch not in self.keywordChars and ch not in (' ','\t','\n'):
                        self.keywordChars.append(ch)
    self.keywordChars = ''.join(self.keywordChars)
    # g.trace('keywordChars',repr(self.keywordChars))

    self.outputFile = theFile
    self.put('# Leo colorizer control file for %s mode.\n' % language)
    self.put('# This file is in the public domain.\n\n')
    self.putModeProperties(language)
    self.putAttributes()
    self.putKeywordsData()
    self.putRules()
    self.putImportDict()
#@nonl
#@-node:write
#@-node: Output...
#@+node: Printing...
#@+node:printModeAttributes, printRulesetAttributes & printAttributesHelper
def printModeAttributes (self):

    '''Print the mode-level attributes.'''
    self.printAttributesHelper('mode attributes',self.attributes)

def printRulesetAttributes (self,ruleset,tag=None):

    '''Print the attributes of a ruleset, labelled by tag.'''
    if not tag: tag = 'main ruleset'

    self.printAttributesHelper(tag,ruleset.attributes)

def printAttributesHelper (self,kind,attrs):

    '''Print one labelled attributes dict (Python 2 print statement).'''
    print '%-15s' % (kind),'attrs:',attrs
#@nonl
#@-node:printModeAttributes, printRulesetAttributes & printAttributesHelper
#@+node:printProperty
def printProperty (self,theProperty):

    '''Print a property's attributes, if it has any.'''

    # A property is a bunch.
    d = theProperty.attributes
    if d:
        self.printAttributesHelper('property',d)
#@nonl
#@-node:printProperty
#@+node:printRule
def printRule (self,rule):

    '''Print a one-line summary of rule.  For keywords rules print the
    number of keywords of each kind; otherwise print the rule's
    attributes and contents (Python 2 print statements).'''

    # A rule is a g.Bunch.
    if rule.name == 'keywords':
        print '%-15s' % ('rule:keywords'),
        d = rule.keywordsDict
        d2 = {}
        # Count how many keywords of each kind (keyword1, keyword2...).
        for key in d:
            val = d.get(key)
            d2 [val] = d2.get(val,0) + 1
        keys = d2.keys() ; keys.sort() # Python 2: keys() returns a list.
        for key in keys:
            print '%s: %d' % (key,d2.get(key)),
        print
    else:
        d = rule.attributes
        d2 = rule.contents
        if d or d2:
            print '%-15s' % ('rule:'+rule.name),
            if d:  print 'attrs:',d,
            if d2: print 'contents:',d2,
            print
#@-node:printRule
#@+node:printRuleset
def printRuleset (self,ruleset,tag):

    '''Print the attributes of ruleset, then each of its rules.'''

    self.printRulesetAttributes(ruleset,tag)

    # Bug fix: iterate the given ruleset's rules.  The old code always
    # iterated self.rulesets[0].rules, silently ignoring the parameter.
    for rule in ruleset.rules:
        self.printRule(rule)
#@nonl
#@-node:printRuleset
#@+node:printSummary
def printSummary (self,printStats=True):

    '''Print mode statistics (optionally), the mode attributes, the
    mode properties and a summary of the main ruleset.'''

    if printStats:
        print '-' * 10, 'mode statistics'
        print 'elements',self.numberOfElements
        print 'errors',self.numberOfErrors
        print 'mode attributes',self.numberOfAttributes
        print 'property attributes',self.numberOfPropertyAttributes
        print 'rule attributes',self.numberOfRuleAttributes

    self.printModeAttributes()

    for bunch in self.modeProperties:
        self.printProperty(bunch)

    self.printRuleset(self.rulesets[0],tag='main ruleset')
#@nonl
#@-node:printSummary
#@-node: Printing...
#@+node:doAttribute
def doAttribute (self,name,val):

    '''Record one xml attribute on whatever is currently being parsed:
    the present rule, the present property, the current ruleset, or
    the mode itself.'''

    name = str(name.lower())

    # Boolean attributes are converted; other values keep their case.
    if name in self.boolAttrs:
        val = g.choose(val.lower()=='true',True,False)
    else:
        val = str(val) # Do NOT lower this value!

    if self.rule:
        self.rule.attributes[name] = val
        self.numberOfRuleAttributes += 1
    elif self.presentProperty:
        self.presentProperty.get('attributes')[name] = val
        self.numberOfPropertyAttributes += 1
    elif self.inRules:
        self.rulesetAttributes[name] = val
        self.numberOfAttributes += 1
    else:
        self.attributes[name] = val
        self.numberOfAttributes += 1
#@nonl
#@-node:doAttribute
#@+node:doContent
def doContent (self,elementName,content):

    '''Accumulate element content for the current rule.  In a keywords
    rule, content maps to its keyword kind; in other rules, content is
    appended under the element name.'''

    if not content:
        return

    name = str(elementName.lower())

    if self.inRule('keywords'):
        # Each keyword maps to its kind (keyword1, keyword2, ...).
        self.rule.keywordsDict[content] = name
    elif self.rule:
        contents = self.rule.contents
        contents[name] = contents.get(name,'') + content
        self.contents = contents # Mirrors the current rule's contents dict.
#@nonl
#@-node:doContent
#@+node:endElement
def endElement (self,elementName):

    '''Handle the end of an xml element: close props/rules sections,
    finalize properties and append completed rules.'''

    name = elementName.lower()

    if name == 'props':
        # Bug fix: leaving the props element must clear inProps.
        # The old code set it to True, copied from startElement.
        self.inProps = False
    if name == 'rules':
        self.inRules = False
        ruleset = rulesetClass(self.rulesetAttributes,self.keywords,self.rulesetProperties,self.rules)
        self.rulesets.append(ruleset)
        #g.trace('rules...\n',g.listToString(self.rules))
        #g.trace('ruleset attributes...\n',g.dictToString(self.rulesetAttributes))
    if name == 'property':
        bunch = self.presentProperty
        if bunch:
            if self.inRules:
                self.rulesetProperties.append(bunch)
            else:
                self.modeProperties.append(bunch)
        else:
            self.error('end %s not matched by start %s' % (name,name))
        self.presentProperty = None
    if name in self.ruleElements:
        if self.inRule(name):
            self.rules.append(self.rule)
            self.rule = None
        else:
            self.error('end %s not matched by start %s' % (name,name))
#@nonl
#@-node:endElement
#@+node:error
def error (self,message):

    '''Count an error and delegate reporting to the content handler.'''
    self.numberOfErrors += 1
    self.contentHandler.error(message)
#@nonl
#@-node:error
#@+node:getters
def getAttributes (self):
    '''Return the mode-level attributes dict.'''
    return self.attributes

def getAttributesForRuleset (self,ruleset):
    '''Return the attributes dict of the given ruleset.'''
    return ruleset.attributes

def getFileName (self):
    '''Return the name of the file that defined this mode.'''
    return self.fileName

def getKeywords (self,n,ruleset):
    '''Return the list of keywords of kind n (keyword<n>) for ruleset.'''
    keywords = ruleset.keywords
    if not keywords:
        return []
    return keywords.get('keyword%d'%(n),[])

def getLanguage (self):
    '''Derive the language name from the mode file's base name.'''
    baseName = g.os_path_split(self.fileName)[1]
    return g.os_path_splitext(baseName)[0]

def getPropertiesForMode (self):
    '''Return the mode-level properties.'''
    return self.props

def getPropertiesForRuleset (self,name=''):
    '''Return the properties of the named ruleset, or [] if not found.'''
    ruleset = self.getRuleset(name)
    if ruleset:
        return ruleset.properties
    return []

def getRuleset(self,name=''):
    '''Return the named ruleset (case-insensitive match), the main
    ruleset when name is empty, or None when nothing matches.'''
    if not name:
        return self.rulesets[0] # The main ruleset.
    target = name.lower()
    for ruleset in self.rulesets:
        if ruleset.name.lower() == target:
            return ruleset
    return None

def getRulesets(self):
    '''Return the list of all rulesets.'''
    return self.rulesets

def getRulesForRuleset (self,name=''):
    '''Return the rules of the named ruleset, or [] if not found.'''
    ruleset = self.getRuleset(name)
    if ruleset:
        return ruleset.rules
    return []
#@nonl
#@-node:getters
#@+node:inRule
def inRule (self,elementName):

    '''True if the rule currently being parsed is named elementName.'''
    rule = self.rule
    return rule and rule.name == elementName
#@nonl
#@-node:inRule
#@+node:startElement
def startElement (self,elementName):

    '''Handle the start of an xml element within the mode.'''

    name = elementName.lower()

    if name == 'props':
        self.inProps = True
    if name == 'rules':
        self.inRules = True
        # NOTE(review): this resets self.attributes to a *list*, but
        # doAttribute treats mode attributes as a dict, and ruleset
        # attributes are accumulated in self.rulesetAttributes.  This
        # may have been meant to be self.rulesetAttributes = {}; confirm.
        self.attributes=[]
        self.keywords=[]
        self.rulesetProperties=[]
        self.rules=[]
    if name == 'property':
        if self.inProps:
            self.presentProperty = g.bunch(name=name,attributes={})
        else:
            self.error('property not in props element')
    if name in self.ruleElements:
        if self.inRules:
            self.rule = ruleClass(name=name)
            if name == 'keywords':
                # Remember the keywords rule for later ruleset creation.
                self.keywords = self.rule
        else:
            self.error('%s not in rules element' % name)
#@nonl
#@-node:startElement
#@-node:class modeClass
#@+node:class rulesetClass
class rulesetClass:

    @others
#@nonl
#@+node:ctor & __str__
def __init__ (self,attributes,keywords,properties,rules):

    '''Snapshot the parsed data for one ruleset.'''

    self.name=munge(attributes.get('set','main')) # 'main' for the unnamed ruleset.
    self.attributes=attributes.copy() # A dict.
    self.properties=properties[:] # A list.
    self.keywords=keywords # A bunch.
    self.rules=rules[:] # A list.

    # g.trace('ruleset',self.name or 'main')

    self.defaultColor = self.attributes.get('default')

def __str__ (self):

    return '<ruleset %s>' % self.name

__repr__ = __str__
#@nonl
#@-node:ctor & __str__
#@-node:class rulesetClass
#@+node:class ruleClass
class ruleClass:

    '''A class to represent one xml rule.'''

    @others
#@nonl
#@+node:ctor & __str__
def __init__ (self,name):

    '''Create an empty rule of the given kind (seq, span, keywords...).'''

    self.attributes = {}   # Maps attribute name -> value.
    self.contents = {}     # Maps element name -> accumulated content.
    self.keywordsDict = {} # Maps keyword -> kind (keywords rules only).
    self.name = name

def __str__ (self):

    return '<rule %s\nattr: %s\ncontents: %s>' % (
        self.name,g.dictToString(self.attributes),g.dictToString(self.contents))

__repr__ = __str__
#@nonl
#@-node:ctor & __str__
#@+node:rule.getters
def getBoolAttrib(self,name):
    '''Return the *string* 'True' or 'False' depending on the truth of
    the attribute: the result is interpolated into generated source.'''
    return g.choose(self.attributes.get(name),'True','False')

def getIntAttrib(self,name):
    '''Return the attribute as an int, or None if missing or invalid.'''
    val = self.attributes.get(name)
    if val is None:
        return None
    try:
        return int(val)
    except ValueError:
        g.trace('bad int argument: %s = %s' % (name,val))
        return None

def getSpan (self):
    '''Return the (begin,end) pair of this span rule's contents.'''
    return self.contents.get('begin',''),self.contents.get('end','')

def getStrAttrib(self,name):
    '''Return the attribute as a string, defaulting to the empty string.'''
    return str(self.attributes.get(name,''))

def getSeq(self,kind):
    '''Return this rule's content string for the given element kind.'''
    # g.trace(repr(self.contents))
    return self.contents.get(kind,'')
#@nonl
#@-node:rule.getters
#@-node:class ruleClass
#@+node:class contentHandler (xml.sax.saxutils.XMLGenerator)
class contentHandler (xml.sax.saxutils.XMLGenerator):

    '''A sax content handler class that handles jEdit language-description files.

    Creates mode that can be retrieved using the getMode method.'''

    @others
#@nonl
#@+node: __init__ & helpers
def __init__ (self,c,inputFileName,language):

    '''Ctor for the sax content handler that parses jEdit mode files.'''

    self.c = c
    self.inputFileName = inputFileName
    self.language = language

    # Init the base class.
    xml.sax.saxutils.XMLGenerator.__init__(self)

    # Non-mode statistics.
    self.numberOfAttributes = 0
    self.numberOfElements = 0

    # Options...
    self.ignoreWs = True # True: don't print contents with only ws.
    self.newLineAfterStartElement = [
        'keywords','mode','props','property','rules','span','eol_span',
        # 'seq',
    ]

    # Printing options
    # NOTE: developer debug toggles.  As written, printAttributes is
    # always False and printCharacters just mirrors printAllElements.
    if opt_print_elements:
        self.printAllElements = True
        self.printCharacters = False or self.printAllElements
        self.printAttributes = False and not self.printAllElements
        self.printElements = [
            #'begin','end',
            #'eol_span',
            #'keyword1','keyword2','keyword3','keyword4',
            #'mark_previous',
            #'mode',
            #'props',
            #'property',
            #'rules',
            #'span',
            #'seq',
        ]

        if self.printAllElements:
            self.suppressContent = []
        else:
            self.suppressContent = ['keyword1','keyword2','keyword3','keyword4']
    else:
        self.printAllElements = False
        self.printCharacters = False
        self.printAttributes = False
        self.printElements = []

    # Semantics: most of these should be mode ivars.
    self.elementStack = []
    self.errors = 0
    self.mode = None # The present mode, or None if outside all modes.
    self.modes = [] # All modes defined here or by imports.
#@nonl
#@-node: __init__ & helpers
#@+node:helpers
#@+node:attrsToList
def attrsToList (self,attrs):

    '''Return a list of g.Bunch(name,val) pairs, one per attribute.

    attrs: an xml.sax Attributes object passed to startElement.'''

    result = []
    for attrName in attrs.getNames():
        result.append(g.Bunch(name=attrName,val=attrs.getValue(attrName)))
    return result
#@nonl
#@-node:attrsToList
#@+node:attrsToString
def attrsToString (self,attrs,sep='\n'):

    '''Return the attributes rendered as name="val" pairs joined by sep.

    attrs: an xml.sax Attributes object passed to startElement.
    sep: the separator between rendered attributes.'''

    pairs = []
    for bunch in self.attrsToList(attrs):
        pairs.append('%s="%s"' % (bunch.name,bunch.val))
    return sep.join(pairs)
#@nonl
#@-node:attrsToString
#@+node:clean
def clean(self,s):

    '''Return s encoded to plain ascii via Leo's helper.'''
    return g.toEncodedString(s,"ascii")
#@nonl
#@-node:clean
#@+node:error
def error (self, message):

    '''Print an XML error (Python 2 print statements) and count it.'''

    print
    print
    print 'XML error: %s' % (message)
    print

    self.errors += 1
#@-node:error
#@+node:printStartElement
def printStartElement(self,name,attrs):

    '''Print a start tag (with its attributes) for debug tracing.'''

    if attrs.getLength() > 0:
        print '<%s %s>' % (
            self.clean(name).strip(),
            self.attrsToString(attrs,sep=' ')),
    else:
        print '<%s>' % (self.clean(name).strip()),

    # Some elements read better with a newline after the tag.
    if name.lower() in self.newLineAfterStartElement:
        print
#@nonl
#@-node:printStartElement
#@+node:printSummary
def printSummary (self):

    '''Print statistics gathered outside of any mode.'''

    print 'Summary...'
    print '-' * 10, 'non- mode statistics'
    print 'modes',len(self.modes)
    print 'elements', self.numberOfElements
#@nonl
#@-node:printSummary
#@-node:helpers
#@+node:sax over-rides
#@+node: Do nothing...
#@+node:other methods
def ignorableWhitespace(self):
    # NOTE(review): sax calls this handler with a whitespace argument,
    # so this zero-argument signature would raise TypeError if the
    # parser ever invoked it.  Confirm whether it can be reached.
    g.trace()

def processingInstruction (self,target,data):
    g.trace() # Trace-only stub.

def skippedEntity(self,name):
    g.trace(name) # Trace-only stub.

def startElementNS(self,name,qname,attrs):
    g.trace(name) # Namespace events are not expected in mode files.

def endElementNS(self,name,qname):
    g.trace(name) # Namespace events are not expected in mode files.
#@nonl
#@-node:other methods
#@+node:endDocument
def endDocument(self):

    # Nothing to finalize: the mode is built incrementally.
    pass


#@-node:endDocument
#@+node:startDocument
def startDocument(self):

    # Nothing to initialize beyond what the ctor already did.
    pass
#@nonl
#@-node:startDocument
#@-node: Do nothing...
#@+node:characters
def characters(self,content):

    '''Handle character data, routing it to the current mode.'''

    # content = content.replace('\r','').strip()
    content = content.replace('\r','')
    # Strip only when something non-blank remains: pure-whitespace
    # content (indentation, newlines) is preserved as-is.
    if content.strip(): content = content.strip()
    content = self.clean(content)

    elementName = self.elementStack and self.elementStack[-1].lower() or '<no element name>'

    if self.printAllElements:
        print content,
    elif self.printCharacters and content and elementName not in self.suppressContent:
        print 'content:',elementName,repr(content)

    if self.mode:
        self.mode.doContent(elementName,content)
    else:
        self.error('characters outside of mode')
#@nonl
#@-node:characters
#@+node:endElement
def endElement(self,name):

    '''Process the end of an element, then pop it from the element
    stack, verifying proper nesting.'''

    self.doEndElement(name)
    popped = self.elementStack.pop()
    assert name == popped
#@nonl
#@-node:endElement
#@+node:startElement
def startElement(self,name,attrs):

    '''Count the element (against the mode if one is open, else against
    this handler), push it on the element stack and process it.'''

    counter = self.mode or self
    counter.numberOfElements += 1

    self.elementStack.append(name)
    self.doStartElement(name,attrs)
#@nonl
#@-node:startElement
#@-node:sax over-rides
#@+node:doStartElement
def doStartElement (self,elementName,attrs):

    '''Handle the start of an element: create the mode on <mode>,
    otherwise delegate the element and its attributes to the mode.'''

    if self.printAllElements or elementName.lower() in self.printElements:
        self.printStartElement(elementName,attrs)

    elementName = elementName.lower()

    if elementName == 'mode':
        if self.mode:
            self.error('Multiple modes')
        else:
            self.mode = modeClass(self,self.inputFileName)
    elif self.mode:
        self.mode.startElement(elementName)
        for bunch in self.attrsToList(attrs):
            if self.printAttributes:
                print 'attr:',elementName,bunch.name,'=',bunch.val
            self.mode.doAttribute(bunch.name,bunch.val)
    else:
        self.error('Start element appears outside of Mode:%s' % elementName)
        for bunch in self.attrsToList(attrs):
            self.error('Attribute appears outside of Mode:%s' % bunch.name)
#@nonl
#@-node:doStartElement
#@+node:doEndElement
def doEndElement (self,elementName):

    if self.printAllElements or elementName.lower() in self.printElements:
        print '</' + self.clean(elementName).strip() + '>'

    if elementName.lower() == 'mode':
        if opt_print_summary: self.mode.printSummary()
    elif self.mode:
        self.mode.endElement(elementName)
    else:
        self.error('End element appears outside of Mode:%s' % elementName)
        for bunch in self.attrsToList(attrs):
            self.error('Attribute appears outside of Mode:%s' %bunch.name)
#@nonl
#@-node:doEndElement
#@+node:getMode
def getMode (self):

    '''Return the parsed mode, or None if any errors occurred.'''
    if self.errors:
        return None
    return self.mode
#@nonl
#@-node:getMode
#@-node:class contentHandler (xml.sax.saxutils.XMLGenerator)
#@-node:@@button jEdit2Py
#@-node:jEdit2Py
#@+node:Print missing docstrings
# Print all commands that do not have a docstring.
d = c.commandsDict
keys = d.keys() ; keys.sort

missing = []

for key in keys:
    f = d.get(key)
    if (not f.__doc__ and
        not key.endswith('-mode') and
        not key.startswith('open-with-')
    ):
        missing.append((key,f.__name__),)

missing.sort()
print '-' * 20
for data in missing:
    a,b = data
    print '%30s %s' % (a,b)
#@nonl
#@-node:Print missing docstrings
#@+node:Recursive import script
# An example of running this script:

import os

@others

# File extensions to import; extend the tuple as needed.
types = (".py",) #,".c",".html",".txt")

# The second assignment wins: edit these to choose the directory.
dir = "c:/Zope-2.6.2-src/lib/python"
dir = "c:/Zope-2.6.2-src/lib/Components"

c.beginUpdate()
try:
    importFiles(dir,types,recursive=True)
finally:
    c.endUpdate()

g.es("done",color="blue")
#@+node:importFiles
def importFiles (dir,type=None,kind="@file",recursive=False):

    '''Import files under dir into a new "imported files" node.

    type: tuple of file extensions to import (None imports all).
          NOTE: the name shadows the builtin; callers pass it positionally.
    kind: "@file" or "@root".
    recursive: if True, descend into subdirectories.'''

    v = c.currentVnode()

    # Check the params.
    if kind != "@file" and kind != "@root":
        g.es("kind must be @file or @root: " + kind)
        return

    if not g.os_path_exists(dir):
        g.es("directory does not exist: " + dir)
        return

    c.beginUpdate()
    try:
        root = createLastChildOf(v,"imported files")
        try:
            importDir (dir,type,kind,recursive,root)
            root.contract()
        except:
            g.es_exception()
    finally:
        c.endUpdate()
#@nonl
#@-node:importFiles
#@+node:importDir
def importDir (dir,types,kind,recursive,root):

    '''Import all matching files in dir as children of a new node under
    root, recursing into subdirectories when recursive is True.'''

    g.es("dir: " + dir,color="blue")

    try:
        files = os.listdir(dir)
        files2 = [] ; dirs =[]
        # Partition entries into matching files and subdirectories.
        for f in files:
            path = g.os_path_join(dir,f)
            if g.os_path_isfile(path):
                name, ext = g.os_path_splitext(f)
                if not types or ext in types:
                    files2.append(path)
            elif recursive:
                dirs.append(path)
        if len(files2) > 0 or len(dirs) > 0:
            child = createLastChildOf(root,dir)
            c.selectVnode(child)
        if len(files2) > 0:
            c.importCommands.importFilesCommand(files2,kind)
        if len(dirs) > 0:
            dirs.sort()
            for dir in dirs: # NOTE: rebinds the dir parameter.
                importDir(dir,types,kind,recursive,child)
    except:
        g.es("exception in importFiles script")
        g.es_exception()
#@-node:importDir
#@+node:createLastChildOf
def createLastChildOf (v,headline):

    '''Create and return a new last child of v with the given headline.'''
    newChild = v.insertAsLastChild()
    newChild.initHeadString(headline)
    return newChild
#@-node:createLastChildOf
#@-node:Recursive import script
#@+node:rst-to-leo
'''A script to import rst files into Leo.

This script parses a file containing reStructuredText and creates nodes
suitable for the rst3 plugin.

To run, do the following:

1. Create a call to the function ReST2Leo below.
   The argument to ReST2Leo should be the url (including a local file)
   of a file containing reStructuredText.

2. Run this node using the Execute Script command or the Run Script button.

The script will create a sibling of this node for each call to ReST2Leo in this node.
For example, this node will create a sibling called 'reStructuredText Demonstration'.
The root headline of the created tree will be the top-level heading of the imported file.
'''

@language python
@tabwidth -4

__version__ = '0.1'
<< version history >>

<< define valid_underline_characters >>

@others

# Files to test with: enable exactly one of these calls.
if 1:
    ReST2Leo('http://docutils.sourceforge.net/docs/user/rst/demo.txt')
if 0:
    ReST2Leo('http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.txt')
if 0:
    ReST2Leo('http://springfed.com/ac/IssueNo0003Info/editform')
#@nonl
#@+node:<< define valid_underline_characters >>
# All the punctuation characters that reStructuredText accepts as
# section-title underline (adornment) characters.

valid_underline_characters = [
    '!','"','#','$','%','&',"'",'(',')','*','+',
    ',','-','.','/',':',';','<','=','>','?','@',
    '[','\\',']','^','_','`','{','|','}','~',
]
#@nonl
#@-node:<< define valid_underline_characters >>
#@+node:<< version history >>
@nocolor
@

v 0.1: Kent Tenney with minor mods by EKR.
#@nonl
#@-node:<< version history >>
#@+node:class ParseReST
class ParseReST:
    """Processes a chunks of ReST, creating a list of nodes/sections
    """
    @others
#@nonl
#@+node:__init__
def __init__(self, input):

    """Initialize document-level state.

    input: the ReST source, either a single string (split on newlines)
    or an already-split list of lines.
    """
    if type(input) == type('string'):
        self.lines = input.split("\n")
    else:            
        self.lines = input

    # Index of the line currently being processed.
    self.index = 0 

    # For each section gather title, contents and underline character.
    # Over-under titles are indicated by
    # 2 character strings for underline_character.
    # The initial section is the root.
    self.section = {'title':'root', 'contents':[], 'underline_character':'root'}
    # The completed sections, in document order.
    self.sections = []
#@nonl
#@-node:__init__
#@+node:isCharacterLine
def isCharacterLine(self):
    """Return True if the current line is non-empty and consists
    entirely of repetitions of a single valid underline character.
    """
    line = self.lines[self.index]
    if not line:
        return False
    first = line[0]
    if first not in valid_underline_characters:
        return False
    # Every character must match the first one.
    return line == first * len(line)
#@nonl
#@-node:isCharacterLine
#@+node:isTransition
def isTransition(self):
    """self.index points at a character line.  Return True if blank
    lines surround it, making it a reST transition.

    The first and last lines of the document can never be transitions:
    the old unguarded lookups wrapped around (index - 1 == -1 reads the
    last line) or raised IndexError at end of file.
    """
    if self.index == 0 or self.index + 1 >= len(self.lines):
        return False
    prev = self.lines[self.index - 1]
    next = self.lines[self.index + 1]
    return len(prev) == 0 and len(next) == 0
#@nonl
#@-node:isTransition
#@+node:isUnderline
def isUnderline(self):
    """self.index points at a character line.  Return True if the
    preceding line is a section title: non-blank, no longer than this
    line, and preceded by a blank line (or the start of the document).

    Guarding index < 2 fixes the old negative-index wraparound, which
    read the *last* lines of the document near the start.
    """
    if self.index < 1:
        return False
    current = self.lines[self.index].strip()
    prev = self.lines[self.index - 1].strip()
    if self.index >= 2:
        prevprev = self.lines[self.index - 2].strip()
    else:
        # The document's very first line may be a title.
        prevprev = ''

    return len(prev) > 0 and \
    len(prev) <= len(current) and \
    len(prevprev) == 0
#@nonl
#@-node:isUnderline
#@+node:isUnderOverline
def isUnderOverline(self):
    """self.index points at a character line.  Return True if a title
    line (non-blank, no longer than this line) follows, itself followed
    by an identical character line: the over/under heading style.

    Both lookahead lines may be missing at the end of the document;
    the old code guarded only the second lookup and raised IndexError
    when the character line was the last line of the file.
    """
    current = self.lines[self.index].strip()
    try:
        title = self.lines[self.index + 1].strip()
        # Note: the closing line is deliberately not stripped, matching
        # the original comparison semantics.
        closing = self.lines[self.index + 2]
    except IndexError:
        return False

    return (closing == current) and (len(title) > 0) \
    and len(title) <= len(current)
#@nonl
#@-node:isUnderOverline
#@+node:isSectionHead
def isSectionHead(self):
    """The current line is a character line: decide whether it closes a
    section heading; if so, close the current section, start the new
    one, advance self.index past the heading, and return True.
    Return False for plain content (e.g. a transition line).
    http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#sections

    Fixes vs. the original: the unreachable `raise` after `return True`
    is gone, the eager lines[index+1] lookup (IndexError at end of
    file) is deferred into the over/under branch, and the fall-through
    now returns False explicitly instead of None.
    """
    # A transition has a blank line before and after; not a heading.
    if self.isTransition():
        return False

    # Underlined section heading.
    if self.isUnderline():
        # Previous to discovering the underline, we appended the
        # section title to the current section's contents.
        # Remove it before closing the section.
        self.section['contents'].pop()
        self.closeCurrentSection()
        self.section['underline_character'] = self.lines[self.index][0]
        self.section['title'] = self.lines[self.index - 1]
        # Step index past this line.
        self.index += 1
        return True

    # Over-under section heading.
    if self.isUnderOverline():
        self.closeCurrentSection()
        # A two-character string marks the over/under style.
        self.section['underline_character'] = self.lines[self.index][0:2]
        # Leading whitespace is allowed in over-under style; remove it.
        self.section['title'] = self.lines[self.index + 1].strip()
        # Step index past overline, section title, and underline.
        self.index += 3
        return True

    # A character line that is none of the above is plain content.
    return False
#@nonl
#@-node:isSectionHead
#@+node:closeCurrentSection
def closeCurrentSection(self):
    """Append the finished section to self.sections and start a
    fresh, empty one (the new section title ended the previous one)."""
    finished = self.section
    self.sections.append(finished)
    self.section = dict(title='', contents=[], underline_character='')
#@nonl
#@-node:closeCurrentSection
#@+node:insertTitle
def insertTitle(self, uc, isSubTitle = False):
    """Merge self.sections[1] (the first real section) into
    self.sections[0] (the root section).

    This works the same for title and subtitle: merging the title
    deletes sections[1], which makes the subtitle become sections[1].

    uc: the underline character(s) of the merged section.
    isSubTitle: False for the document title (which also becomes the
    root section's title), True for the document subtitle.
    """
    title = self.sections[1]['title']

    if not isSubTitle:
        self.sections[0]['title'] = title

    # Rebuild an over/under adornment: extend the character line by
    # four characters and pad the title by two spaces.
    charline = (len(title) * uc[0]) + (4 * uc[0])
    title = '  ' + title

    contents = self.sections[0]['contents']
    contents.extend(['', charline, title, charline, ''])
    # Append the merged section's body lines (extend, not append,
    # so we add lines rather than a nested list).
    contents.extend(self.sections[1]['contents'])

    del self.sections[1]
#@nonl
#@-node:insertTitle
#@+node:fixupSections
def fixupSections(self):
    """Make corrections to the list of sections
    to reflect the syntax for 'Title' and 'Subtitle'.

    If the first section heading is a unique over/under
    it is a title, and should stay in the root section.

    If the second section heading is a unique over/under
    it is a subtitle and should remain in the root section.
    """

    # True if no section from index 'start' onward reuses underline uc.
    def isUnique(uc, start):
        index = start
        while index < len(self.sections):
            if self.sections[index]['underline_character'] == uc:
                return False
            index += 1
        return True                

    # self.sections[0] is root, a special case.
    # Over/under adornments have a 2-character underline_character.
    underline_first = self.sections[1]['underline_character'] 
    if len(underline_first) > 1:
        if isUnique(underline_first, 2):
            # the section head is the document title and must
            # be added to the root section
            self.insertTitle(underline_first)
    if len(self.sections) > 2:
        underline_second = self.sections[2]['underline_character'] 
        if len(underline_second) > 1:
            if isUnique(underline_second, 3):
                # the section head is the document subtitle and must
                # be added to the root section
                self.insertTitle(underline_second, True)
#@nonl
#@-node:fixupSections
#@+node:contents2String
def contents2String(self):
    """Join each section's list of content lines into one
    newline-separated string, ready for a Leo body."""
    for entry in self.sections:
        entry['contents'] = '\n'.join(entry['contents'])
#@nonl
#@-node:contents2String
#@+node:processLines
def processLines(self):
    """Loop through the lines of ReST input, building a list
    of sections. A section consists of::
        -title
        -contents
        -underline_character
    Returns self.sections with each 'contents' joined into a string.
    """
    line_count = len(self.lines)

    while self.index < line_count:
        if self.isCharacterLine() and self.isSectionHead():
            # isCharacterLine() and isSectionHead() do all the housekeeping
            # required (closing sections and advancing self.index).
            pass
        else:        
            self.section['contents'].append(self.lines[self.index])
            self.index += 1

    self.closeCurrentSection()
    if len(self.sections) > 1:
        # NOTE(review): sections[0] is the root, whose
        # underline_character is always 'root' (len 4 > 1), so this
        # test always passes -- possibly sections[1] was intended.
        if len(self.sections[0]['underline_character']) > 1:
            self.fixupSections()
    self.contents2String()
    return self.sections
#@nonl
#@-node:processLines
#@-node:class ParseReST
#@+node:class BuildLeo
class BuildLeo:
    """Build a Leo outline from the section list produced by ParseReST."""
    @others
#@nonl
#@+node:__init__
def __init__(self, nodes):
    """Remember the parsed sections and set up level bookkeeping.

    nodes: the list returned by ParseReST.processLines; each item is a
    dict with 'underline_character', 'title' and 'contents' keys.
    """
    self.nodes = nodes

    # self.levels is a dictionary: the keys are underline_characters
    # and the value is the last Leo node created at that level.
    self.levels = {}

    # self.underline_characters lists the underline characters
    # in the order of levels. The first is always 'root'.
    self.underline_characters = ['root',]




#@-node:__init__
#@+node:processNodes
def processNodes(self):
    """Step through the list of nodes created by ParseReST,
    creating the corresponding Leo outline nodes.

    Relies on the Leo script global p (the current position).
    """
    # Create the root node as a sibling of the current node.
    root = p.insertAfter()
    self.levels['root'] = root
    root.setBodyString(self.nodes[0]['contents'])
    root.setHeadString(self.nodes[0]['title'])

    # Step through the rest of the nodes.
    index = 1
    while index < len(self.nodes):
        uc = self.nodes[index]['underline_character']

        if uc in self.levels:
            # This level already exists: the parent is the last node
            # created at the previous level.
            parent_index = self.underline_characters.index(uc) - 1
            parent = self.levels[self.underline_characters[parent_index]]
        else:
            # First time this underline character is encountered:
            # we are descending into a new sublevel whose parent is
            # the most recently created level.
            parent = self.levels[self.underline_characters[-1]]
            self.underline_characters.append(uc)

        current = parent.insertAsLastChild()
        self.levels[uc] = current
        current.setHeadString(self.nodes[index]['title'])
        current.setBodyString(self.nodes[index]['contents'])

        index += 1
#@nonl
#@-node:processNodes
#@-node:class BuildLeo
#@+node:ReST2Leo
def ReST2Leo(input):
    """ A wrapper for ParseReST and BuildLeo
    """

    if type(input) == type(""):
        if input.startswith("http"):
            from urllib import urlopen
            try:
                data = urlopen(input).read()
            except HTTPError:
                print 'Unable to open page %s' % input
                return
        else:
            try:
                data = open(input, 'r').read()
            except IOError:
                print 'Unable to open file %s' % input
                return

    parsed = ParseReST(data)
    sections = parsed.processLines()
    nodes = BuildLeo(sections)
    nodes.processNodes()
    c.redraw()    
#@nonl
#@-node:ReST2Leo
#@-node:rst-to-leo
#@+node:Run a Lua program
# http://sourceforge.net/forum/message.php?msg_id=3581176
# By: e

"""
run lua program
work alike for perl too

edit in path to lua if required
make a script button


no fallback for no subprocess
download from eff.bot.org 
if you are on python <2.4

added option to pipe script to stdin

todo:
if your os can fork, use a fork.
"""

import subprocess as sub
from dynacommon import stripSentinels
import sys

# Path to the lua interpreter; edit as needed for your platform.
if sys.platform[:3] == 'win':
    ex = 'c:/UTIL/lua.exe'
else:
    ex = '/usr/bin/lua'

# Get this node's script and strip Leo's sentinel comments from it.
script = g.getScript(c, p, forcePythonSentinels= False)
#print script

source = stripSentinels(script)
#print source

def run_cmd(cmdlst, tostdin=None):
    """Run cmdlst, optionally feeding tostdin to its stdin.
    Return (returncode, stdout_text, stderr_text)."""
    proc = sub.Popen(
        cmdlst,
        stdin= sub.PIPE,
        stdout= sub.PIPE, stderr= sub.PIPE,
        universal_newlines= True)
    stdout_text, stderr_text = proc.communicate(tostdin)
    return (proc.wait(), stdout_text, stderr_text)

# Send the complete script on stdin ('-' makes lua read from stdin);
# many command-line programs have this same option.
ret,sto,sde = run_cmd([ex, '-'], source)

# Print stdout only on success; always show stderr if any.
if not ret:
    print sto
if sde: print sde
#@nonl
#@-node:Run a Lua program
#@+node:Runniung tcl scripts from the body pane
@language tcltk
#@nonl
#@+node:run tcl 
# To use this script, you must make this a script button, either with the
# 'Script Button' button or by making this an @button node.

# Executes body text of selected node as a tcl script.
# For more information, see http://wiki.tcl.tk/6878

# This statement allows you to organize your scripts using noweb markup.
s = g.getScript(c,p,forcePythonSentinels=False)
# Evaluate the script in Leo's own Tk interpreter and show the result.
g.es(g.app.root.tk.eval(s))
#@nonl
#@-node:run tcl 
#@+node:My first tcl script
# This is a comment
set x 50
return [expr $x*10]
#@nonl
#@-node:My first tcl script
#@+node:My second tcl scrip, organized with noweb markup
@others
#@nonl
#@+node:the actual script
# This is a comment
set x 50 +
return [expr $x*10]
#@nonl
#@-node:the actual script
#@-node:My second tcl scrip, organized with noweb markup
#@-node:Runniung tcl scripts from the body pane
#@+node:Script to import a file
# Note: the source files contain mixed tabs/blanks, and that's very hard for Leo's imports to handle.

@tabwidth 8
@language python

path = r"c:\Python23\Lib\site-packages\Pmw\Pmw_1_1\lib\PmwPanedWidget.py"

path = r"c:\prog\PmwPanedWidget.py" # The same file with tabs converted to 8 blanks.

# NOTE(review): only the last assignment wins; the earlier paths look
# like alternatives meant to be selected by hand -- confirm before use.
path = r"c:\Python23\Lib\site-packages\Pmw\Pmw_1_1\demos\All.py"

assert g.os_path_exists(path)

c.importCommands.importFilesCommand([path],"@file")
#@nonl
#@-node:Script to import a file
#@+node:Scripts demonstrating how to do anything with Leo menus
#@+node:Test of createMenuItemsFromTable
# Demonstrate adding a menu item via Leo's createMenuItemsFromTable.
def callback(*args,**keys): g.trace()

# Each entry is (label, accelerator-or-None, callback).
table = (("Test",None,callback),)

c.frame.menu.createMenuItemsFromTable('File',table)
#@nonl
#@-node:Test of createMenuItemsFromTable
#@+node:Add a menu item after Open With
# Insert a 'Test' item into the File menu after the Open With item,
# bypassing Leo's menu API.
def callback(*args,**keys):
    g.trace('after Open With')

# Get the actual Tkinter menu.
fileMenu = c.frame.menu.getMenu('File')

# Now use raw Tkinter calls to insert the menu item at position 3.
fileMenu.insert(3,'command',label='Test',command=callback) 
#@nonl
#@-node:Add a menu item after Open With
#@-node:Scripts demonstrating how to do anything with Leo menus
#@+node:Scripts that make unit tests
#@+node:@command make-test
# Create a new '@test ' sibling with work/before/after children,
# ready for the do-before and do-after commands.
c.beginUpdate()
try:
    p1 = p.insertAfter()
    c.setHeadString(p1,'@test ')
    body = 'import leoTest\nleoTest.runEditCommandTest(c,p)'
    c.setBodyString(p1,body)
    for s in ('work','before','after'):
        p2 = p1.insertAsLastChild()
        c.setHeadString(p2,s)
    p1.expand()
finally:
    c.endUpdate()
    c.editPosition(p1)
#@nonl
#@-node:@command make-test
#@+node:@command do-before
@
p should be in tree whose root is a @test node containing 'work', 'before' and
'after' children. The work node should have body text. If all is as expected,
copy the body text the work node to the before node, and represent the selection
range of the work in the headline of the before node.
@c

@others

# Copy the work node's body to the before node, clear the after node,
# and record the current selection range in the before node's headline.
sel = getSel(c)
top,work,before,after = findNodes(p)
if top and work.bodyString():
    c.beginUpdate()
    try:
        c.setBodyString(before,work.bodyString())
        c.setBodyString(after,'')
        putSelectionInHeadline(c,before,'before',sel)
    finally:
        c.endUpdate()
else:
    g.es_print('do-before: not in a proper @test tree')
#@nonl
#@+node:getSel
def getSel(c):
    """Return the body pane's selection range; when there is no
    selection, return a zero-length range at the insert point."""
    body_widget = c.frame.body.bodyCtrl
    selection = g.app.gui.getSelectionRange(body_widget)
    if selection:
        return selection
    insert = g.app.gui.getInsertPoint(body_widget)
    return (insert,insert)
#@nonl
#@-node:getSel
#@+node:findNodes
def findNodes(p):

    '''Find the top, work, before and after nodes.
    p should be in tree whose root is a @test node containing
    'work', 'before' and 'after' children.
    Returns (top,work,before,after) or (None,None,None,None).'''

    # Walk up from p; the loop rebinds p, so afterwards p is either
    # the @test ancestor or the outermost ancestor reached.
    for p in p.self_and_parents_iter():
        if p.headString().startswith('@test '):
            break
    top    = p and p.copy()
    work   = top and top.firstChild() 
    before = work and work.next()     
    after  = before and before.next()
    if (
        work   and work.headString().startswith('work') and
        before and before.headString().startswith('before') and
        after  and after.headString().startswith('after')
    ):
        return top,work,before,after
    else:
        return None,None,None,None
#@nonl
#@-node:findNodes
#@+node:putSelectionInHeadline
def putSelectionInHeadline (c,p,prefix,sel):
    """Set p's headline to '<prefix> sel=<start>,<end>'.
    An empty sel defaults to ('1.0','1.0')."""
    if not sel:
        sel = ('1.0','1.0')
    start, end = sel[0], sel[1]
    headline = '%s sel=%s,%s' % (prefix,start,end)
    c.setHeadString(p,headline)
#@-node:putSelectionInHeadline
#@-node:@command do-before
#@+node:@command do-after
@
p should be in tree whose root is a @test node containing 'work', 'before' and
'after' children. If all is as expected, copy the work node to the after node,
and represent the selection range of the work node in the headline of the after node.
@c

@others

# Copy the work node's body to the after node and record the current
# selection range in the after node's headline.
sel = getSel(c)
top,work,before,after = findNodes(p)
if top:
    c.beginUpdate()
    try:
        # NOTE(review): unlike do-before, this does not require
        # work.bodyString() to be non-empty -- confirm intended.
        c.setBodyString(after,work.bodyString())
        putSelectionInHeadline(c,after,'after',sel)
    finally:
        c.endUpdate()
else:
    g.es_print('do-after: not in @test tree')
#@nonl
#@+node:getSel
def getSel(c):

    """Return the body pane's selection range; when there is no
    selection, return a zero-length range at the insert point."""
    w = c.frame.body.bodyCtrl
    sel = g.app.gui.getSelectionRange(w)
    if not sel:
        i = g.app.gui.getInsertPoint(w)
        sel = (i,i)
    return sel
#@nonl
#@-node:getSel
#@+node:findNodes
def findNodes(p):

    '''Find the top, work, before and after nodes.
    p should be in tree whose root is a @test node containing
    'work', 'before' and 'after' children.
    Returns (top,work,before,after) or (None,None,None,None).'''

    # Walk up from p; the loop rebinds p to the @test ancestor if any.
    for p in p.self_and_parents_iter():
        if p.headString().startswith('@test '):
            break
    top    = p and p.copy()
    work   = top and top.firstChild()
    before = work and work.next()
    after  = before and before.next()
    if (
        work   and work.headString().startswith('work') and
        before and before.headString().startswith('before') and
        after  and after.headString().startswith('after')
    ):
        return top,work,before,after
    else:
        return None,None,None,None
#@nonl
#@-node:findNodes
#@+node:putSelectionInHeadline
def putSelectionInHeadline (c,p,prefix,sel):

    """Set p's headline to '<prefix> sel=<start>,<end>'.
    An empty sel defaults to ('1.0','1.0')."""

    # g.trace(p.headString(),repr(sel))

    if not sel:
        sel = ('1.0','1.0')

    s = '%s sel=%s,%s' % (prefix,sel[0],sel[1])

    c.setHeadString(p,s)

#@-node:putSelectionInHeadline
#@-node:@command do-after
#@+node:@test kill-line
import leoTest
leoTest.runEditCommandTest(c,p)
#@nonl
#@+node:work
line 1
line 3
#@nonl
#@-node:work
#@+node:before sel=2.0,2.6
line 1
line 2
line 3
#@nonl
#@-node:before sel=2.0,2.6
#@+node:after sel=2.0,2.0
line 1
line 3
#@nonl
#@-node:after sel=2.0,2.0
#@-node:@test kill-line
#@-node:Scripts that make unit tests
#@+node:Write cleaned files to 'clean' directory
# Writes all derived files in the outline to 'clean' directories.
# For example, writes test/foo.py to test/clean/foo.py.

@others

# Write every derived-file tree in the outline to its 'clean' directory.
for p in c.allNodes_iter():
    if p.isAnyAtFileNode():
        clean_file(p)

#@+node:clean_file
def clean_file(p):

    """Write the derived file for p, without sentinels, into the
    'clean' subdirectory beside its normal location
    (e.g. test/foo.py -> test/clean/foo.py).

    The write goes to a string first; p's tnodeList (which at.write
    may modify) is saved and restored around the write."""

    at = c.atFileCommands

    if hasattr(p.v.t,'tnodeList'):
        has_list = True
        old_list =  p.v.t.tnodeList[:]
    else:
        has_list = False

    at.write(
        root=p,nosentinels=True,
        thinFile=False,
        scriptWrite=True,
        toString=True,
        write_strips_blank_lines=False)

    if has_list:
        p.v.t.tnodeList = old_list

    fileName = g.os_path_normpath(g.os_path_join(
        at.default_directory,
        'clean',
        g.os_path_basename(p.anyAtFileNodeName())))

    # Adapted from at.openFileForWritingHelper
    path = g.os_path_dirname(fileName)
    if not g.os_path_exists(path):
        g.es('clean directory does not exist',path)
        return

    try:
        # open() instead of the deprecated file() builtin; try/finally
        # guarantees the handle is closed even if write() fails.
        f = open(fileName,'w')
        try:
            f.write(at.stringOutput)
        finally:
            f.close()
        g.es_print('wrote',fileName)
    except IOError:
        g.es_print('can not write',fileName,color='red')
        g.es_exception()
#@-node:clean_file
#@-node:Write cleaned files to 'clean' directory
#@+node:zodb get/put scripts
#@+node:@@button zodb-get-tree
# Fetch the vnode tree stored for this .leo file from the ZODB store.
name = c.config.getString('zodb_storage_file')
db = g.init_zodb(name)
connection = db.open()
try:
    fileName = c.fileName()
    root = connection.root()
    # Trees are keyed by the .leo file's name; see zodb-put-tree.
    v = root.get(fileName)
    if v:
        g.es_print('get-tree: %s' % repr(v))
    else:
        g.es_print('get-tree failed: %s' % fileName)
finally:
    # NOTE(review): get_transaction() is the old ZODB 3.2-era API,
    # and this commits even on failure -- confirm intended.
    get_transaction().commit()
    connection.close()
#@nonl
#@-node:@@button zodb-get-tree
#@+node:@@button zodb-get-node
# Fetch a single vnode, keyed by headline, from the ZODB store.
name = c.config.getString('zodb_storage_file')
db = g.init_zodb(name)
connection = db.open()
try:
    fileName = c.fileName()
    root = connection.root()
    # Nodes are stored under 'vnode: <headline>'; see zodb-put-node.
    key = 'vnode: %s' % p.headString()
    v = root.get(key)
    if v:
        g.es_print('get-node %s: %s' % (key,repr(v)))
    else:
        g.es_print('get-node %s failed.' % key)
finally:
    # NOTE(review): get_transaction() is the old ZODB 3.2-era API.
    get_transaction().commit()
    connection.close()
#@nonl
#@-node:@@button zodb-get-node
#@+node:@@button zodb-put-tree
# Store this outline's entire vnode tree, keyed by the .leo file name.
name = c.config.getString('zodb_storage_file')
db = g.init_zodb(name)
connection = db.open()
try:
    fileName = c.fileName()
    root = connection.root()
    root[fileName] = c.rootPosition().v
finally:
    # NOTE(review): get_transaction() is the old ZODB 3.2-era API.
    get_transaction().commit()
    connection.close()
#@nonl
#@-node:@@button zodb-put-tree
#@+node:@@button zodb-put-node
# Store a single detached vnode, keyed by its headline.
name = c.config.getString('zodb_storage_file')
db = g.init_zodb(name)
connection = db.open()
try:
    fileName = c.fileName()
    root = connection.root()
    # detach() unlinks the vnode from the outline before storing it.
    v = p.v.detach()
    key = 'vnode: %s' % v.headString()
    g.es_print('put-node %s :%s' % (v.headString(),repr(v)))
    root[key] = v
finally:
    # NOTE(review): get_transaction() is the old ZODB 3.2-era API.
    get_transaction().commit()
    connection.close()
#@nonl
#@-node:@@button zodb-put-node
#@-node:zodb get/put scripts
#@-node:Important
#@+node:Others
#@+node:AutoIt script from e
@ http://sourceforge.net/forum/message.php?msg_id=3039793

here's a short script that opens a Leo, then a new Leo from that one, pauses, then exits.

still have to work out how to collect error output, especially if Leo doesn't start at all
can't then depend on Leo error reporting!

make a node
@url ./leoopen1.au3

make another
@nosent leoopen1.au3
@c

@language elisp

; AutoIt Version: 3.0 a BASIC like language
; http://www.hiddensoft.com/AutoIt/

; Opens Leo with no filename, then opens a new, closes it
; Preliminary, will eventually programmatically create
; leoPlugins.txt and various leoSettings.leo
; and run commands in all permutations looking for failures.

; Paths are hardwired but later scripts will be created on the fly
; maybe Leo can have a -trace mode to output to file a log of activities?

; exit when CTRL+ALT+x is pressed
HotKeySet("^!x", "MyExit")

Func MyExit()
    Exit 
EndFunc 

Opt("SendKeyDelay", 1)
Opt("WinWaitDelay", 80)

;fix path to leo.py
Run("python c:\c\leo\V43leos\leo\src\leo.py")
Sleep(2700) 

WinWaitActive("untitled")   

Sleep(700) 
Send("!Fn")  ; how to tell if there are errors?
Sleep(2700) 

WinWaitActive("untitled1")
Send("!Fx")
Sleep(2700) 

;careful you don't close the leo you are working from
Send("!Fx")
#@nonl
#@-node:AutoIt script from e
#@+node:Clear log frame
# Clear the log pane by deleting all text from its Tk Text widget.
t = c.frame.log.logCtrl
t.delete("1.0","end")
#@nonl
#@-node:Clear log frame
#@+node:Compare files
p1 = r"c:\prog\leoMenu(1).py"
p2 = r"c:\prog\leoMenu(2).py"

f1 = open(p1,"rb") ; f2 = open(p2,"rb")
lines1 = f1.readlines()
lines2 = f2.readlines()
f1.close() ; f2.close()

f1 = open(p1,"rb") ; f2 = open(p2,"rb")
text1 = f1.read()
text2 = f2.read()
f1.close() ; f2.close()

cr1 = text1.count('\r')
cr2 = text2.count('\r')

print '-'*20
print "lines ",len(lines1),len(lines2)
print "chars ",len(text1),len(text2)
print "non-cr",len(text1)-cr1,len(text2)-cr2
print "cr    ",cr1,cr2
#@nonl
#@-node:Compare files
#@+node:Compare ignoring newlines
# Compare two files both byte-for-byte ('rb') and after newline
# translation (text mode), to see whether the only differences are
# in line endings.
p1 = r"c:\prog\leoMenu(1).py"
p2 = r"c:\prog\leoMenu(2).py"

f1 = open(p1,"rb") ; f2 = open(p2,"rb")
g.es("equal (raw mode)",f1.read()==f2.read())
f1.close() ; f2.close()

f1 = open(p1) ; f2 = open(p2)
g.es("equal (text mode)",f1.read()==f2.read())
f1.close() ; f2.close()
#@nonl
#@-node:Compare ignoring newlines
#@+node:Convert ChoiceMail .ini file to .csv file
@ These scripts work on the child nodes as shown.

The address children contained the actual data (removed for privacy)
#@nonl
#@+node:Scipt to remove rejected entries
p = p.firstChild().firstChild()
s = p.bodyString()
lines = g.splitLines(s)
reject = 'reason=Rejected before registration'
result = []
entries = []
for line in lines:
    if line.startswith('['):
        # Add all previous entries
        for entry in entries:
            result.append(entry)
        entries = []
        entries.append(line)
    elif line.startswith(reject):
        # Kill all previous entries.
        entries = []
    else:
        entries.append(line)

result = ''.join(result)
c.setBodyString(p,result)
print 'done!'
#@nonl
#@+node:@killcolor
@killcolor
#@+node:address
#@-node:address
#@-node:@killcolor
#@-node:Scipt to remove rejected entries
#@+node:Script to clean address
# Strip bookkeeping lines from the address data in this node's first
# grandchild, keeping only names (without the 'name=' prefix) and any
# lines that match no known prefix.
p = p.firstChild().firstChild()
s = p.bodyString()
lines = g.splitLines(s)
# Lines starting with any of these prefixes are discarded.
prefixes = (
    'access=',
    'bccsender=',
    'filtered=',
    'registered=',
    'messagetocount=',
    'messagecount=',
    'lastmessagedate=',
    'lastsource=',
    'replyToAddresses=',
    'creationdate=',
    'reason=Approved before registration',
    'reason=Address Book Contact',
    'registrationDate=',
    'registrationRequestSent=',
    'reason=Pre-approved sender',
    'preaccepted=1',
)
result = []
for line in lines:
    for prefix in prefixes:
        if line.startswith(prefix):
            # print 'removing',line
            break
    else:
        # for/else: no prefix matched, so keep the line.
        if line.startswith('name='):
            # Drop the 'name=' prefix itself.
            result.append(line[5:])
        else:
            result.append(line)

result = ''.join(result)
c.setBodyString(p,result)
print 'done!'
#@nonl
#@+node:@killcolor
@killcolor
#@+node:address
#@-node:address
#@-node:@killcolor
#@-node:Script to clean address
#@+node:Script to create comma delimited lists
address_p = p.firstChild()
result_p = p.firstChild().next()

result = []
entries = []
for child in address_p.children_iter():
    s = child.bodyString()
    lines = g.splitLines(s)
    for line in lines:
        if line.startswith('['):
            # Add all previous entries
            if entries:
                if len(entries) > 1:
                    # entries2 = [entries[0],entries[1]]
                    # entries2.extend(entries[2:])
                    result.append(','.join(entries[:2]))
                else:
                    result.append(entries[0])
            entries = [] ; entries2 = []
            entries.append(line.rstrip()[1:-1])
        elif line.strip():
            entries.append(line.rstrip())

result.sort()
result = '\n'.join(result)
c.setBodyString(result_p,result)
print 'done!'
#@nonl
#@+node:address
#@-node:address
#@+node:result
#@-node:result
#@-node:Script to create comma delimited lists
#@-node:Convert ChoiceMail .ini file to .csv file
#@+node:Delete All Icons
# Same as delete-all-icons command (now removed).
# A script seems safer because it can not be executed by mistake.

c.beginUpdate()
try:
    for p in c.allNodes_iter():
        if hasattr(p.v.t,"unknownAttributes"):
            a = p.v.t.unknownAttributes
            iconsList = a.get("icons")
            if iconsList:
                # Clear the icon list and its vertical offset,
                # and mark the outline changed.
                a["icons"] = []
                a["lineYOffset"] = 0
                p.setDirty()
                c.setChanged(True)
finally:
    c.endUpdate()
#@-node:Delete All Icons
#@+node:Script to backup Outlook Express files
# http://support.microsoft.com/default.aspx/kb/270670
# See Notes child for the manual instructions taken from the URL above.

import glob
import os

# The Outlook Express store folder for this identity;
# see the Notes child for how to find yours.
theDir = (r'C:\Documents and Settings\Ed\Local Settings\Application Data\Identities' +
    r'\{B14A7A05-7759-4249-A9C2-FDC8B4FDC50C}\Microsoft\Outlook Express')

files = glob.glob(theDir + r'\*.*')
files.sort()

dest = r'c:\OutlookBackup'
if not g.os_path_exists(dest):
    g.es_print('***** Creating %s' % dest)
    os.mkdir(dest)

for inputName in files:
    base = g.os_path_basename(inputName)
    outputName = g.os_path_join(dest,base)
    # Copy in binary mode: the .dbx mail stores are binary files, and
    # text mode would corrupt them on Windows (newline translation).
    inputFile = open(inputName,'rb')
    s = inputFile.read()
    inputFile.close()
    outputFile = open(outputName,'wb')
    outputFile.write(s)
    outputFile.close()
    g.es_print('***** Wrote %s' % outputName)
#@+node:Notes
@killcolor
@

To backup Outlook Express data:
• Copy mail files to a backup folder 
• Export the Address Book to a file 
• Export the mail account to a file 
• Export the news account to a file

To restore or import Outlook Express data:
• Import messages from the backup folder 
• Import the Address Book file 
• Import the mail account file 
• Import the news account file 

Step 1: Copy Mail Files to a Backup Folder (Make a backup copy of your Outlook Express e-mail message files):

(EKR: get the file name)

1. On the Tools menu, click Options. 
2. On the Maintenance tab, click Store Folder. 
3. Select the folder location, and then press CTRL+C to copy the location.
4. Click Cancel, and then click Cancel again to close the dialog box. 

r'C:\Documents and Settings\Ed\Local Settings\Application Data\Identities\{B14A7A05-7759-4249-A9C2-FDC8B4FDC50C}\Microsoft\Outlook Express'

(EKR: open the directory)

5. Click Start, and then click Run. 
6. In the Open box, press CTRL+V, and then click OK.

(EKR: Copy all files to a folder)

7. On the Edit menu, click Select All. 
8. On the Edit menu, click Copy, and then close the window.
9. Right-click any empty space on your desktop, click New, and then click Folder. 
10. Type mail backup for the folder name, and then press ENTER. 
11. Double-click the Mail Backup folder to open it. 
12. On the Edit menu, click Paste. 
13. Close the Mail Backup window. 

Step 2: Export the Address Book to a File

A .WAB (Windows Address Book) file is used by Outlook Express 5.x and 6.0 versions, even if multiple Identities are used. The individual data for each Identity is stored in a folder, by user name, within the .WAB file in use.

Exporting this data, while logged in to a specific Identity, is the only means of segregating the Address Book data. If the .WAB file becomes dissociated from the user Identities, the data can only be exported in total - not folder by folder.

Another reason to export the .WAB file to a .csv file is that if the .WAB file is shared with Microsoft Outlook, the addresses are stored in the *.pst file in Outlook. When you export the file from the Outlook Express File menu to a *.csv file it exports the correct contacts. If the Address Book is shared with Microsoft Outlook, you are not able to export from within the Address Book on the File menu. This option is dimmed or not available.

To export your Outlook Express address book: 1. On the File menu, click Export, and then click Address Book. 
2. Click Text File (Comma Separated Values), and then click Export. 
3. Click Browse. 
4. Locate the Mail Backup folder that you created. 
5. In the File Name box, type address book backup, and then click Save. 
6. Click Next. 
7. Click to select the check boxes for the fields that you want to export, and then click Finish. 
8. Click OK and then click Close. 

Step 3: Make a backup copy of your Outlook Express mail account:

1. On the Tools menu, click Accounts. 
2. On the Mail tab, click the mail account that you want to export, and then click Export. 
3. In the Save In box, locate the Mail Backup folder on your desktop, and then click Save. 
4. Repeat these steps for each mail account that you want to export. 
5. Click Close. 

Step 4: Make a backup copy of your Outlook Express news accounts:

1. On the Tools menu, click Accounts. 
2. On the News tab, click the news account that you want to export, and then click Export. 
3. In the Save In box, locate the Mail Backup folder on your desktop, and then click Save. 
4. Repeat these steps for each news account that you want to export. 
5. Click Close. 

Importing Outlook Express data

To restore data, you may need to re-create the Identities for each user, prior to using the following steps.
***Repeat each step, as needed, for each Identity.

Step 1: Import Messages from the Backup Folder (Import your Outlook Express e-mail messages from the Backup folder)

1. On the File menu, point to Import, and then click Messages. 
2. In the Select an e-mail program to import from box, click Microsoft Outlook Express 5 or Microsoft Outlook Express 6, and then click Next. 
3. Click Import mail from an OE5 store directory or Import mail from an OE6 store directory, and then click OK. 
4. Click Browse, and then click on the Mail Backup folder on your desktop. 
5. Click OK, and then click Next. 
6. Click All folders, click Next, and then click Finish. 

Step 2: Import the Address Book File

1. On the File menu, click Import, and then click Other Address Book. 
2. Click Text File (Comma Separated Values), and then click Import. 
3. Click Browse. 
4. Locate the Mail Backup folder on your desktop, click the address book Backup.csv file, and then click Open. 
5. Click Next, and then click Finish. 
6. Click OK, and then click Close. 

Step 3: Import the Mail Account File

1. On the Tools menu, click Accounts. 
2. On the Mail tab, click Import. 
3. In the Look In box, locate the Mail Backup folder on your desktop. 
4. Click the mail account that you want to import, and then click Open. 
5. Repeat these steps for each mail account that you want to import. 
6. Click Close. 

Step 4: Import the Newsgroup Account File

1. On the Tools menu, click Accounts. 
2. On the News tab, click Import. 
3. In the Look In box, locate the Mail Backup folder on your desktop. 
4. Click the news account that you want to import, and then click Open. 
5. Repeat these steps for each news account that you want to import. 
6. Click Close.

@color
#@-node:Notes
#@-node:Script to backup Outlook Express files
#@+node:Create diagrams using Graphviz and pydot
#@+node:pydot notes by EKR
@nocolor

- I have found it easiest to create pydot objects rather than creating Graphviz strings.  It's the natural way, IMO.

- The pydot documentation is poor.  When you cut through the blah-blah-blah all that is really going on is that you use ctors to create pydot objects.  Typically you specify attributes in the ctors, but there are also getters and setters (various silly redundant flavors) to do this.

- It took me awhile to get the difference between names and labels.  Names are essentially object identifiers, and they are restricted to what are basically C identifiers.  Labels are what are shown in nodes.  The default label is the node's name.   It's a bit strange to use strings instead of Python object references, but it's no big deal.

- The documentation for Graphviz is weak.  Very few examples.  It took me a long time to realize that by default Graphviz lays out nodes and edges independently of the order in which they were created.  The ordering="out" argument to the Dot ctor overrides some parts of the layout algorithm so that nodes are laid out in roughly the definition order.  If you want to place nodes yourself, you can specify their exact position.  This would be feasible to do in a script and I haven't done that yet.

In short, Graphviz and pydot are very impressive tools.  The documentation could be improved, but once one gets the hang of things it is fairly easy to get real work done.
#@nonl
#@-node:pydot notes by EKR
#@+node:pydot docs
@nocolor
#@nonl
#@+node:General note about attributes
The original documentation repeats endlessly the same info about attributes.

Attributes can be set in several ways:

set("attributeName")

set_[attribute name], i.e. set_color, set_fontname

object.attributeName = val

Similarly, you can get attribute values with corresponding getters.
#@nonl
#@-node:General note about attributes
#@+node:Cluster(Graph)
class Cluster(Graph) 

Methods:

__init__(self, graph_name='subG', suppress_disconnected=False, **attrs)

graph_name:
    the cluster's name (the string 'cluster' will be always prepended)

suppress_disconnected:
    False: remove from the cluster any disconnected nodes.


Attributes:

attributes = ['pencolor', 'bgcolor', 'labeljust', 'labelloc', 'URL', 'fontcolor', 'fontsize', 'label', 'fontname', 'lp', 'style', 'target', 'color', 'peripheries', 'fillcolor']
#@nonl
#@-node:Cluster(Graph)
#@+node:Common
class Common 
    Common information to several classes.

Should not be directly used, several classes are derived from this one.

char_range(self, a, b)
Generate a list containing a range of characters.

is_ID(self, s)
Checks whether a string is a dot language ID.

Data:

chars_ID = None
parent_graph = None 



#@-node:Common
#@+node:Dot(Graph)
class Dot(Graph) 
    A container for handling a dot language file.

This class implements methods to write and process a dot language file.


Methods defined here:

__init__(self, **args)


Attributes:

formats = ['ps', 'ps2', 'hpgl', 'pcl', 'mif', 'pic', 'gd', 'gd2', 'gif', 'jpg', 'jpeg', 'png', 'wbmp', 'ismap', 'imap', 'cmap', 'vrml', 'vtx', 'mp', 'fig', ...]
progs = None
#@nonl
#@+node:create and create_xxx
create(self, prog='dot', format='ps')
Creates and returns a Postscript representation of the graph.

create will write the graph to a temporary dot file and process
it with the program given by 'prog' (which defaults to 'twopi'),
reading the Postscript output and returning it as a string if the
operation is successful.
On failure None is returned.

There's also the preferred possibility of using:

        create_'format'(prog='program')

which are automatically defined for all the supported formats.

[create_ps(), create_gif(), create_dia(), ...]
#@nonl
#@-node:create and create_xxx
#@+node:write and write_xxx
write(self, path, prog='dot', format='raw')
Writes a graph to a file.

Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object and in the format specified by
'format'.

The format 'raw' is used to dump the string representation
of the Dot object, without further processing.

The output can be processed by any of graphviz tools, defined
in 'prog', which defaults to 'dot'

Returns True or False according to the success of the write operation.

There's also the preferred possibility of using:

        write_'format'(path, prog='program')

which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
#@nonl
#@-node:write and write_xxx
#@-node:Dot(Graph)
#@+node:Edge
class Edge(__builtin__.object, Common) 
    A graph edge.

This class represents a graph's edge with all its attributes.

edge(src, dst, attribute=value, ...)

src: source node's name
dst: destination node's name


--------------------------------------------------------------------------------
Methods defined here:

__eq__(self, edge)
Compare two edges.

If the parent graph is directed, arcs linking
node A to B are considered equal and A->B != B->A

If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A

__init__(self, src, dst, **attrs)

get_destination(self)
Get the edge's destination node name.

get_source(self)
Get the edges source node name.

parse_node_ref(self, node_str)

set(self, name, value)
Set an attribute value by name.

Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
        set_'name'(value)
which are defined for all the existing attributes.
to_string(self)
Returns a string representation of the edge in dot language.

--------------------------------------------------------------------------------
Data and other attributes defined here:

__dict__ = <dictproxy object>
dictionary for instance variables (if defined)
__weakref__ = <attribute '__weakref__' of 'Edge' objects>
list of weak references to the object (if defined)
attributes = ['style', 'target', 'pos', 'layer', 'tooltip', 'color', 'showboxes', 'URL', 'fontcolor', 'fontsize', 'label', 'fontname', 'comment', 'lp', 'arrowhead', 'arrowsize', 'arrowtail', 'constraint', 'decorate', 'dir', ...]
#@nonl
#@-node:Edge
#@+node:Error
class Error(exceptions.Exception) 
    General error handling class.

Methods defined here:

__init__(self, value)
__str__(self)
#@nonl
#@-node:Error
#@+node:Graph(Common)
class Graph(__builtin__.object, Common) 
    Class representing a graph in Graphviz's dot language.

This class implements the methods to work on a representation
of a graph in Graphviz's dot language.


Data and other attributes:

__dict__ = <dictproxy object>
dictionary for instance variables (if defined)

__weakref__ = <attribute '__weakref__' of 'Graph' objects>
list of weak references to the object (if defined)

attributes = ['Damping', 'bb', 'center', 'clusterrank', 'compound', 'concentrate', 'defaultdist', 'dim', 'fontpath', 'epsilon', 'layers', 'layersep', 'margin', 'maxiter', 'mclimit', 'mindist', 'pack', 'packmode', 'model', 'page', ...]
#@nonl
#@+node:Graph.__init__
__init__(self, graph_name='G', type='digraph', strict=False, suppress_disconnected=False, simplify=False, **attrs)


graph_name: the graph's name

type: 'graph' or 'digraph'

suppress_disconnected:
    defaults to False, which will remove from the graph any disconnected nodes.

simplify:
    if True it will avoid displaying equal edges, i.e. only one edge between two nodes. removing the duplicated ones.

All the attributes defined in the Graphviz dot language should be supported.

Attributes can be set through the dynamically generated methods:

set_[attribute name], i.e. set_size, set_fontname

or using the instance's attributes:

 Graph.[attribute name], i.e. graph_instance.label, graph_instance.fontname
#@nonl
#@-node:Graph.__init__
#@+node:add_edge
add_edge(self, graph_edge)

Adds an edge object to the graph.
#@nonl
#@-node:add_edge
#@+node:add_node
add_node(self, graph_node)

Adds a node object to the graph.
#@nonl
#@-node:add_node
#@+node:add_subgraph
add_subgraph(self, sgraph)

Adds a subgraph object to the graph.
#@-node:add_subgraph
#@+node:getters...
get(self, name)
Get an attribute value by name.

get_'name'() is defined for all attributes.

get_edge(self, src, dst)
Retrieves an edge from the graph.
Returns a list, a single Edge, or None

get_edge_list(self)
Returns the list of Edge instances composing the graph.

get_name(self)
Get the graph's name.

get_node(self, name)
Given a node's name the corresponding Node instance will be returned.
Returns a list, a single Node or None.

get_node_list(self)
Returns the list of Node instances composing the graph.

get_simplify(self)
Get whether to simplify or not.

get_strict(self, val)
Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.

get_subgraph(self, name)
Given a subgraph's name the corresponding Subgraph instance will be returned.
Returns a list of Subgraphs, a single Subgraph or None.

get_subgraph_list(self)
Returns the list of Subgraph instances in the graph.

get_suppress_disconnected(val)
Get if suppress disconnected is set.

get_type(self)
Get the graph's type, 'graph' or 'digraph'.
#@nonl
#@-node:getters...
#@+node:setters...
set(self, name, value)
Set an attribute value by name.

set_'name'(value) are defined for all the existing attributes.

set_graph_parent(self, parent)
Sets a graph and its elements to point to the parent.
Any subgraph added to a parent graph receives a reference to the parent to access some common data.

set_name(self, graph_name)
Set the graph's name.

set_simplify(self, simplify)
Set whether to simplify or not.
 If True it will avoid displaying equal edges.

set_strict(self, val)
Set graph to 'strict' mode.
This option is only valid for top level graphs.

set_suppress_disconnected(val)
Suppress disconnected nodes in the output graph.

set_type(self, graph_type)
Set the graph's type, 'graph' or 'digraph'.

#@-node:setters...
#@+node:toString
to_string(self, indent='')
Returns a string representation of the graph in dot language.
#@nonl
#@-node:toString
#@-node:Graph(Common)
#@+node:Node(Common)
class Node(__builtin__.object, Common) 
    A graph node.

This class represents a graph's node with all its attributes.


Data and attributes:

__dict__ = <dictproxy object>
dictionary for instance variables (if defined)

__weakref__ = <attribute '__weakref__' of 'Node' objects>
list of weak references to the object (if defined)

attributes = ['showboxes', 'URL', 'fontcolor', 'fontsize', 'label', 'fontname', 'comment', 'root', 'toplabel', 'vertices', 'width', 'z', 'bottomlabel', 'distortion', 'fixedsize', 'group', 'height', 'orientation', 'pin', 'rects', ...]
#@nonl
#@+node:Node.__init__
node(name, attribute=value, ...)

name: node's name

All the attributes defined in the Graphviz dot language should be supported.

__init__(self, name, **attrs)
#@-node:Node.__init__
#@+node:get_name
get_name(self)
Get the node's name.
#@nonl
#@-node:get_name
#@+node:set, set_x and set_name
set(self, name, value)
Set an attribute value by name.

Given an attribute 'name' it will set its value to 'value'.

set_'name'(value) is defined for all the existing attributes.

set_name(self, node_name)
Set the node's name.
#@nonl
#@-node:set, set_x and set_name
#@+node:toString
to_string(self)
Returns a string representation of the node in dot language.
#@nonl
#@-node:toString
#@-node:Node(Common)
#@+node:Subgraph(Graph)
class Subgraph(Graph) 

Methods:

__init__(self, graph_name='subG', suppress_disconnected=False, **attrs)

graph_name:
    the subgraph's name

suppress_disconnected:
    False: removes from the subgraph any disconnected nodes.

Attributes:

attributes = ['Damping', 'bb', 'center', 'clusterrank', 'compound', 'concentrate', 'defaultdist', 'dim', 'fontpath', 'epsilon', 'layers', 'layersep', 'margin', 'maxiter', 'mclimit', 'mindist', 'pack', 'packmode', 'model', 'page', ...]
#@nonl
#@-node:Subgraph(Graph)
#@+node:Functions
#@+node:find_graphviz
find_graphviz()

Locate Graphviz's executables in the system.

Attempts  to locate  graphviz's  executables in a Unix system.
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
It will return a dictionary containing the program names as keys
and their paths as values.
#@nonl
#@-node:find_graphviz
#@+node:graph_from_adjacency_matrix
graph_from_adjacency_matrix(matrix, node_prefix='', directed=False)

Creates a basic graph out of an adjacency matrix.

The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
#@nonl
#@-node:graph_from_adjacency_matrix
#@+node:graph_from_edges
graph_from_edges(edge_list, node_prefix='', directed=False)

Creates a basic graph out of an edge list.

The edge list has to be a list of tuples representing the nodes connected by the edge.

The values can be anything: bool, int, float, str.

If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
#@-node:graph_from_edges
#@+node:graph_from_incidence_matrix
graph_from_incidence_matrix(matrix, node_prefix='', directed=False)

Creates a basic graph out of an incidence matrix.

The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
#@nonl
#@-node:graph_from_incidence_matrix
#@-node:Functions
#@-node:pydot docs
#@+node:@url http://www.research.att.com/sw/tools/graphviz/refs.html
#@-node:@url http://www.research.att.com/sw/tools/graphviz/refs.html
#@+node:@url http://dkbza.org/pydot/pydot.html
#@-node:@url http://dkbza.org/pydot/pydot.html
#@+node:Write an outline using Graphviz
import string

try:
    import pydot
except:
    s = "pydot must be installed"
    print s ; es(s,color="red")
    pydot = None

<< code >>

if pydot:
    graph = pydot.Dot(simplify=True,ordering="out")
    root = g.findNodeInTree(p,"Root")
    addLeoNodesToGraph(root,graph,top=True)
    graph.write_jpeg(r'c:\prog\test\pydotOut.jpg',prog='dot')
#@+node:<< code >>
@others
#@nonl
#@+node:addLeoNodesToGraph
def addLeoNodesToGraph(p,graph,top=False):

    '''Recursively add pydot nodes and edges for position p and its subtree.

    Returns the pydot.Node created for p's vnode so callers can link to it.'''

    # Create p's vnode.
    thisNode = pydot.Node(name=vnodeRepr(p.v),label=vnodeLabel(p.v))
    graph.add_node(thisNode)

    if p.hasChildren():
        # Link p's tnode to the first child's vnode, then chain the siblings.
        child = p.firstChild()
        childNode = addLeoNodesToGraph(child,graph)
        graph.add_node(childNode)
        edge2 = pydot.Edge(tnodeRepr(p.v.t),vnodeRepr(child.v))
        graph.add_edge(edge2)

        while child.hasNext():
            next = child.next()
            # dir="both" draws arrowheads on both ends of sibling links.
            edge = pydot.Edge(vnodeRepr(child.v),vnodeRepr(next.v),dir="both")
            nextNode = addLeoNodesToGraph(next,graph)
            graph.add_node(nextNode)
            graph.add_edge(edge)
            child = next

    # Draw p's tnode as a box, linked without an arrowhead to p's vnode.
    tnode = pydot.Node(name=tnodeRepr(p.v.t),shape="box",label=tnodeLabel(p.v.t))
    edge1 = pydot.Edge(vnodeRepr(p.v),tnodeRepr(p.v.t),arrowhead="none")
    graph.add_edge(edge1)
    graph.add_node(tnode)

    if 0: # Confusing.
        # Optionally link each vnode to its parent vnode.
        if not top and p.v._parent:
            edge = pydot.Edge(vnodeRepr(p.v),vnodeRepr(p.v._parent),
                style="dotted",arrowhead="onormal")
            graph.add_edge(edge)

    if 0: # Marginally useful.
        # Optionally link the tnode to every vnode in its vnodeList.
        for v in p.v.t.vnodeList:
            edge = pydot.Edge(tnodeRepr(p.v.t),vnodeRepr(v),
                style="dotted",arrowhead="onormal")
            graph.add_edge(edge)

    return thisNode
#@nonl
#@-node:addLeoNodesToGraph
#@+node:tnode/vnodeLabel
def tnodeLabel(t):

    '''Return the display label for tnode t: its id and vnodeList length.'''
    count = len(t.vnodeList)
    return "t %d [%d]" % (id(t),count)

def vnodeLabel(v):

    '''Return the display label for vnode v: its id and headline.'''
    head = v.t.headString
    return "v %d %s" % (id(v),head)
#@-node:tnode/vnodeLabel
#@+node:tnode/vnodeRepr
def dotId(s):

    """Convert s to a dot-language id by keeping only ASCII letters, digits and '_'.

    Uses string.ascii_letters rather than the locale-dependent string.letters,
    so the result is always a valid C-style identifier."""

    valid = string.ascii_letters + string.digits + '_'
    s2 = [ch for ch in s if ch in valid]
    return ''.join(s2)

def tnodeRepr(t):

    """Return a unique dot-language name for tnode t."""

    return "t_%d" % id(t)

def vnodeRepr(v):

    """Return a unique dot-language name for vnode v, derived from its headline."""

    return "v_%d_%s" % (id(v),dotId(v.headString()))
#@nonl
#@-node:tnode/vnodeRepr
#@-node:<< code >>
#@+node:Root
#@+node:clone
#@+node:Child1
#@+node:GrandChild
#@-node:GrandChild
#@-node:Child1
#@+node:Child2
#@-node:Child2
#@-node:clone
#@-node:Root
#@-node:Write an outline using Graphviz
#@-node:Create diagrams using Graphviz and pydot
#@+node:Execute @test, @suite or file
@

To run the script:

1. Make the 'Exec File' node a script button.

2. Select either the @test node or the @thin node.

(You must change @@thin to @thin for this to work.)

3. Hit the Exec File button.
#@nonl
#@+node:@test __name__
assert __name__ == '__builtin__', '__name__ is %s' % __name__
#@nonl
#@-node:@test __name__
#@+node:@@thin executeFileTest.py
@language python
@tabwidth -4

print __name__
#print 'hi'

assert __name__ == '__main__', '__name__ is %s' % __name__
#@nonl
#@-node:@@thin executeFileTest.py
#@+node:Exec File
<< docstring >>
<< imports >>

@others

doatest()
#@nonl
#@+node:<< docstring >>
"""this script will run python from Leo with @file somefile.py
using the py2.4 (2.3 compatible) subprocess module
to verify and code correct in the quickest possible manner.
unittests or other w/stdout stderr redirected to log.
wimped out for the moment on making it virtual.

If the presently selected node is an @test or @suite node this script runs them,
otherwise the script expects the node to be a valid @file node that has been
written to disk and updated.

less complicated and thus more advanced unittesting:
py.test, similar to unittest as far as adding support.
and @test and @suite, many examples in test.leo
and my own dutest in dyna_menu with redirection to log
which calls leoTest.py on @test and @suite nodes 
otherwise for doctesting w/o needing an @file written.

good overview, see Recent Posts:
http://agiletesting.blogspot.com
http://agiletesting.blogspot.com/2005/01/
Python unit testing part 3: the py.test tool and library 
python-unit-testing-part-2-doctest part-1 unittest

tested win9x Leo4.3a2 py2.4 2.3
tested but little and probably not completely.
if it works once its done, right?
make an @button or add to a menu or plugin

Note: 
you may see some extraneous traceback 
vrs run outside Leo, can't be helped.

Warning: this may run arbitrary code.
no warranty expressed or implied!
"""
#@-node:<< docstring >>
#@+node:<< imports >>
import leoTest

try:
    import subprocess # Exists only in Python 2.4.
except ImportError:
    subprocess = None

import os
import StringIO
import sys
import unittest
#@nonl
#@-node:<< imports >>
#@+node:getAtFileName
def getAtFileName():

    '''Return the full path from any kind of @file node and applicable @path directives.'''

    # should return cwd, cd to files path
    # script should return cwd after run

    name = p.anyAtFileNodeName()
    if not name:
        return name

    d = g.scanDirectives(c)
    path = d.get("path")
    return g.os_path_abspath(g.os_path_normpath(g.os_path_join(path,name)))
#@nonl
#@-node:getAtFileName
#@+node:doatest
def doatest():

    '''Run the selected @test/@suite node, or execute the node's @file on disk,
    capturing stdout/stderr and echoing any captured output to the Leo log.'''

    # Capture both streams.  __stdout__/__stderr__ are swapped too because
    # the executed code may write to them directly.
    _sosav = sys.__stdout__
    _sesav = sys.__stderr__
    sys.stdout = sys.__stdout__ = g.fileLikeObject()
    sys.stderr = sys.__stderr__ = g.fileLikeObject()

    h = p.headString()
    if h.startswith('@suite') or h.startswith('@test'):
        leoTest.doTests(all= False,verbosity=2)
    else: # Try to run an @file unittest.
        name = getAtFileName()
        if name:
            g.executeFile(name,'-v')
        else:
            g.es('Expecting @test, @suite or any @file node',color='blue')

    # Grab the captured text, then restore the streams before logging.
    oo = sys.stdout.get()
    oe = sys.stderr.get()
    sys.stdout.close()
    sys.stderr.close()
    sys.stdout = sys.__stdout__ = _sosav
    sys.stderr = sys.__stderr__ = _sesav
    lines = g.splitLines(oo + oe)
    empty = True
    for line in lines:
        if line.rstrip():
            empty = False
    if lines and not empty:
        g.es('Output from test...',color='blue')
        for line in lines:
            g.es(line.rstrip())
    g.es('Test done',color='blue')

    c.frame.putStatusLine(' fini ', color= 'DodgerBlue')
#@nonl
#@-node:doatest
#@-node:Exec File
#@-node:Execute @test, @suite or file
#@+node:FilterHoist script
from leoPlugins import *
from leoGlobals import *
from leoNodes import *
import Tkinter
import re
import sys

@others

# Module-level state for the dialog: the Toplevel window and its Entry widget.
fhp = None
fhp_entry = None

if 1:
    # Immediate mode: add the menu item right away.
    addMenu("none",None)
else:
    # Plugin mode: register addMenu on the open/new hooks instead.
    hooks = choose(sys.platform == 'win32',
        ('open2',"new"),
        ('start2','open2',"new"))

    print "hi"

    registerHandler(hooks,addMenu)

    __version__ = ".1"
    plugin_signon(__name__)
#@+node:description
@nocolor

@ This is what it does:

1. Under Outline it puts an Option called 'FilterHoist'
2. Selecting the option pops up an ugly little window.  On it is a section where
you can type in text.  You can close the window with the close button.  You
can activate the functionality with the Filter Button.
3. Filtering will walk the Leo tree, looking for a text match from the Text
field with the Nodes bodyString.
4. After finding some nodes it creates a new node at the root.  Then it clones
the matching nodes under that new node.  A Hoist operation is performed on the
new node.  This gives a view of all matching nodes.

I put this together because I wanted a find that was based in terms of Leo's
nodes.  Find as it is bounces you around the Tree(it bothers me).  This brings
the nodes to you and presents them.  As it is I may work further on this if
people like the idea.  It's possible I might migrate it to the NodeRoundup plugin
too.

You need 2.3 python; it uses generators in its find method.

That was one motivation for writing this thing, using a generator vs. Recursive
approach to tree walking.
#@nonl
#@-node:description
#@+node:filter
def filter(c,e):

    '''Clone every node whose body matches the regex typed in entry widget e
    under a new top-level node, then hoist that node.

    Note: intentionally shadows the builtin filter; renaming would break
    the filterCallback wiring in filterHoist.'''

    pat = re.compile(e.get())

    c.beginUpdate()
    try: # update...
        t = tnode('','A Filtered Hoist')
        newRoot = c.rootVnode().insertAfter(t)
        # Thread through the whole outline; moving clones under newRoot
        # happens while walking, so matches are found once each.
        p = c.rootVnode()
        while p:
            if pat.search(p.bodyString()):
                clone = p.clone(p)
                clone.moveToLastChildOf(newRoot)
            p = p.threadNext()
        newRoot.moveToRoot(c.rootVnode())
        c.setCurrentVnode(newRoot)
    finally:
        c.endUpdate()

    c.hoist()
    fhp.withdraw()
#@-node:filter
#@+node:old code
#@+node:filter
def OLDfilter(c,e):

    '''Old version: clone regex matches under a new node and hoist it.

    NOTE(review): calls search(), but the helper defined below is named
    OLDsearch -- confirm which name is live when this node is used.'''

    # Collect the top-level siblings only; search() walks each subtree.
    v = c.rootVnode()

    nodes = []
    while v:
        nodes.append(v)
        v = v.next()

    regex = re.compile(e.get())
    c.beginUpdate()
    try: # update...
        t = tnode('','A Filtered Hoist')
        ticker = c.rootVnode().insertAfter(t)
        for z in nodes:
            for x in search(z,regex):
                clone = x.clone( x )
                clone.moveToNthChildOf(ticker,0)
        c.setCurrentVnode(ticker)
        ticker.moveToRoot(c.rootVnode())
    finally:
        c.endUpdate()
    c.hoist()
    fhp.withdraw()
#@-node:filter
#@+node:search
from __future__ import generators # To make the code work in Python 2.2.

def OLDsearch(vn,regex):

    '''Generate every node in vn's subtree whose body text matches regex,
    walking the tree in reverse outline order (last children first).'''

    sn = vn # Remember the subtree root so the walk stops there.
    while vn != None:
        if regex.search( vn.t.bodyString ) : yield vn
        nc = vn.numberOfChildren()
        if nc == 0:
            # Leaf: back up to the previous sibling, or climb until an
            # ancestor has a previous sibling, stopping at the subtree root.
            i = vn.childIndex()
            p = vn.parent()
            if p == None: 
                vn = None
                continue
            if i == 0:
                while 1:
                    if p == sn :
                        vn = None
                        break
                    vn = p.back() 
                    if vn == None:
                        p = p.parent()
                        continue
                    break
                continue
            vn = p.nthChild( i - 1)
            continue
        vn = vn.nthChild( nc - 1 ) # Descend to the last child.
#@-node:search
#@-node:old code
#@+node:filterHoist
def filterHoist(c):

    '''Show the FilterHoist dialog, creating it on first use.'''

    global fhp
    # Bug fix: fhp_entry must be declared global.  It was only assigned
    # locally, so on any call after the first (when fhp already exists) the
    # module-level fhp_entry was still None and focus_set() raised
    # AttributeError.
    global fhp_entry
    global e

    if fhp is None:

        fhp = Tkinter.Toplevel()
        fhp.title('FilterHoist')

        fhp_entry = e = Tkinter.Entry(fhp)
        e.pack(side="top",fill="both")

        def closeCallback(fhp=fhp):
            fhp.withdraw()

        def filterCallback(c=c,entry=e):
            filter(c,entry)

        b1 = Tkinter.Button(fhp,text='Close',command=closeCallback)
        b2 = Tkinter.Button(fhp,text='Filter',command=filterCallback)
        b1.pack(side="left")
        b2.pack(side="right")

    fhp.geometry('200x200+250+250') 
    fhp.deiconify()
    fhp_entry.focus_set()
#@-node:filterHoist
#@+node:addMenu
def addMenu(tag,keywords):

    '''Plugin hook handler: add a FilterHoist item to the Outline menu.'''

    c = top()

    # Removed a stray trace() debugging call left over from development.

    def callback(c=c):
        filterHoist(c)

    table = ("FilterHoist",None,callback),

    c.frame.menu.createMenuItemsFromTable("Outline",table)
#@-node:addMenu
#@-node:FilterHoist script
#@+node:Get statistics using dis module
# routines to gather static statistics about opcodes based on dis module.
import compiler
import dis
import os
import string
import sys
import types

@others
#@nonl
#@+node:go
def go():

    dir = "c:/prog/leoCVS/leo/"
    modules = getModules(dir)
    stats = [0] * 256
    try:
        # Importing these might start leo itself and hang idle.
        modules.remove("leo")
        modules.remove("openLeo")
        modules.remove("openEkr")
        modules.remove("setup")
    except: pass
    # print modules

    for m in modules:
        try:
            print "module:", m
            exec("import " + m)
            a = eval(m)
            any(a,stats)
        except:
            import traceback ; traceback.print_exc()
            print "----- no matching class in", m

    g.print_stats(stats)
#@nonl
#@-node:go
#@+node:getFiles
def getFiles (dir):

    '''Return the full paths of all .py files in dir.'''

    result = []
    for name in os.listdir(dir):
        head,tail = g.os_path_split(name)
        root,ext = g.os_path_splitext(tail)
        if ext == ".py":
            result.append(g.os_path_join(dir,name))

    return result
#@nonl
#@-node:getFiles
#@+node:getModules
def getModules (dir):

    """Return the module names (file names sans extension) of Python files in dir.

    Returns an empty list if dir cannot be listed."""

    files = []

    try:
        allFiles = os.listdir(dir)
    except OSError: # Narrowed from a bare except: only a bad/unreadable dir is expected.
        return files

    for f in allFiles:
        head,tail = g.os_path_split(f)
        fn,ext = g.os_path_splitext(tail)
        if ext==".py":
            files.append(fn)

    return files
#@nonl
#@-node:getModules
#@+node:any
def any(x,stats,printName = 0):
    # based on dis.dis()
    # Note: shadows the builtin any(); kept to match dis.dis's structure.
    """Gathers statistics for classes, methods, functions, or code."""
    if not x:
        return
    # Unwrap Python 2 objects: instance -> class, method -> function,
    # function -> code object.
    if type(x) is types.InstanceType:
        x = x.__class__
    if hasattr(x, 'im_func'):
        x = x.im_func
    if hasattr(x, 'func_code'):
        x = x.func_code
    if hasattr(x, '__dict__'):
        # A class or module: recurse into its methods and functions.
        items = x.__dict__.items()
        items.sort()
        for name, x1 in items:
            if type(x1) in (types.MethodType,
                            types.FunctionType,
                            types.CodeType):
                if printName: print name
                try:
                    any(x1,stats)
                except TypeError, msg:
                    print "Sorry:", msg
    elif hasattr(x, 'co_code'):
        # A raw code object: count its opcodes.
        code(x,stats)
    else:
        raise TypeError, \
              "don't know how to disassemble %s objects" % \
              type(x).__name__
#@nonl
#@-node:any
#@+node:code
def code (co, stats):
    """Gather static count statistics for a code object."""

    codeList = co.co_code
    # Count the number of occurances of each opcode.
    i = 0 ;  n = len(codeList)
    while i < n:
        c = codeList[i]
        op = ord(c) # Python 2: co_code is a str, so each element is a 1-char string.
        stats[op] += 1
        i = i+1
        if op >= dis.HAVE_ARGUMENT:
            i = i+2 # Skip the opcode's two argument bytes (Python 2 bytecode).
#@nonl
#@-node:code
#@+node:print_stats
def print_stats (stats):

    '''Print all non-zero opcode counts, most frequent first, then the total.'''

    # Collect (count, opcode) pairs so sorting orders by count.
    stats2 = [] ; total = 0
    for i in xrange(0,256):
        if stats[i] > 0:
            stats2.append((stats[i],i))
        total += stats[i]

    # Sort by count, descending.
    stats2.sort()
    stats2.reverse()
    for stat,i in stats2:
        print string.rjust(repr(stat),6), dis.opname[i]
    print "total", total
#@nonl
#@-node:print_stats
#@-node:Get statistics using dis module
#@+node:Linux install script
@first

"""
A simple script to install Leo on Linux.

Contributed by David McNab <david@rebirthing.co.nz>
"""

import commands,os,sys # commands module is for Unix only.

# We must be root to use this script.
if os.getuid() != 0:
    print "You need to run this install script as root"
    sys.exit(1)

# Create /usr/lib/leo and copy all files there.
print "***** Installing Leo to /usr/lib/leo..."
commands.getoutput("mkdir -p /usr/lib/leo")
commands.getoutput("cp -rp * /usr/lib/leo")

# Create user's 'leo' command script into /usr/bin/leo
print "***** Creating Leo startup script -> /usr/bin/leo"
fd = open("/usr/bin/leo", "w")
# Note: the string below is the launcher script written to disk;
# it is runtime data, so do not reformat or translate it.
fd.write("""#!/usr/bin/python
import commands,sys
files = " ".join(sys.argv[1:])
print commands.getoutput("python /usr/lib/leo/leo.py %s" % files)
""")
fd.close()
commands.getoutput("chmod 755 /usr/bin/leo")
print "***** Leo installed successfully - type 'leo filename.leo' to use it."
#@nonl
#@-node:Linux install script
#@+node:Scipts related to the new colorizer
#@+node:Find longest
# Find the node with the longest body text in the current subtree
# and select it.

pmax = p.copy()
n = len(p.bodyString())

for p in p.self_and_subtree_iter():
    bodyLen = len(p.bodyString())
    if bodyLen > n:
        n = bodyLen
        pmax = p.copy()

c.selectPosition(pmax)
c.redraw()
#@nonl
#@-node:Find longest
#@+node:Find most tags
def tags(p):
    '''Select p and return the total number of Tk tag ranges in its body widget.'''
    c.selectPosition(p)
    w = c.frame.body.bodyCtrl
    names = w.tag_names()
    total = 0
    for name in names:
        theList = w.tag_ranges(name)
        if theList:
            print name,w.tag_ranges(name)
            total += len(theList)
    return total

# Find the node with the most tag ranges in the current subtree and select it.
pmax = p.copy()
n = tags(p) # len(p.bodyString())

for p in p.self_and_subtree_iter():
    # if len(p.bodyString()) > n:
    n2 = tags(p)
    if n2 > n:
        n = n2
        pmax = p.copy()

c.selectPosition(pmax)
c.redraw()
#@nonl
#@-node:Find most tags
#@-node:Scipts related to the new colorizer
#@+node:Finding @file nodes
# Walk from the current position up through its ancestors and print the
# full path of the first @file-like node found.
current = c.currentPosition()
for p in current.self_and_parents_iter():
    if p.isAnyAtFileNode():
        # @path directives in ancestors contribute to the full path.
        d = g.scanDirectives(c,p=p)
        path = d.get('path')
        print g.os_path_join(path,p.atFileNodeName())
        break
#@nonl
#@-node:Finding @file nodes
#@+node:munge-db
'''A script to munge database records so they conform to an existing db's format.'''
# Almost infinitely easier to do this in Python rather than FileMaker Pro's laughable scripting language.
path = r'c:\rebecca\REBECCA9_97.txt'
path2 = r'c:\rebecca\REBECCA9_97-converted.txt'
f = file(path)
s = f.read() ; f.close()
f = file(path2,'w')
print '-' * 40, len(s)
lines = s.split('\n')
n = 0
for line in lines:
    n += 1
    if not line.strip(): continue
    # bug: fails for fields containing commas.  Should use a regex instead.
    fields = line.split(',')
    result = []
    info1,last,full,addr1,addr2,zip,info2,info3 = fields
    assert full.startswith('"')
    # First name: the first word of the quoted full-name field.
    data = full[1:-1].strip().split(' ')
    first = '"%s"' % data[0].strip()
    # The quoted addr2 field ends with "<state> <something>"; everything
    # before that is the city.
    data = addr2[1:-1].strip().split(' ')
    city = '"%s"' % ''.join(data[:-2])
    state = '"%s"' % data[-2].upper()
    for field in (first,last,addr1,city,state,zip,info1,info2,info3):
        if field.startswith('"'):
            # Capitalize each word.
            s = field[1:-1].strip()
            aList = s.split(' ')
            s = ' '.join([z.capitalize() for z in aList])
            result.append('"%s"' % s)
        else:
            result.append(field)
    s = ','.join(result)
    print s
    f.write(s+'\n')
f.close()
print '%d records' % n
#@-node:munge-db
#@-node:Others
#@+node:Prototypes
#@+node:Debugger canvas
import Tkinter as Tk
import tkFont

# Cache of loaded PhotoImages, keyed by file name (used by getImage).
images = {}

@others

h = 500 ; w = 900

top = Tk.Toplevel(None) ; top.title("Debugger")

outer = Tk.Frame(top,height=h,width=w)
outer.pack(expand=1,fill='both')

# A narrow canvas on the left holds one icon per text line.
canvas = Tk.Canvas(outer,background='LightSteelBlue1',width=14)
canvas.pack(side='left',fill='y',expand=0)

text = Tk.Text(outer)
text.pack(side='left',fill='both',expand=1,pady=0,padx=0)

line_h = getLineHeight(text)
# print line_h
image = getImage('minusnode.gif',canvas)

# Draw one icon per text line down the canvas.
y = line_h / 2 - 2
while y < h:
    id = canvas.create_image(4,y,image=image,anchor="nw")
    y += line_h
#@+node:getImage
def getImage(name,canvas):

    """Return the Tk.PhotoImage for the named icon, loading and caching it
    in the module-level 'images' dict on first use.

    Returns None (after reporting via g.es) if the image cannot be loaded."""

    icon = images.get(name)
    if icon: return icon

    # Initialize before the try so the handler can always report a name:
    # previously a failure in g.os_path_join left 'fullname' unbound,
    # turning the report into a NameError.
    fullname = name
    try:
        fullname = g.os_path_join(g.app.loadDir,"..","Icons",name)
        fullname = g.os_path_normpath(fullname)
        image = Tk.PhotoImage(master=canvas,file=fullname)
        images [name] = image
        return image
    except Exception: # Was a bare except: it also swallowed KeyboardInterrupt/SystemExit.
        g.es("Exception loading: " + fullname)
        g.es_exception()
        return None
#@nonl
#@-node:getImage
#@+node:getLineHeight
def getLineHeight (text):

    """Return the pixel height of one line in the Tk Text widget *text*.

    Reads the widget's configured font and queries its metrics; falls
    back to 20 pixels if the metrics cannot be obtained."""

    try:
        widgetFont = tkFont.Font(font=text.cget('font'))
        return widgetFont.metrics()["linespace"]
    except Exception:
        g.es("exception setting outline line height")
        g.es_exception()
        return 20 # default
#@nonl
#@-node:getLineHeight
#@-node:Debugger canvas
#@+node:Ipython Shell
<< imports >>

@others

if 1:
    << use IPShellEmbed >>
else:
    shell = LeoShell('shell')
    # g.redirectStdout()
    g.es_print('-'*40)
    body = p.bodyString()
    for line in g.splitLines(body):
        g.es(str(shell.prefilter(line,None)))
    #g.restoreStdout()
#@nonl
#@+node:<< imports >>
import IPython
import IPython.genutils
from IPython.Struct import Struct

import __builtin__
import __main__
import os
import re
import sys

# we need the directory where IPython itself is installed
IPython_dir = os.path.dirname(IPython.__file__)
#@nonl
#@-node:<< imports >>
#@+node:<< use IPShellEmbed >>
shell = IPython.Shell.IPShellEmbed (
    argv=[],
    banner='Welcome to IPython in Leo',
    exit_msg='Bye',
    rc_override={
        'confirm_exit':0,
        #'readline':0, # Crashes the interactive interp.
    },
)
shell()
#@nonl
#@-node:<< use IPShellEmbed >>
#@+node:class dummyCache
class dummyCache:

    @others
#@nonl
#@+node:ctor
def __init__ (self,user_ns):

    """Minimal stand-in for IPython's prompt cache, used by LeoShell.

    user_ns: the user namespace dict shared with the shell."""

    self.last_prompt = None
    self.prompt_count = 0
    self.user_ns = user_ns

    if 1:
        # Use lightweight dummy prompts instead of real IPython prompt objects.
        self.prompt1 = dummyPrompt('name=prompt1')
        self.prompt2 = dummyPrompt('name=prompt2')
        self.prompt_out = dummyPrompt('name=prompt_out')
    else:
        # Disabled alternative: create real IPython prompts (kept for reference).
        input_sep='\n'
        self.ps1_str = '>>> '   ### self._set_prompt_str(ps1,'In [\\#]: ','>>> ')
        self.ps2_str = '... '   ### self._set_prompt_str(ps2,'   .\\D.: ','... ')
        self.ps_out_str = ','   ### self._set_prompt_str(ps_out,'Out[\\#]: ','')
        pad_left=True

        self.prompt1 = IPython.Prompts.Prompt1(self,
            sep=input_sep,prompt=self.ps1_str,pad_left=pad_left)
        self.prompt2 = IPython.Prompts.Prompt2(self,
            prompt=self.ps2_str,pad_left=pad_left)
        self.prompt_out = IPython.Prompts.PromptOut(self,
            sep='',prompt=self.ps_out_str,pad_left=pad_left)

    self.last_prompt = self.prompt1 # Total kludge.
#@nonl
#@-node:ctor
#@+node:All others
def __len__ (self):
    # The dummy cache is always empty.
    return 0

def insert(self,n,line):
    # Caching is disabled: silently discard the line.
    pass

def pop(self):
    # Nothing is ever cached, so popping yields an empty line.
    return ''
#@nonl
#@-node:All others
#@-node:class dummyCache
#@+node:class dummyPrompt
class dummyPrompt (IPython.Prompts.BasePrompt):

    """Interactive prompt similar to Mathematica's."""

	@others
#@nonl
#@+node:__init__
def __init__(self,cache=None,sep=None,prompt=None,pad_left=False,name='prompt'):

    # Ignore all prompt configuration; only the display name is kept.
    # The base-class __init__ is deliberately not called.
    self.name = name
#@nonl
#@-node:__init__
#@+node:set_p_str
def set_p_str(self):
    """ Set the interpolating prompt strings.

    This must be called every time the color settings change, because the
    prompt_specials global may have changed."""

    return ### Deliberately disabled: everything below is the original (now dead) code.

    import os,time  # needed in locals for prompt string handling
    loc = locals()
    self.p_str = ItplNS('%s%s%s' %
                        ('${self.sep}${self.col_p}',
                         multiple_replace(prompt_specials, self.p_template),
                         '${self.col_norm}'),self.cache.user_ns,loc)

    self.p_str_nocolor = ItplNS(multiple_replace(prompt_specials_nocolor,
                                                 self.p_template),
                                self.cache.user_ns,loc)
#@-node:set_p_str
#@+node:write
def write(self,msg):  # dbg

    """Stubbed-out prompt writer: always returns '' and writes nothing."""

    return '' ### Deliberately disabled: the lines below are the original (dead) code.

    sys.stdout.write(msg)
    return ''
#@-node:write
#@+node:__str__
def __str__(self):
    """Return a string form of the prompt.

    This form is useful for continuation and output prompts, since it is
    left-padded to match lengths with the primary one (if the
    self.pad_left attribute is set)."""

    return self.name ### The dummy prompt just shows its name; the padding code below is dead.

    out_str = str_safe(self.p_str)
    if self.pad_left:
        # We must find the amount of padding required to match lengths,
        # taking the color escapes (which are invisible on-screen) into
        # account.
        esc_pad = len(out_str) - len(str_safe(self.p_str_nocolor))
        format = '%%%ss' % (len(str(self.cache.last_prompt))+esc_pad)
        return format % out_str
    else:
        return out_str
#@-node:__str__
#@+node:Unchanged
if 0:
    @others
#@nonl
#@+node:cwd_filt
# these path filters are put in as methods so that we can control the
# namespace where the prompt strings get evaluated.

# these path filters are put in as methods so that we can control the
# namespace where the prompt strings get evaluated.

def cwd_filt(self,depth):

    """Return the last *depth* elements of the current working directory.

    $HOME is always replaced with '~'.
    If depth==0, the full path is returned."""

    path = os.getcwd().replace(HOME,"~")
    tail = os.sep.join(path.split(os.sep)[-depth:])
    # An empty join result means we are at the filesystem root.
    return tail or os.sep
#@-node:cwd_filt
#@+node:cwd_filt2
def cwd_filt2(self,depth):
    """Return the last *depth* elements of the current working directory.

    $HOME is always replaced with '~'.
    If depth==0, the full path is returned."""

    parts = os.getcwd().replace(HOME,"~").split(os.sep)
    # Keep the '~' component visible when it would otherwise be the cut point.
    if '~' in parts and len(parts) == depth+1:
        depth += 1
    tail = os.sep.join(parts[-depth:])
    # An empty join result means we are at the filesystem root.
    return tail or os.sep
#@-node:cwd_filt2
#@-node:Unchanged
#@+node:auto_rewrite
def auto_rewrite(self,*args,**keys):
    """Disable IPython's prompt auto-rewrite feature: always emit nothing."""
    return ''
#@nonl
#@-node:auto_rewrite
#@-node:class dummyPrompt
#@+node:class LeoShell
class LeoShell (IPython.iplib.InteractiveShell):

    @others

    # Set the default prefilter() function (this can be user-overridden)
    prefilter = _prefilter
#@nonl
#@+node:ctor
def __init__ (self,name):

    """Initialize a LeoShell: an IPython InteractiveShell variant embedded in Leo.

    name: the global name this shell is known by (used when rewriting lines)."""

    self.shell = self
    self.name = name

    # Leo section references: each expands from a child node of this ctor.
    << directory stuff >>
    << set sensible command line defaults for self.rc >>
    << define regexp's >>
    << define escape stuff >>
    << define namespaces >>
    << create alias table >>
    << define inpsector >>

    # Replace IPython's input/output caches with inert dummies.
    self.inputcache = dummyCache(self.user_ns)
    self.outputcache = dummyCache(self.user_ns)
    self.CACHELENGTH = 0
#@nonl
#@+node:<< directory stuff >>
# EKR: take from make_IPython.

# Platform-dependent suffix and directory names
if os.name == 'posix':
    rc_suffix = ''
    ipdir_def = '.ipython'
else:
    rc_suffix = '.ini'
    ipdir_def = '_ipython'

# default directory for configuration
if 1: ### Leo
    ipythondir = g.app.loadDir
else:
    ipythondir = os.path.abspath(
        os.environ.get('IPYTHONDIR',
        os.path.join(IP.home_dir,ipdir_def)))
#@nonl
#@-node:<< directory stuff >>
#@+node:<< set sensible command line defaults for self.rc >>
# EKR: take from make_IPython

# This should have everything from  cmdline_opts and cmdline_only
self.rc = Struct(
    autocall = 1,
    autoindent=0,
    automagic = 1,
    banner = 1,
    cache_size = 1000,
    c = '',
    classic = 0,
    colors = 'NoColor',
    color_info = 0,
    confirm_exit = 1,
    debug = 0,
    deep_reload = 0,
    editor = '0',
    help = 0,
    ignore = 0,
    ipythondir = ipythondir,
    log = 0,
    logfile = '',
    logplay = '',
    multi_line_specials = 1,
    messages = 1,
    nosep = 0,
    pdb = 0,
    pprint = 0,
    profile = '',
    prompt_in1 = 'In [\\#]:',
    prompt_in2 = '   .\\D.:',
    prompt_out = 'Out[\\#]:',
    prompts_pad_left = 1,
    quick = 0,
    readline = 1,
    readline_merge_completions = 1,
    readline_omit__names = 0,
    rcfile = 'ipythonrc' + rc_suffix,
    screen_length = 0,
    separate_in = '\n',
    separate_out = '\n',
    separate_out2 = '',
    system_verbose = 0,
    gthread = 0,
    qthread = 0,
    wthread = 0,
    pylab = 0,
    tk = 0,
    upgrade = 0,
    Version = 0,
    xmode = 'Verbose',
    magic_docstrings = 0,  # undocumented, for doc generation
)
#@nonl
#@-node:<< set sensible command line defaults for self.rc >>
#@+node:<< define regexp's >>
# Don't get carried away with trying to make the autocalling catch too
# much:  it's better to be conservative rather than to trigger hidden
# evals() somewhere and end up causing side effects.

self.line_split = re.compile(
    r'^([\s*,;/])'
    r'([\?\w\.]+\w*\s*)'
    r'(\(?.*$)'
)

# RegExp to identify potential function names
self.re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')

# RegExp to exclude strings with this start from autocalling
self.re_exclude_auto = re.compile('^[!=()<>,\*/\+-]|^is ')

# try to catch also methods for stuff in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
#@nonl
#@-node:<< define regexp's >>
#@+node:<< define escape stuff >>
# escapes for automatic behavior on the command line
self.ESC_SHELL = '!'
self.ESC_HELP  = '?'
self.ESC_MAGIC = '%'
self.ESC_QUOTE = ','
self.ESC_QUOTE2 = ';'
self.ESC_PAREN = '/'

# And their associated handlers
self.esc_handlers = {
    self.ESC_PAREN: self.handle_auto,
    self.ESC_QUOTE: self.handle_auto,
    self.ESC_QUOTE2:self.handle_auto,
    self.ESC_MAGIC: self.handle_magic,
    self.ESC_HELP:  self.handle_help,
    self.ESC_SHELL: self.handle_shell_escape,
}
#@nonl
#@-node:<< define escape stuff >>
#@+node:<< define namespaces >>
# Set __name__ to __main__ to better match the behavior of the normal interpreter.

self.user_ns = {
    '__name__'     :'__main__',
    '__builtins__' : __builtin__,
}

self.internal_ns = __main__.__dict__.copy()
#@nonl
#@-node:<< define namespaces >>
#@+node:<< create alias table >>
# dict of names to be treated as system aliases.  Each entry in the
# alias table must be a 2-tuple of the form (N,name), where N is the
# number of positional arguments of the alias.
self.alias_table = {}
#@nonl
#@-node:<< create alias table >>
#@+node:<< define inpsector >>
ins_colors = IPython.OInspect.InspectColors
code_colors = IPython.PyColorize.ANSICodeColors

self.inspector = IPython.OInspect.Inspector(ins_colors,code_colors,'NoColor')
#@nonl
#@-node:<< define inpsector >>
#@-node:ctor
#@+node:usage
def usage(self):

    """Return a placeholder usage string for this shell."""

    return 'A usage message'
#@nonl
#@-node:usage
#@+node:log
def log(self,line,continuation=None):

    """Logger callback stub: tracing is disabled (flip the 0 to 1 to enable)."""

    # Called by the logger (not sure how).
    if 0:
        g.trace(line)
#@nonl
#@-node:log
#@+node:system
def system(self,s):

    """Trace a would-be shell command; actual execution is stubbed out."""

    g.trace(s)
#@nonl
#@-node:system
#@+node:_prefilter
def _prefilter(self, line, continue_prompt):
    """Calls different preprocessors, depending on the form of line."""

    # NOTE(review): a second _prefilter exists later under 'prefilter & helpers';
    # with Leo's @others expansion the later definition should win -- confirm
    # which one 'prefilter = _prefilter' actually binds.
    << about this function >>

    # save the line away in case we crash, so the post-mortem handler can record it
    self._last_input_line = line

    #print '***line: <%s>' % line # dbg
    if not line.strip():
        << handle empty line >>

    # print '***cont',continue_prompt  # dbg
    # special handlers are only allowed for single line statements
    if continue_prompt and not self.rc.multi_line_specials:
        return self.handle_normal(line,continue_prompt)

    # Get the structure of the input
    pre,iFun,theRest = self.split_user_input(line)
    #print 'pre <%s> iFun <%s> rest <%s>' % (pre,iFun,theRest)  # dbg

    << First check for explicit escapes in the last/first character >>
    << Next, check if we can automatically execute this thing >>
    << Let's try to find if the input line is a magic fn >>
    << execute comparisons, assignsments or function calls >>

    # If we get here, we have a normal Python line. Log and return.
    return self.handle_normal(line,continue_prompt)
#@nonl
#@+node:<< about this function >>
# All handlers *must* return a value, even if it's blank ('').

# Lines are NOT logged here. Handlers should process the line as
# needed, update the cache AND log it (so that the input cache array
# stays synced).

# This function is _very_ delicate, and since it's also the one which
# determines IPython's response to user input, it must be as efficient
# as possible.  For this reason it has _many_ returns in it, trying
# always to exit as quickly as it can figure out what it needs to do.

# This function is the main responsible for maintaining IPython's
# behavior respectful of Python's semantics.  So be _very_ careful if
# making changes to anything here.
#@nonl
#@-node:<< about this function >>
#@+node:<< handle empty line >>
# the input history needs to track even empty lines

if not continue_prompt:
    self.outputcache.prompt_count -= 1

return self.handle_normal('',continue_prompt)
#@nonl
#@-node:<< handle empty line >>
#@+node:<< First check for explicit escapes in the last/first character >>
handler = None
if line[-1] == self.ESC_HELP:
    handler = self.esc_handlers.get(line[-1])  # the ? can be at the end

if handler is None:
    # look at the first character of iFun, NOT of line, so we skip
    # leading whitespace in multiline input
    handler = self.esc_handlers.get(iFun[0:1])

if handler is not None:
    return handler(line,continue_prompt,pre,iFun,theRest)

# Emacs ipython-mode tags certain input lines
if line.endswith('# PYTHON-MODE'):
    return self.handle_emacs(line,continue_prompt)
#@nonl
#@-node:<< First check for explicit escapes in the last/first character >>
#@+node:<< Next, check if we can automatically execute this thing >>
# Allow ! in multi-line statements if multi_line_specials is on:
if (
    continue_prompt and self.rc.multi_line_specials and iFun.startswith(self.ESC_SHELL)
):
    return self.handle_shell_escape(line,continue_prompt,
            pre=pre,iFun=iFun,theRest=theRest)
#@nonl
#@-node:<< Next, check if we can automatically execute this thing >>
#@+node:<< Let's try to find if the input line is a magic fn >>
oinfo = None

if hasattr(self,'magic_'+iFun):
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic
    if oinfo['ismagic']:
        # Be careful not to call magics when a variable assignment is
        # being made (ls='hi', for example)
        if (
            self.rc.automagic and
            (len(theRest)==0 or theRest[0] not in '!=()<>,') and 
            (self.rc.multi_line_specials or not continue_prompt)
        ):
            return self.handle_magic(line,continue_prompt,pre,iFun,theRest)
        else:
            return self.handle_normal(line,continue_prompt)
#@nonl
#@-node:<< Let's try to find if the input line is a magic fn >>
#@+node:<< execute comparisons, assignsments or function calls >>
# If the rest of the line begins with an (in)equality, assginment or
# function call, we should not call _ofind but simply execute it.
# This avoids spurious geattr() accesses on objects upon assignment.
#
# It also allows users to assign to either alias or magic names true
# python variables (the magic/alias systems always take second seat to
# true python code).
if theRest and theRest[0] in '!=()':
    return self.handle_normal(line,continue_prompt)

if oinfo is None:
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic

if not oinfo['found']:
    return self.handle_normal(line,continue_prompt)
else:
    #print 'iFun <%s> rest <%s>' % (iFun,theRest) # dbg
    if oinfo['isalias']:
        return self.handle_alias(line,continue_prompt,
                                     pre,iFun,theRest)

    if self.rc.autocall and \
           not self.re_exclude_auto.match(theRest) and \
           self.re_fun_name.match(iFun) and \
           callable(oinfo['obj']) :
        #print 'going auto'  # dbg
        return self.handle_auto(line,continue_prompt,pre,iFun,theRest)
    else:
        #print 'was callable?', callable(oinfo['obj'])  # dbg
        return self.handle_normal(line,continue_prompt)
#@nonl
#@-node:<< execute comparisons, assignsments or function calls >>
#@-node:_prefilter
#@+node:prefilter & helpers
#@+node:_prefilter
def _prefilter(self, line, continue_prompt):
    """Calls different preprocessors, depending on the form of line."""

    # NOTE(review): this duplicates an earlier _prefilter; with Leo's @others
    # expansion this later definition should be the one bound by
    # 'prefilter = _prefilter' -- confirm.
    << about this function >>

    #if line.startswith('%crash'): raise RuntimeError,'Crash now!'  # dbg

    # save the line away in case we crash, so the post-mortem handler can record it
    self._last_input_line = line

    if not line.strip():
        return ''

    # special handlers are only allowed for single line statements
    if continue_prompt and not self.rc.multi_line_specials:
        return line

    # Get the structure of the input
    pre,iFun,theRest = self.split_user_input(line)
    #print 'pre <%s> iFun <%s> rest <%s>' % (pre,iFun,theRest)  # dbg

    << First check for explicit escapes in the last/first character >>
    << Next, check if we can automatically execute this thing >>
    << Let's try to find if the input line is a magic fn >>
    << execute comparisons, assignsments or function calls >>

    # A normal Python line.
    return line
#@nonl
#@+node:<< about this function >>
# All handlers *must* return a value, even if it's blank ('').

# Lines are NOT logged here. Handlers should process the line as
# needed, update the cache AND log it (so that the input cache array
# stays synced).

# This function is the main responsible for maintaining IPython's
# behavior respectful of Python's semantics.  So be _very_ careful if
# making changes to anything here.
#@nonl
#@-node:<< about this function >>
#@+node:<< First check for explicit escapes in the last/first character >>
handler = None
if line[-1] == self.ESC_HELP:
    handler = self.esc_handlers.get(line[-1])  # the ? can be at the end

if handler is None:
    # look at the first character of iFun, NOT of line, so we skip
    # leading whitespace in multiline input
    handler = self.esc_handlers.get(iFun[0:1])

if handler is not None:
    return handler(line,continue_prompt,pre,iFun,theRest)

# Emacs ipython-mode tags certain input lines
if line.endswith('# PYTHON-MODE'):
    return self.handle_emacs(line,continue_prompt)
#@nonl
#@-node:<< First check for explicit escapes in the last/first character >>
#@+node:<< Next, check if we can automatically execute this thing >>
# Allow ! in multi-line statements if multi_line_specials is on:
if (
    continue_prompt and self.rc.multi_line_specials and iFun.startswith(self.ESC_SHELL)
):
    return self.handle_shell_escape(line,continue_prompt,pre=pre,iFun=iFun,theRest=theRest)
#@nonl
#@-node:<< Next, check if we can automatically execute this thing >>
#@+node:<< Let's try to find if the input line is a magic fn >>
oinfo = None

if hasattr(self,'magic_'+iFun):
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic
    if oinfo['ismagic']:
        # Be careful not to call magics when a variable assignment is
        # being made (ls='hi', for example)
        if (
            self.rc.automagic and
            (len(theRest)==0 or theRest[0] not in '!=()<>,') and 
            (self.rc.multi_line_specials or not continue_prompt)
        ):
            return self.handle_magic(line,continue_prompt,pre,iFun,theRest)
        else:
            return line
#@nonl
#@-node:<< Let's try to find if the input line is a magic fn >>
#@+node:<< execute comparisons, assignsments or function calls >>
# If the rest of the line begins with an (in)equality, assignment or
# function call, we should not call _ofind but simply execute it.
# This avoids spurious getattr() accesses on objects upon assignment.
#
# It also allows users to assign to either alias or magic names true
# python variables (the magic/alias systems always take second seat to
# true python code).
if theRest and theRest[0] in '!=()':
    return line

if oinfo is None:
    oinfo = self._ofind(iFun) # FIXME - _ofind is part of Magic

if not oinfo['found']:
    return line

if oinfo['isalias']:
    return self.handle_alias(line,continue_prompt,pre,iFun,theRest)

# Autocall only for plain function-like names followed by safe characters.
if (self.rc.autocall and not self.re_exclude_auto.match(theRest) and 
    self.re_fun_name.match(iFun) and callable(oinfo['obj'])
):
    return self.handle_auto(line,continue_prompt,pre,iFun,theRest)
else:
    return line
#@nonl
#@-node:<< execute comparisons, assignsments or function calls >>
#@-node:_prefilter
#@+node:handle_normal ( no longer used)
def handle_normal(self,line,continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Handle normal input lines. Use as a template for handlers."""

    # NOTE(review): per the node title this handler is no longer used.
    g.trace(line)

    if 0:
        # Disabled: logging and caching of the handled line.
        self.log(line,continue_prompt)
        self.update_cache(line)

    return line
#@nonl
#@-node:handle_normal ( no longer used)
#@+node:handle_alias (done)
def handle_alias(self,line,continue_prompt=None,pre=None,iFun=None,theRest=None):

    """Rewrite an alias input line as a <shell>.call_alias() invocation."""

    # Escape quotes in the argument part so it survives embedding in a string literal.
    quoted = esc_quotes(theRest)
    return "%s%s.call_alias('%s','%s')" % (pre,self.name,iFun,quoted)
#@nonl
#@-node:handle_alias (done)
#@+node:handle_shell_escape (needs work)
def handle_shell_escape(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):

    """Execute the line in a shell, empty return value"""

    # Python 2 code (print statement).  Rewrites '!cmd' lines into system() calls.
    # Example of a special handler. Others follow a similar pattern.
    if continue_prompt:  # multi-line statements
        if iFun.startswith('!!'):
            print 'SyntaxError: !! is not allowed in multiline statements'
            return pre
        else:
            # Escape embedded double quotes before building the system() call.
            cmd = ("%s %s" % (iFun[1:],theRest)).replace('"','\\"')
            line_out = '%s%s.system("%s")' % (pre,self.name,cmd)
    else: # single-line input
        if line.startswith('!!'):
            # rewrite iFun/theRest to properly hold the call to %sx and
            # the actual command to be executed, so handle_magic can work
            # correctly
            theRest = '%s %s' % (iFun[2:],theRest)
            iFun = 'sx'
            return self.handle_magic('%ssx %s' % (
                self.ESC_MAGIC,line[2:]),continue_prompt,pre,iFun,theRest)
        else:
            cmd = esc_quotes(line[1:])
            line_out = '%s.system("%s")' % (self.name,cmd)

    return line_out
#@-node:handle_shell_escape (needs work)
#@+node:handle_magic (done)
def handle_magic(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):

    """Rewrite a magic-function line as an ipmagic() call.

    The rewritten form keeps the log clean Python."""

    magic = '%s %s' % (iFun,theRest)
    return '%sipmagic("%s")' % (pre,esc_quotes(magic))
#@nonl
#@-node:handle_magic (done)
#@+node:handle_auto (may need work)
def handle_auto(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):

    """Handle lines which can be auto-executed, quoting if requested."""

    # Auto-execution is only active for single-line input.
    if continue_prompt:
        return line

    if pre == self.ESC_QUOTE:
        # Auto-quote, splitting the remainder on whitespace.
        args = '", "'.join(theRest.split())
        return '%s("%s")\n' % (iFun,args)

    if pre == self.ESC_QUOTE2:
        # Auto-quote the whole remainder as one string.
        return '%s("%s")\n' % (iFun,theRest)

    # Auto-paren.
    if theRest[0:1] in ('=','['):
        # Don't autocall in these cases.  They can be rebindings of an existing callable's name,
        # or item access for an object which is BOTH callable and implements __getitem__.
        return '%s %s\n' % (iFun,theRest)
    if theRest.endswith(';'):
        return '%s(%s);\n' % (iFun.rstrip(),theRest[:-1])
    return '%s(%s)\n' % (iFun.rstrip(),theRest)
#@nonl
#@-node:handle_auto (may need work)
#@+node:handle_help (may need work) (and it's stupid anyway)
def handle_help(self, line, continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Try to get some help for the object.

    obj? or ?obj   -> basic information.
    obj?? or ??obj -> more details.
    """

    # Don't process lines which would be otherwise valid python, such as "x=1 # what?"
    try:
        # 'code' is the stdlib module; it must be imported elsewhere in this file.
        code.compile_command(line)
    except SyntaxError: # Only handle stuff which is NOT valid syntax
        if line[0]==self.ESC_HELP:
            line = line[1:]
        elif line[-1]==self.ESC_HELP:
            line = line[:-1]
        if line:
            self.magic_pinfo(line)
        else:
            page(self.usage,screen_lines=self.rc.screen_length)
        return '' # Empty string is needed here!
    except: pass # Deliberate best-effort: any other compile problem falls through.

    return line
#@nonl
#@-node:handle_help (may need work) (and it's stupid anyway)
#@+node:handle_emacs
def handle_emacs(self,line,continue_prompt=None,pre=None,iFun=None,theRest=None):
    """Handle input lines tagged by Emacs python-mode.

    Currently a no-op: the line is returned unchanged and the
    input cache is not updated."""

    return line
#@nonl
#@-node:handle_emacs
#@+node:safe_execfile (not used)
#@-node:safe_execfile (not used)
#@+node:split_user_input
if 0: # ref
    # Reference copy of the regexp compiled in << define regexp's >>.
    self.line_split = re.compile(
        r'^([\s*,;/])'          # Groups[0]: s, followed by special chars: , ; or /
        r'([\?\w\.]+\w*\s*)'    # Groups[1]: one char,
        r'(\(?.*$)' )           # Groups[2]: arg list

def split_user_input(self,line):

    """Split user input into pre-char, function part and rest."""

    lsplit = self.line_split.match(line)

    if lsplit is None:  # no regexp match returns None
        try:
            iFun,theRest = line.split(None,1)
        except ValueError:
            iFun,theRest = line,''
        pre = re.match('^(\s*)(.*)',line).groups()[0]
    else:
        pre,iFun,theRest = lsplit.groups()

    # NOTE(review): these Python 2 debug prints are always on; consider guarding them.
    print 'line:<%s>' % line # dbg
    print 'pre <%s> iFun <%s> rest <%s>' % (pre,iFun.strip(),theRest) # dbg

    return pre,iFun.strip(),theRest
#@nonl
#@-node:split_user_input
#@+node:update_cache
def update_cache(self, line):

    """Record *line* in the input cache.  This stub caches nothing."""

    return None
#@nonl
#@-node:update_cache
#@-node:prefilter & helpers
#@-node:class LeoShell
#@+node:ipmagic & palias
def ipmagic(s):
    """Forward a magic-command string to IPython, tracing the call."""
    g.trace()
    return IPython.iplib.ipmagic(s)

def ipalias(s):
    """Forward an alias-command string to IPython, tracing the call."""
    g.trace()
    return IPython.iplib.ipalias(s)
#@nonl
#@-node:ipmagic & palias
#@+node:esc_quotes
def esc_quotes(s):

    """Return s with quotes escaped, delegating to IPython's helper."""

    return IPython.iplib.esc_quotes(s)
#@nonl
#@-node:esc_quotes
#@-node:Ipython Shell
#@+node:keyPressed
def keyPressed( self, event ):

    """Master key handler (Jython/Swing): dispatch to the emacs master command,
    then handle newline auto-indent, auto-completion and soft tabs."""

    << create the command >>
    self.kTconsume = self.kRconsume = consume = self.emacs.masterCommand(event,command)
    if consume:
        # Block the event from going elsewhere, like the DocumentModel.
        event.consume()
        return

    kc = event.getKeyChar()
    if self.tab_for_colon and kc == '\n':
        event.consume()
        self.insertPreviousLeadAndNewline()
    # NOTE(review): this is 'if', not 'elif' -- a completer keyed on '\n' would
    # also run after the newline branch above; confirm that is intended.
    if self.completers.has_key(kc):
        << handle auto completion >>
    elif kc == '\t' and self.tab_width == -4:
        << handle auto-tabbing >>
#@+node:<< create the command >>
# Build the textual command name from the key event's modifiers and key text.
modifiers = event.getModifiers()
mtxt = event.getKeyModifiersText(modifiers)
ktxt = event.getKeyText(event.getKeyCode())

if mtxt == ktxt:
    # A bare modifier key (e.g. pressing Alt alone): use it directly.
    command = mtxt
else:
    # Bug fix: the % formatting must be parenthesized before calling .strip().
    # Attribute access binds tighter than %, so the original expression
    # '%s %s' % (mtxt,ktxt).strip() called .strip() on the tuple itself,
    # raising AttributeError at runtime.
    command = ('%s %s' % (mtxt,ktxt)).strip()
#@-node:<< create the command >>
#@+node:<< handle auto completion >>
editor = self.emacs.editor
doc = editor.getDocument()
pos = editor.getCaretPosition()
try:
    # Don't auto-close when the previous character is a quote (inside a string).
    pc = doc.getText( pos -1, 1 )
    if pc in ( '"', "'" ): return
except: pass

event.consume()
self.kTconsume = True
self.kRconsume = True
# Insert the typed opener plus its matching closer, then put the caret between them.
ac = self.completers[ kc ]
doc.insertString( pos, '%s%s' %( kc, ac ), None )
editor.setCaretPosition( pos + 1 )
if hasattr(self.emacs.c.frame.body.editor, "autocompleter"):
    self.emacs.c.frame.body.editor.autocompleter.hideAutoBox() 
#@nonl
#@-node:<< handle auto completion >>
#@+node:<< handle auto-tabbing >>
# Replace the tab keystroke with spaces (tab_width == -4 means 4-space soft tabs).
self.kTconsume = True
self.kRconsume = True
event.consume()
editor = self.emacs.editor
doc = editor.getDocument()
pos = editor.getCaretPosition()
try:
    doc.insertString( pos, " " * 4, None )
except: pass # Best-effort: ignore document insertion failures.
#@nonl
#@-node:<< handle auto-tabbing >>
#@-node:keyPressed
#@+node:pyclbr module (parses python)
@ This is probably a better way of parsing Python text.
It does not import the module, so it is safe for untrusted code.
@c
import pyclbr # Python Command Line Browser support.
import sys
print '*' * 40 ; print
# The first list is immediately superseded by the hard-coded Pmw test path
# below (note the trailing comma, which makes it a 1-tuple).
fileNames = ("leoCommands.py","leo.py","leoAtFile.py")
fileNames = (r"c:\Python23\Lib\site-packages\Pmw\Pmw_1_1\lib\PmwPanedWidget.py"),

for fileName in fileNames:
    dir,file = g.os_path_split(fileName)
    moduleName,ext = g.os_path_splitext(file)
    # readmodule_ex parses the module source without importing it.
    moduleDict = pyclbr.readmodule_ex(moduleName,[dir] + sys.path)
    print "module", moduleName,'-' * 40
    items = []
    for funcOrClass in moduleDict.keys():
        o = moduleDict.get(funcOrClass) # o is a descriptor.
        try:
            mdict = o.methods # Fails for functions.
            items.append((int(o.lineno),"*class",o.name),)
            mkeys = mdict.keys()
            for method in mkeys:
                lineno = mdict.get(method)
                items.append((int(lineno),"method",method),)
        except AttributeError:
            # funcOrClass is a function descriptor
            items.append((int(o.lineno),"function",o.name),)
    # Sort by line number so the report follows source order.
    items.sort()
    for line,kind,name in items:
        print "%4d %8s %s" % (line,kind,name)
#@nonl
#@-node:pyclbr module (parses python)
#@+node:Setting Tk config values safely
import Tkinter as Tk

<< documentation about how to set general options >>
t = Tk.Text()

print '-' * 20

settings = (
    ('height','xyz'),
    ('width',30),
    ('xyzzy',2),
)

widget_keys = t.keys() # List of all valid settings for this widget.
widget_keys.sort()

# Make a list of valid settings, and warn about invalid settings.
valid_settings = []
for key,val in settings:
    if key in widget_keys:
        setting = key,val
        valid_settings.append(setting)
    else:
        s = "'%s' is not a valid Tk option for this widget" % key
        print s ; g.es(s,color='blue')
valid_settings.sort()

print 'before changes...'
for key,val in valid_settings:
    print '%s = %s' % (key,str(t.cget(key)))

for key,val in valid_settings:
    d = {key:val}
    try:
        if 1: # The preferred way, using the 'extended call syntax'.
            # This was introduced in Python 2.0.
            t.configure(**d)
        else: # The Python 1.x way.  Deprecated since Python 2.3.
            apply(t.configure,[],d)
    except Tk.TclError:
        s = "Tk exception setting '%s' to %s" % (key,repr(val))
        print s ; g.es(s,color='blue')

print 'after changes...'
for key,val in valid_settings:
    print '%s = %s' % (key, str(t.cget(key)))

if 0:
    print ; print 'all keys...'
    for key in widget_keys:
        print '%s = %s' % (key, str(t.cget(key)))
#@nonl
#@+node:<< documentation about how to set general options >>
@nocolor
@
The keyword argument syntax is of course much more elegant, and less error prone. However, for compatibility with existing code, Tkinter still supports the older syntax. You shouldn't use this syntax in new programs, even if it might be tempting in some cases. For example, if you create a custom widget which needs to pass configuration options along to its parent class, you may come up with something like:

@color

    def __init__(self, master, **kw):
        Canvas.__init__(self, master, kw) # kw is a dictionary

@nocolor
This works just fine with the current version of Tkinter, but it may not work with future versions. A more general approach is to use the apply function:
@color

    def __init__(self, master, **kw):
        apply(Canvas.__init__, (self, master), kw)

@nocolor
The apply function takes a function (an unbound method, in this case), a tuple with arguments (which must include self since we're calling an unbound method), and optionally, a dictionary which provides the keyword arguments.

--------- Apply is deprecated ---------

apply( function, args[, keywords]) 

The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence. The function is called with args as the argument list; the number of arguments is the length of the tuple. If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the argument list.

Calling apply() is different from just calling function(args), since in that case there is always exactly one argument. The use of apply() is equivalent to function(*args, **keywords). Use of apply() is not necessary since the ``extended call syntax,'' as used in the last example, is completely equivalent. 

Deprecated since release 2.3. Use the extended call syntax instead, as described above.
#@-node:<< documentation about how to set general options >>
#@-node:Setting Tk config values safely
#@+node:Zip files experiments
class ZipFile(  	file[, mode[, compression]])
    Open a ZIP file, where file can be either a path to a file (a string) or a file-like object. The mode parameter should be 'r' to read an existing file, 'w' to truncate and write a new file, or 'a' to append to an existing file. If mode is 'a' and file refers to an existing ZIP file, additional files are added to it. If file does not refer to a ZIP file, then a new ZIP archive is appended to the file. This is meant for adding a ZIP archive to another file, such as python.exe. Using

cat myzip.zip >> python.exe

    also works, and at least WinZip can read such files. compression is the ZIP compression method to use when writing the archive, and should be ZIP_STORED or ZIP_DEFLATED; unrecognized values will cause RuntimeError to be raised. If ZIP_DEFLATED is specified but the zlib module is not available, RuntimeError is also raised. The default is ZIP_STORED. 
#@nonl
#@+node:@url http://docs.python.org/lib/module-zipfile.html
#@-node:@url http://docs.python.org/lib/module-zipfile.html
#@+node:write zip
# Leo script: write a small test zip archive.
import zipfile

# Destination archive (test sandbox path).
path = r'c:\prog\leoCVS\leo\test\ziptest.zip'

# Create (or truncate) the archive using deflate compression.
zf = zipfile.ZipFile(path,'w',zipfile.ZIP_DEFLATED)
# writestr(arcname, data): the member name comes first, then its
# contents.  The original call had the two arguments reversed, which
# created a member literally named "This is a test: EKR".
zf.writestr('ziptest','This is a test: EKR')
zf.close()
#@nonl
#@-node:write zip
#@+node:read zip
# Leo script: dump the member names and contents of a test zip archive.
import zipfile

# The second assignment wins: only ziptest.zip is actually read.
path = r'c:\prog\leoCVS\leo\src\leoPy.leo'
path = r'c:\prog\leoCVS\leo\test\ziptest.zip'

if zipfile.is_zipfile(path):
    try:
        zf = zipfile.ZipFile(path) # Default mode is 'r'.
        names = zf.namelist()
        for name in names:
            print 'name',repr(name),'contents',zf.read(name)
        zf.close()
    except Exception:
        g.es_exception() # Report the traceback in Leo's log pane.
        zf = None
#@nonl
#@-node:read zip
#@-node:Zip files experiments
#@+node:Zope test
@color

import ZODB
import ZODB.FileStorage

storage = ZODB.FileStorage.FileStorage(r"c:\prog\zopeTemp\leo.fs")
# g.trace(storage)
db = ZODB.DB(storage)
try:
    try:
        print '-' * 20
        print 'opening ZODB'
        connection = db.open()
        g.trace(connection)
        root = connection.root()
        if 1:
            t = get_transaction()
            t.begin()
            # root.clear()
            root ['count'] = root.get('count',0) + 1
            t.commit()
        g.trace(root)
        connection.close()
    except Exception:
        g.es_exception()
finally:
    print 'closing ZODB'
    db.close()
#@-node:Zope test
#@-node:Prototypes
#@+node:Reports
#@+node:Count pages
# Leo script: estimate the printed size of the outline.
nodes = 0 ; lines = 0

# Visit each distinct tnode only once, even when it is cloned.
c.clearAllVisited()
for p in c.allNodes_iter():
    if not p.v.t.isVisited():
        p.v.t.setVisited()
        nodes += 1
        lines += len(g.splitLines(p.bodyString()))

# Heuristic: 50 lines per page; each headline counts as 10 lines.
# NOTE: '/' is integer division here (Python 2).
pages = ((nodes * 10) + lines) / 50
s = "%d nodes,  %d lines, %d pages" % (nodes,lines,pages)
print s ; g.es(s)
#@nonl
#@-node:Count pages
#@+node:Count separate nodes (tnodes)
# p = g.findTopLevelNode("Code")

tnodes = {} ; count = 0
for p in p.self_and_subtree_iter():
    tnodes[p.v.t]=p.v.t
    count += 1

s = "%4s: %d vnodes, %d distinct" % ("Code",count,len(tnodes.keys()))
g.es_print(s)

tnodes = {} ; count = 0
for p in c.allNodes_iter():
    tnodes[p.v.t]=p.v.t
    count += 1

s = "%4s: %d vnodes, %d distinct" % ("All",count,len(tnodes.keys()))
g.es_print(s)
#@nonl
#@-node:Count separate nodes (tnodes)
#@+node:Count total, visible nodes
total,visible = 0,0


for p in c.allNodes_iter():
    total += 1

p = c.rootPosition()
while p:
    visible += 1
    p.moveToVisNext(c)

print "total,visible",total,visible
#@nonl
#@-node:Count total, visible nodes
#@+node:Dump fileIndex
for p in c.allNodes_iter():
    print p.v.t.fileIndex
#@nonl
#@-node:Dump fileIndex
#@+node:Print all headlines, properly indented
for p in c.allNodes_iter():
    print p.level()*' ',p.headString()
#@-node:Print all headlines, properly indented
#@+node:Print all uAs (unknown attributes)
for p in c.allNodes_iter():
    h = p.headString()
    if hasattr(p.v,'unknownAttributes'):   print 'v',h,p.v.unknownAttributes
    if hasattr(p.v.t,'unknownAttributes'): print 't',h,p.v.t.unknownAttributes
#@nonl
#@-node:Print all uAs (unknown attributes)
#@+node:Print default font
font = g.app.config.defaultFont

print font.cget("family"), font.cget("weight")
#@nonl
#@-node:Print default font
#@+node:Print gnx
print "gnx", p.v.t.fileIndex, p.headString()
#@nonl
#@-node:Print gnx
#@+node:Print sys.path
import os
import sys
for s in sys.path:
    exists = os.path.exists(s)
    print "%5s %s" % (exists,s)
#@nonl
#@-node:Print sys.path
#@+node:Print tnodeList's
print '-'*20
for p in c.allNodes_iter():
    if hasattr(p.v.t,"tnodeList"):
        print p,p.v.t.tnodeList
#@nonl
#@-node:Print tnodeList's
#@+node:Report loaded plugins
print "Loaded plugins..."

for s in g.app.loadedPlugins:
    print s
#@nonl
#@-node:Report loaded plugins
#@+node:Report newlines
path = g.os_path_join(g.app.loadDir,"leo.py")

try:
    f = open(path,"rb")
    s = f.read()
    f.close()

    cr = 0 ; nl = 0
    for ch in s:
        if ch == '\r': cr += 1
        if ch == '\n': nl += 1

    m = "cr %d, nl %d %s" % (cr,nl,path)
    print m ; g.es(m)

except IOError:
    print "can not open",path
#@nonl
#@-node:Report newlines
#@+node:Show font
config = g.app.config
body = c.frame.body.bodyCtrl

font = config.getFontFromParams(
        "body_text_font_family", "body_text_font_size",
        "body_text_font_slant",  "body_text_font_weight",
        tag = "body")

print font
print body

# body.configure(font=font)
#@nonl
#@-node:Show font
#@+node:Show settings
import tkFont

@others

# Body pane.
fn = c.frame.body.cget("font")
font = tkFont.Font(font=fn)
name,size,slant,weight = getFontSettings(font)
print "body:",fn,name,size,slant,weight

if 0:

    # Log pane.
    fn = c.frame.log.getFontConfig()
    font = tkFont.Font(font=fn)
    name,size,slant,weight = getFontSettings(font)
    g.es("log:" + name + "," + `size` + "," + slant + "," + weight)

    # Tree pane.
    font = c.frame.tree.getFont()
    name,size,slant,weight = getFontSettings(font)
    g.es("head:" + name + "," + `size` + "," + slant + "," + weight)
#@nonl
#@+node:getFontSettings
def getFontSettings (font):
    """Return (family,size,slant,weight) queried from a Tk font object."""
    keys = ("family", "size", "slant", "weight")
    return tuple([font.cget(key) for key in keys])
#@nonl
#@-node:getFontSettings
#@-node:Show settings
#@-node:Reports
#@+node:Scripts used by commands
#@+node:@thin tangle_done.py
#@-node:@thin tangle_done.py
#@+node:@thin untangle_done.py
#@-node:@thin untangle_done.py
#@+node:Find/change scripts
# Note:  the initScriptFind script makes setting up Find/Change scripts very easy.
#@nonl
#@+node:Routines that execute script-find and script-change
#@+node:doFind...Script
def doFindScript (self):

    """The user pressed Find with the script-find radio button checked.
    Record the search type and run the find script once."""

    g.app.searchDict["type"] = "find"
    self.runFindScript()

def doFindAllScript (self):

    """The user has just pressed the Find All button with script-find radio button checked.

    N.B. Only this code is executed."""

    g.app.searchDict["type"] = "findAll"
    # Re-run the find script until it clears the "continue" flag.
    while 1:
        self.runFindScript()
        if not g.app.searchDict.get("continue"):
            break

def runFindScript (self):

    """Execute the find panel's text as a Python script (Python 2 exec
    statement).  Any exception stops a Find All loop."""

    c = self.c
    try:
        exec c.find_text in {} # Use {} to get a pristine environment.
    except:
        g.es("exception executing find script")
        g.es_exception(full=False)
        g.app.searchDict["continue"] = False # 2/1/04
#@-node:doFind...Script
#@+node:doChange...Script
def doChangeScript (self):

    """The user pressed Change with the script-change box checked.
    Record the search type and run the change script once."""

    g.app.searchDict["type"] = "change"
    self.runChangeScript()

def doChangeAllScript (self):

    """The user has just pressed the Change All button with script-change box checked.

    N.B. Only this code is executed."""

    g.app.searchDict["type"] = "changeAll"
    # Re-run the change script until it clears the "continue" flag.
    while 1:
        self.runChangeScript()
        if not g.app.searchDict.get("continue"):
            break

def runChangeScript (self):

    """Execute the change panel's text as a Python script (Python 2
    exec statement).  Any exception stops a Change All loop."""

    c = self.c
    try:
        assert(c.script_change_flag) # 2/1/04
        exec c.change_text in {} # Use {} to get a pristine environment.
    except:
        g.es("exception executing change script")
        g.es_exception(full=False)
        g.app.searchDict["continue"] = False # 2/1/04
#@nonl
#@-node:doChange...Script
#@-node:Routines that execute script-find and script-change
#@+node:Find script to check for dubious leading whitespace
# Initialize Leo's find panel using the named children of this node.
g.app.searchDict = {} # Clear dict.get("v") logic.
g.initScriptFind("Find script")

c.selectVnode(c.rootPosition())
c.redraw()
#@nonl
#@+node:Find script
# Find script: stop at the next node containing a line whose leading
# whitespace has one or more blanks followed by a tab.
v = c.currentVnode()
last_v = g.app.searchDict.get("v")
# Skip the node matched by the previous run.
if last_v and v == last_v:
    v = v.threadNext()
found = False
while v:
    lines = v.bodyString().split('\n')
    for s in lines:
        i = g.skip_ws(s,0)
        leading = s[0:i]
        if leading:
            blanks, tabs = 0,0
            for ch in leading:
                if ch == ' ':
                    blanks += 1
                if ch == '\t':
                    tabs += 1 ; break
            # A tab after one or more blanks is the dubious pattern.
            if blanks > 0 and tabs > 0:
                # g.trace(leading)
                g.es("blanks precede leading tab: " + v.headString())
                found = True ; break
    if found: break
    v = v.threadNext()
if found:
    # Remember the match so the next run resumes after it.
    g.app.searchDict["v"] = v
    c.selectVnode(v)
    c.redraw()
else:
    g.es("done",color="blue")
    g.app.searchDict["v"] = None
g.app.searchDict["continue"] = False
#@nonl
#@-node:Find script
#@+node:test
if 1:
    dubious line
#@nonl
#@-node:test
#@-node:Find script to check for dubious leading whitespace
#@+node:Find script to clean whitespace
# Initialize Leo's find panel using the named children of this node.

g.app.searchDict = {} # Clear dict.get("v") logic.
g.initScriptFind("Find script","Change script")

c.selectVnode(c.rootPosition())
c.redraw()
#@nonl
#@+node:Find script
# Find script: locate nodes containing whitespace-only lines.
v = c.currentVnode() ; d = g.app.searchDict
findAll = d.get("type") == "findAll"

# In suboutline-only mode, stop after the current node's subtree.
if findAll and c.suboutline_only_flag:
    after = v.nodeAfterTree()
else:
    after = None

found = False ; count = 0
while v and v != after and not found:
    s = v.bodyString()
    lines = s.split('\n')
    for line in lines:
        # A non-empty line that strips to nothing is all whitespace.
        if line and not line.strip():
            if findAll:
                g.es(v.headString()) ; count += 1
            else:
                c.frame.tree.expandAllAncestors(v)
                c.selectVnode(v)
                c.redraw() # Necessary to make the new node visible.
                if not findAll:
                    g.es("found node with whitespace to clean")
                found = True 
            break
    v = v.threadNext()

if not found:
    if findAll:
        g.es("found %d nodes" % (count), color="blue")
    else:
        g.es("not found")
#@-node:Find script
#@+node:Change script
# Change script for the "clean whitespace" find/change pair.
# Strips trailing whitespace from every line of the target node's body.
d = g.app.searchDict
changeAll = d.get("type") == "changeAll"
count = d.get("count",0)
if changeAll:
    # Resume after the node recorded by the previous pass.
    v = d.get("v")
    if v:
        v = v.threadNext()
        after = d.get("after")
        if v == after: v = None
    else:
        # First pass: start at the current node.
        v = c.currentVnode()
        d["count"] = 0
        after = g.choose(c.suboutline_only_flag,v.nodeAfterTree(),None)
        d["after"] = after
    d["v"] = v ; d["continue"] = v != None
else:
    v = c.currentVnode()

if v:
    s = oldText = v.bodyString()
    lines = s.split('\n')
    lines = [line.rstrip() for line in lines]
    s = '\n'.join(lines)
    # Only write back (and count) when something actually changed.
    if s != oldText:
        c.setBodyString(v,s,encoding=g.app.tkEncoding)
        if changeAll:
            g.es(v.headString()) ; d["count"] = count + 1
        else:
            c.frame.body.onBodyChanged(v,"Change",oldText=oldText) # Handles undo.
            c.frame.body.setInsertPointToStartOfLine(0)
else:
    if changeAll:
        g.es("found %d nodes" % (count), color="blue")
    else:
        g.es("done")
#@nonl
#@-node:Change script
#@-node:Find script to clean whitespace
#@+node:Find scripts to convert @doc comments to doc strings
# Initialize Leo's find panel using the named children of this node.

g.app.searchDict = {} # Clear dict.get("v") logic.
g.initScriptFind("Find script","Change script")

# Start searching at the top.
c.selectVnode(c.rootPosition())
#@nonl
#@+node:Find script
import re

docPart = re.compile("""^(@$|@ |@doc)(.*)$
^@c[ \t]*(.*?)$
^(def[ \t]*.*?:.*?)$
(.*)""", re.MULTILINE | re.DOTALL)

d = g.app.searchDict
v = c.currentVnode()
# Move past previously matched node.
last_v = d.get("v")
if last_v:
    if v == last_v: v = last_v.threadNext()
    d["v"] = None
d["m"] = None ; d["c"] = c
while v:
    m = docPart.match(v.bodyString())
    if m:
        d["m"] = m ; d["v"] = v
        c.frame.tree.expandAllAncestors(v)
        c.selectVnode(v)
        c.redraw() # Necessary to make the new node visible.
        break
    v = v.threadNext()
if not d.get("v"):
    g.es("no @doc part found",color="blue")
#@nonl
#@-node:Find script
#@+node:Change script
def replaceDocPart(m,body):
    """Rewrite one matched @doc part as a Python docstring.

    m       a match object from the docPart regex (5 groups:
            directive, doc text, text after @c, def line, rest).
    body    the body pane; a text selection limits what becomes the doc.

    Returns the replacement body text, or None to suppress replacement.
    NOTE(review): relies on the string module being importable from the
    executing script's namespace -- confirm before reuse.
    """
    # Warning: m.group(0) is the _whole_ match.
    directive = m.group(1)
    doc = m.group(2)
    blanks = m.group(3).strip()
    if blanks: blanks += "\n\n"
    else: blanks = ""
    defLine = m.group(4)
    rest = m.group(5)
    # Re-indent the doc lines one tab deep for the docstring body.
    docList = doc.split('\n')
    doc = string.join(docList,"\n\t")
    if body.hasTextSelection():  # If text is selected only that text becomes the doc part.
        sel = body.getSelectedText()
        i = doc.find(sel)
        if i > -1:
            doc = doc[:i] + doc[i + len(sel):] # Remove selected text.
            return directive + doc.rstrip() + "\n@c\n\n" + defLine + '\n\n\t"""' + sel + '"""\n' + rest
        else:
            g.es("selection should be in @doc part")
            return None # This disables any replacement.
    else:
        return blanks + defLine + '\n\n\t"""' + doc.strip() + '"""\n' + rest

# Apply the replacement to the node found by the companion find script.
d = g.app.searchDict ; c = d.get("c") ; v = d.get("v") ; m = d.get("m")
if c and v and m:
    body = c.frame.body
    oldText = v.bodyString()
    s = replaceDocPart(m,body)
    if s: # Don't make a replacement if there was an error.
        c.setBodyString(v,s,encoding=g.app.tkEncoding)
        body.onBodyChanged(v,"Change",oldText=oldText) # Handles undo.
#@nonl
#@-node:Change script
#@+node:re tests
import re

s = """@doc line 0
line 1d
line 2
@c

# a comment

def abc(self): xx

after 1
after 2"""

pat = re.compile("""^(@$|@ |@doc)(.*?)$
^@c[ \t]*(.*?)$
^(def[ \t]*.*?:.*?)$
(.*)""", re.MULTILINE | re.DOTALL)

m = pat.match(s)
print "---"
if m:
    print "doc:   ", m.group(2).strip()
    print "blanks:", m.group(3).strip()
    print "def:   ", m.group(4).strip()
    print "rest:  ", m.group(5).strip()
else:
    print "no match"
#@-node:re tests
#@+node:early find script
# This script was a breakthrough.
# Executing this script initializes the Find text from the given script string.
# To do: get the script from a named child of this node.

script = """
v = c.currentVnode()
print v
v = v.threadNext()
c.selectVnode(v)"""

g.app.searchDict = {} # Communication between search & change scripts
c.script_search_flag = True
c.find_text = script
c.frame.findPanel.init(c)
c.frame.OnFindPanel()
#@nonl
#@-node:early find script
#@-node:Find scripts to convert @doc comments to doc strings
#@+node:Other find scripts
# This file contains functions for non-interactive searching.
# You might find these useful while running other scripts.

import leo, string, re

@others
#@nonl
#@+node:changeAll
def changeAll ( commander, findPat, changePat, bodyFlag = 1 ):
    """
    Replace every occurrence of findPat with changePat in an outline.

    commander	Commands object for a Leo outline window.
    findPat		the search string.
    changePat	the replacement string.
    bodyFlag	True: change body text.  False: change headline text.

    Walks the outline via the sibling changeNext helper until it
    reports that no further match exists.
    """
    step = len(changePat)
    node, pos = commander.rootVnode(), 0
    while node is not None:
        node, pos = changeNext(node, pos, findPat, changePat, bodyFlag)
        pos += step
#@nonl
#@-node:changeAll
#@+node:changeNext
def changeNext ( v, pos, findPat, changePat, bodyFlag = 1 ):
    """
    changeNext:	use string.find() to change text in a Leo outline.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    findPat		the search string.
    changePat	the replacement string.
    bodyFlag	True: change body text.  False: change headline text.

    returns a tuple (v,pos) showing where the change occured.
    returns (None,0) if no further match in the outline was found.

    Note: if (v,pos) is a tuple returned previously from changeNext,
    changeNext(v,pos+len(findPat),findPat,changePat)
    changes the next matching string.

    NOTE(review): writes back through the global commander c, not a
    parameter -- confirm c is bound in the executing script's namespace.
    """
    n = len(findPat)
    v, pos = findNext(v, pos, findPat, bodyFlag)
    if v == None:
        return None, 0
    if bodyFlag:
        # Splice the replacement into the body text.
        s = v.bodyString()
        s = s[:pos] + changePat + s[pos+n:]
        c.setBodyString(v,s)
    else:
        # Splice the replacement into the headline.
        s = v.headString()
        s = s[:pos] + changePat + s[pos+n:]
        c.setHeadString(v,s)
        # Removed a stray debug print that referenced an undefined name
        # (result) and would have raised NameError on this branch.
    return v, pos
#@nonl
#@-node:changeNext
#@+node:changePrev
def changePrev ( v, pos, findPat, changePat, bodyFlag = 1 ):
    """
    Replace one occurrence of findPat with changePat, searching backward.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    findPat		the search string.
    changePat	the replacement string.
    bodyFlag	True: change body text.  False: change headline text.

    Returns (v,pos) for the change made, or (None,0) when no earlier
    match exists.  Pass pos-len(findPat) from a previous result to
    change the next matching string.  Writes back through the global
    commander c.
    """
    width = len(findPat)
    v, pos = findPrev(v, pos, findPat, bodyFlag)
    if v is None:
        return None, 0
    if bodyFlag:
        old = v.bodyString()
    else:
        old = v.headString()
    # Splice the replacement over the matched span.
    new = old[:pos] + changePat + old[pos+width:]
    if bodyFlag:
        c.setBodyString(v,new)
    else:
        c.setHeadString(v,new)
    return v, pos
#@nonl
#@-node:changePrev
#@+node:findAll
def findAll(c,pattern,bodyFlag=1):
    """
    Search an entire Leo outline for a plain-string pattern.

    c        commander for a Leo outline window.
    pattern		the search string.
    bodyFlag	True: search body text. False: search headline text.

    Returns a list of (v,pos) tuples, one per match; [] when the
    outline contains no match.  Relies on the sibling findNext helper.
    """
    matches = []
    step = len(pattern)
    node, pos = c.rootVnode(), 0
    while node is not None:
        node, pos = findNext(node,pos,pattern,bodyFlag)
        if node is not None:
            matches.append((node, pos))
        pos += step
    return matches
#@nonl
#@-node:findAll
#@+node:findNext
def findNext ( v, pos, pattern, bodyFlag = 1 ):
    """
    Locate the next occurrence of pattern in a Leo outline via str.find.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    pattern		the search string.
    bodyFlag	True: search body text.  False: search headline text.

    Returns (v,pos) for the first match, or (None,0) when the rest of
    the outline has no match.  Passing pos+len(pattern) from a previous
    result continues the search at the next match.
    """
    node, start = v, pos
    while node is not None:
        if bodyFlag:
            text = node.bodyString()
        else:
            text = node.headString()
        hit = text.find(pattern, start)
        if hit > -1:
            return node, hit
        # No match here: move to the next node, restart at offset 0.
        node, start = node.threadNext(), 0
    return None, 0
#@nonl
#@-node:findNext
#@+node:findPrev
def findPrev ( v, pos, pattern, bodyFlag = 1 ):
    """
    Locate the previous occurrence of pattern via str.rfind.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    pattern		the search string
    bodyFlag	True: search body text.  False: search headline text.

    Returns (v,pos) for the match, or (None,0) when no earlier match
    exists.  Passing pos-len(pattern) from a previous result continues
    the search.  NOTE(review): after moving to an earlier node the scan
    end is set to -1, which excludes that node's final character --
    preserved from the original; confirm whether intended.
    """
    node, end = v, pos
    while node is not None:
        if bodyFlag:
            text = node.bodyString()
        else:
            text = node.headString()
        hit = text.rfind(pattern, 0, end)
        if hit > -1:
            return node, hit
        node, end = node.threadBack(), -1
    return None, 0
#@nonl
#@-node:findPrev
#@+node:reChangeAll
def reChangeAll ( commander, findPat, changePat, bodyFlag, reFlags = None ):
    """
    Replace every regex match of findPat with changePat in an outline.

    commander	Commands object for a Leo outline window.
    findPat		the search string (regular expression).
    changePat	the replacement string.
    bodyFlag	True: change body text.  False: change headline text.
    reFlags		flags argument to re.search().

    Walks the outline via the sibling reChangeNext helper until it
    reports that no further match exists.
    """
    step = len(changePat)
    node, pos = commander.rootVnode(), 0
    while node is not None:
        node, mo, pos = reChangeNext(
            node, pos, findPat, changePat, bodyFlag, reFlags)
        pos += step
#@nonl
#@-node:reChangeAll
#@+node:reChangeNext
def reChangeNext ( v, pos, findPat, changePat, bodyFlag, reFlags = None ):
    """
    reChangeNext: use re.search() to change text in a Leo outline.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    findPat		the search string.
    changePat	the replacement string.
    bodyFlag	True: change body text.  False: change headline text.
    reFlags		flags argument to re.search().

    returns a tuple (v,mo,pos) showing where the change occured.
    returns (None,None,0) if no further match in the outline was found.

    Note: if (v,pos) is a tuple returned previously from reChangeNext,
    reChangeNext(v,pos+len(findPat),findPat,changePat,bodyFlag)
    changes the next matching string.

    NOTE(review): writes back through the global commander c, not a
    parameter -- confirm c is bound in the executing script's namespace.
    """
    n = len(findPat)
    v, mo, pos = reFindNext(v, pos, findPat, bodyFlag, reFlags)
    if v == None:
        return None, None, 0
    if bodyFlag:
        # Splice the replacement into the body text.
        # (Removed a stray debug print of s/findPat/changePat, which
        # the sibling reChangePrev never had.)
        s = v.bodyString()
        s = s[:pos] + changePat + s[pos+n:]
        c.setBodyString(v,s)
    else:
        # Splice the replacement into the headline.
        s = v.headString()
        s = s[:pos] + changePat + s[pos+n:]
        c.setHeadString(v,s)
    return v, mo, pos
#@nonl
#@-node:reChangeNext
#@+node:reChangePrev
def reChangePrev ( v, pos, findPat, changePat, bodyFlag, reFlags = None ):
    """
    reChangePrev: use re.search() to change text in a Leo outline.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    findPat		the search string.
    changePat	the replacement string.
    bodyFlag	True: change body text.  False: change headline text.
    reFlags		flags argument to re.search().

    returns a tuple (v,mo,pos) showing where the change occured.
    returns (None,None,0) if no further match in the outline was found.

    Note: if (v,pos) is a tuple returned previously from reChangePrev,
    reChangePrev(v,pos-len(findPat),findPat,changePat,bodyFlag)
    changes the next matching string.

    NOTE(review): writes back through the global commander c, not a
    parameter -- confirm c is bound in the executing script's namespace.
    """
    n = len(findPat)
    v, mo, pos = reFindPrev(v, pos, findPat, bodyFlag, reFlags)
    if v == None:
        return None, None, 0
    if bodyFlag:
        s = v.bodyString()
        s = s[:pos] + changePat + s[pos+n:]
        # Fixed typo: was c.setBodString, which raised AttributeError.
        c.setBodyString(v,s)
    else:
        s = v.headString()
        s = s[:pos] + changePat + s[pos+n:]
        c.setHeadString(v,s)
    return v, mo, pos
#@nonl
#@-node:reChangePrev
#@+node:reFindAll
def reFindAll(c,findPat,bodyFlag,reFlags=None):
    """
    Search an entire Leo outline with re.search.

    c	     commander for a Leo outline window.
    findPat		the search string (regular expression).
    bodyFlag	True: search body text.  False: search headline text.
    reFlags		flags argument to re.search().

    Returns a list of (v,mo,pos) tuples, one per match; [] when the
    outline contains no match.  Relies on the sibling reFindNext helper.
    """
    matches = []
    step = len(findPat)
    node, pos = c.rootVnode(), 0
    while node is not None:
        node, mo, pos = reFindNext(node,pos,findPat,bodyFlag,reFlags)
        if node is not None:
            matches.append((node, mo, pos))
        pos += step
    return matches
#@nonl
#@-node:reFindAll
#@+node:reFindNext
def reFindNext ( v, pos, pattern, bodyFlag, reFlags = None ):
    """
    Find pattern in a Leo outline using re.search, scanning forward.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    pattern		the search string (regular expression).
    bodyFlag	True: search body text.  False: search headline text.
    reFlags		the flags argument to re.search()

    Returns (v,mo,pos) where mo is the match object and pos is the
    absolute index of the match; (None,None,0) when no match remains.
    Passing pos+len(pattern) from a previous result finds the next match.
    """
    node, start = v, pos
    while node is not None:
        if bodyFlag:
            text = node.bodyString()
        else:
            text = node.headString()
        # Search only the tail of the text, then rebase the match index.
        if reFlags is None:
            mo = re.search(pattern, text[start:])
        else:
            mo = re.search(pattern, text[start:], reFlags)
        if mo is not None:
            return node, mo, start + mo.start()
        node, start = node.threadNext(), 0
    return None, None, 0
#@nonl
#@-node:reFindNext
#@+node:reFindPrev
def reFindPrev ( v, pos, pattern, bodyFlag, reFlags = None ):
    """
    Find pattern with re.search, walking the outline backward.

    v	 		the vnode to start the search.
    pos			the position within the body text of v to start the search.
    pattern		the search string (regular expression).
    bodyFlag	True: search body text.  False: search headline text.
    reFlags		the flags argument to re.search()

    Returns (v,mo,pos) where mo is the match object and pos is the
    absolute index of the match; (None,None,0) when no match remains.

    Note 1: nodes are visited in reverse (threadBack) order, but the
    text of each node is scanned in the _forward_ direction.

    Note 2: pass pos-len(pattern) from a previous result to continue.
    """
    node, start = v, pos
    while node is not None:
        if bodyFlag:
            text = node.bodyString()
        else:
            text = node.headString()
        # Forward scan within this node's text.
        if reFlags is None:
            mo = re.search(pattern, text[start:])
        else:
            mo = re.search(pattern, text[start:], reFlags)
        if mo is not None:
            return node, mo, start + mo.start()
        # Move to the previous node in outline order.
        node, start = node.threadBack(), 0
    return None, None, 0
#@nonl
#@-node:reFindPrev
#@+node:lineAtPos
def lineAtPos ( s, pos ):
    """
    lineAtPos: return the line of a string containing the given index.

    s		a string
    pos		an index into s

    Returns "" for an empty string.  An out-of-range pos is clamped to
    the last valid index, so the final line is returned.
    """
    if len(s) < 1:
        return ""
    # Clamp: the original test was pos > len(s), so pos == len(s)
    # slipped through and raised IndexError in the scan below.
    if pos >= len(s):
        pos = len(s)-1

    # Scan backward for the start of the line containing pos.
    while pos > 0:
        if s[pos] == '\n':
            pos = pos + 1
            break
        else:
            pos = pos - 1
    # The line runs from pos to the next newline (or end of string).
    # (Renamed the local from 'list' to avoid shadowing the builtin.)
    lines = s[pos:].split("\n")
    return lines[0]
#@nonl
#@-node:lineAtPos
#@+node:printFindList
def printFindList( findList, bodyFlag = 1 ):
    """
    printFindList:	Print matching lines from the list.

    findList:		a list of (v,pos) tuples returned from findAll().
    bodyFlag:		True: take text from body.  False: from headline.
    Only the line containing the match is printed.
    Lines are printed once for each match found on the line.
    """
    for v,pos in findList:
        if v != None:
            if bodyFlag:
                s = v.bodyString()
            else:
                s = v.headString()
            # lineAtPos extracts just the line containing the match.
            print lineAtPos(s, pos)
#@-node:printFindList
#@-node:Other find scripts
#@-node:Find/change scripts
#@-node:Scripts used by commands
#@+node:Text editing scripts
#@+node:@@button Create headlines from body
# Create child nodes whose headline are all lines in the body.
s = p.bodyString()
lines = g.splitLines(s)
lines = [z.strip() for z in lines if z.strip()]

c.beginUpdate()
try:
    for line in lines:
        p2 = p.insertAsLastChild()
        while line.find('  ') > -1:
            line = line.replace('  ',' ')
        i = line.find('#')
        if i == -1:
            p2.initHeadString(line)
        else:
            # Put comments in the body.
            p2.initHeadString(line[:i].strip())
            c.setBodyString(p2,line[i:].strip())
finally:
    c.endUpdate()
#@nonl
#@-node:@@button Create headlines from body
#@+node:add tab after each :
@color

s = p.bodyString()

result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find(':')
    if i > -1:
        result.append(line[:i+1] + ' '*4 + line[i+1:])
    else:
        result.append(line)

s = ''.join(result)
g.trace(s)
c.setBodyString(p,s)
#@-node:add tab after each :
#@+node:add tab before each =
@color

s = p.bodyString()

result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find('=')
    if i > -1:
        result.append(line[:i] + ' '*4 + line[i:])
    else:
        result.append(line)

s = ''.join(result)
g.trace(s)
# c.setBodyString(p,s)
#@nonl
#@-node:add tab before each =
#@+node:Clone all nodes to child node
# Clone all nodes specified in the child node called 'to be cloned'
root = p.copy()
child = p.firstChild()
assert(child.headString().lower().strip() == 'to be cloned')
s = child.bodyString()
lines = g.splitLines(s)

seen = {}
c.beginUpdate()
try:
    for p in c.allNodes_iter():
        if not seen.get(p.v.t):
            seen[p.v.t] = True
            h = p.headString().strip()
            for line in lines:
                if h == line.strip():
                    print 'found',h
                    clone = p.clone()
                    clone.moveToLastChildOf(root)
finally:
    c.endUpdate()
#@nonl
#@+node:To be cloned
@test getHandlersForTag
@test unit testing with embedded class
@test move-lines-up
@test helpForMinibuffer
@test g.pdb
@test g.es_exception
@test g.es_trace
@test pasteText
@test g.getScript strips crlf
@test forward-end-word (start of word)
@test moveToHelper
@test scrollHelper
@test c.contractAllHeadlines
@test g.create_temp_file
@test fc.deleteFileWithMessage
@test compareHelper
@test apropos_bindings
@test apropos_find_commands
@test atFile_rename
@test g.removeExtraLws
@test moveUpOrDownHelper
@test compareHelper-warning
@test directiveKind4
@test isPlainKey
@test g.convertPythonIndexToRowCol
@test setMoveCol
@test leoBody.getSelectionAreas & test
@test parseLeoSentinel
@test selfInsertCommand-2 (replacing tabs)
@test extendHelper
@test onClick
@test selfInsertCommand-1
@test g.convertRowColToPythonIndex
@test round trip toUnicode toEncodedString
@test that all @test nodes in derived files start with if g.unitTesting
@test g.reportBadChars
#@-node:To be cloned
#@-node:Clone all nodes to child node
#@+node:delete from ':' to end (script)
@color

s = p.bodyString()

result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find(':')
    if i > -1:
        result.append(line[:i])
        if line and line[-1] == '\n':
            result.append('\n')
    else:
        result.append(line)

s = ''.join(result)
c.setBodyString(p,s)
#@nonl
#@-node:delete from ':' to end (script)
#@+node:delete from first blank
@color

s = p.bodyString()

result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find(' ')
    if i > -1:
        result.append(line[:i])
        if line and line[-1] == '\n':
            result.append('\n')
    else:
        result.append(line)

s = ''.join(result)
c.setBodyString(p,s)
#@nonl
#@-node:delete from first blank
#@+node:insert begin/endUpdate
# Wrap the current body-text selection in c.beginUpdate()/c.endUpdate(),
# indenting the selected lines one tab, with undo support.
undoer = c.undoer
w = c.frame.body.bodyCtrl

prefix = '''\
    c.beginUpdate()
    try:'''
suffix = '''\
    finally:
        c.endUpdate()'''

undoData = undoer.beforeChangeNodeContents(p)

i, j = g.app.gui.getSelectionRange(w)
if i == j:
    # No selection: insert an empty, indented body between the two halves.
    w.insert(i,prefix+'\n\t\t\n'+suffix)
else:
    sel = w.get(i,j)
    indented = ''.join(['\t'+line for line in g.splitLines(sel)])
    w.delete(i,j)
    w.insert(i,prefix+'\n'+indented+'\n'+suffix)

undoer.afterChangeNodeContents(p,'add-begin/endUpdate',undoData)
#@nonl
#@+node:Test


    line one
    line two
#@nonl
#@-node:Test
#@-node:insert begin/endUpdate
#@+node:Remove blank trailing lines
# Strip trailing whitespace and blank lines from every node in the
# selected subtree, reporting each changed headline in the log.
for p in p.self_and_subtree_iter():
    old = p.bodyString()
    new = old.rstrip()
    if old != new:
        c.setBodyString(p,new)
        g.es(p.headString())
#@nonl
#@-node:Remove blank trailing lines
#@+node:Remove all @test nodes
count = 0
c.beginUpdate()
try:
    p = c.rootVnode()
    while p:
        h = p.headString()
        if h.startswith('@test '):
            next = p.nodeAfterTree()
            print 'deleting',h
            count += 1
            p.doDelete()
            p = next
        else:
            if h.startswith('@thin'): print h
            p.moveToThreadNext()
finally:
    c.endUpdate()
print '%d @test nodes deleted' % count
#@nonl
#@-node:Remove all @test nodes
#@+node:replace =.* with = None
@color

s = p.bodyString()

result = []
lines = g.splitLines(s)
for line in lines:
    i = line.find('=')
    if i > -1:
        result.append(line[:i] + '= None')
        if line and line[-1] == '\n':
            result.append('\n')
    else:
        result.append(line)

s = ''.join(result)
c.setBodyString(p,s)
#@nonl
#@-node:replace =.* with = None
#@+node:Set trailing ws script
'''This script quickly ends all nodes in the selected tree with exactly one
newline and marks all @thin/@file nodes dirty if any of their descendants have
been changed.'''

@others

# NOTE: pass1 and pass2 are defined in child nodes; Leo's @others directive
# above splices them in before this script body runs.
c.beginUpdate()
try:
    p = c.currentPosition()
    pass1(p) # Make the changes and do p.v.t.setDirty for all changed nodes p.
    pass2() # Quickly set all @thin/@file nodes dirty if any of their descendents are dirty.
    g.es_print('done')
finally:
    c.endUpdate()
#@nonl
#@+node:pass1
def pass1(root):

    '''Normalize every node in root's subtree so its body ends with exactly
    one newline, marking each changed tnode dirty without redrawing.

    Clones share a tnode, so each tnode is processed at most once.'''

    count = 0 ; seen = {}
    for p in root.self_and_subtree_iter():
        if seen.get(p.v.t): continue
        # Fix: mark *every* visited tnode, not just changed ones, so clones
        # of unchanged nodes are not pointlessly re-examined.
        seen [p.v.t] = True
        s = p.bodyString()
        if s:
            s2 = s.rstrip() + '\n'
            if s2 != s:
                s2 = g.toUnicode(s2,g.app.tkEncoding,reportErrors=True)
                p.v.t.bodyString = s2
                p.v.t.setDirty() # Just set the bit: do **not** redraw!
                count += 1

    g.es_print("pass 1: %d nodes converted" % count)
#@-node:pass1
#@+node:pass2
def pass2():

    '''Quickly mark all changed @file nodes dirty.'''

    # Important: because of clones a dirty node may live under several
    # @file trees, so every node in the outline must be examined.
    dirtyCount = 0
    for p in c.allNodes_iter():
        if not p.isAnyAtFileNode():
            continue
        root = p.copy()
        for child in root.self_and_subtree_iter():
            if child.v.t.isDirty():
                root.setDirty()
                dirtyCount += 1
                break

    g.es_print("pass 2: %d @file/@thin nodes set dirty" % dirtyCount)
#@-node:pass2
#@-node:Set trailing ws script
#@+node:split defs in body text to child nodes
# Script to split a node containing multiple defs into child nodes.

def createChild (parent,body,line):
    '''Create a last child of parent whose body is ''.join(body).

    The headline is the function name taken from line (the text between
    'def ' and the first '('), or the whole stripped remainder of the
    line when it contains no '('.'''
    p = parent.insertAsLastChild()
    h = line [3:].strip()
    # Bug fix: func was previously unbound when the line had no '(',
    # raising UnboundLocalError in the g.choose call below.
    func = ''
    i = h.find('(')
    if i > -1:
        func = h [: i].strip()
    h2 = g.choose(func,func,h)
    p.setHeadString(h2)
    p.setTnodeText(''.join(body))

# Split the selected node's body at each top-level 'def', moving each
# chunk into its own child node (via createChild), with undo support.
s = p.bodyString()
if s.strip():
    undoData = c.undoer.beforeChangeTree(p)
    c.beginUpdate()
    try:
        chunk = []
        pendingDef = ''
        changed = False
        for line in g.splitLines(s):
            if g.match_word(line,0,'def'):
                # Flush the previous def's chunk before starting a new one.
                if chunk and pendingDef:
                    createChild(p,chunk,pendingDef)
                    chunk = []
                    changed = True
                pendingDef = line
            chunk.append(line)
        if chunk and pendingDef:
            createChild(p,chunk,pendingDef)
            changed = True
        if changed:
            c.setChanged(True)
            c.setBodyString(p,'')
    finally:
        c.undoer.afterChangeTree(p,'split-defs',undoData)
        c.endUpdate()
#@nonl
#@-node:split defs in body text to child nodes
#@+node:stripBlankLines
print '-' * 60

# Tag the start of the command.
c.undoer.setUndoParams("Change All",c.currentPosition())

n = 0 ; total = 0
for p in c.currentPosition().self_and_subtree_iter():
    total += 1
    body = p.bodyString()
    s = g.stripBlankLines(body)
    if s != body:
        n += 1
        c.setBodyString(p,s,g.app.tkEncoding)
        c.undoer.setUndoParams("Change",p,
            oldText=body,newText=s,oldSel=None, newSel=None)

# Tag the end of the command.
c.undoer.setUndoParams("Change All",c.currentPosition()) 

print "%d nodes changed (%d total)" % (n,total)
#@+node:Test
@ignore
a
  b

c  

last
#@-node:Test
#@-node:stripBlankLines
#@-node:Text editing scripts
#@-all
#@nonl
#@-node:@file leoScripts.txt
#@-leo
