From 4d4a9dc2de1c52bb2117f89824f0a2213e90b55b Mon Sep 17 00:00:00 2001 From: Graham Cole <chckens@sucs.org> Date: Sat, 15 Nov 2008 16:08:37 +0000 Subject: [PATCH] add planet (venus branch) to our SVN --- lib/venus/.htaccess | 22 + lib/venus/AUTHORS | 7 + lib/venus/LICENCE | 84 + lib/venus/README | 20 + lib/venus/THANKS | 41 + lib/venus/TODO | 7 + lib/venus/docs/config.html | 164 + lib/venus/docs/contributing.html | 67 + lib/venus/docs/docs.css | 100 + lib/venus/docs/docs.js | 54 + lib/venus/docs/etiquette.html | 48 + lib/venus/docs/filters.html | 105 + lib/venus/docs/img/shadowAlpha.png | Bin 0 -> 3403 bytes lib/venus/docs/index.html | 55 + lib/venus/docs/installation.html | 129 + lib/venus/docs/migration.html | 42 + lib/venus/docs/normalization.html | 107 + lib/venus/docs/templates.html | 184 + lib/venus/docs/venus.svg | 109 + .../filters/categories/categories.xslt | 82 + .../examples/filters/guess-language/README | 37 + .../examples/filters/guess-language/en.data | 15131 ++++++++++ .../examples/filters/guess-language/fr.data | 22710 ++++++++++++++++ .../filters/guess-language/guess-language.py | 58 + .../filters/guess-language/learn-language.py | 25 + .../filters/guess-language/trigram.py | 188 + .../filters/xpath-sifter/xpath-sifter.ini | 55 + lib/venus/examples/foaf-based.ini | 44 + lib/venus/examples/images/edd.png | Bin 0 -> 9918 bytes lib/venus/examples/images/jdub.png | Bin 0 -> 9814 bytes lib/venus/examples/images/keybuk.png | Bin 0 -> 8496 bytes lib/venus/examples/images/thom.png | Bin 0 -> 7257 bytes lib/venus/examples/opml-top100.ini | 53 + lib/venus/examples/planet-schmanet.ini | 78 + lib/venus/expunge.py | 17 + lib/venus/filters/addsearch.genshi | 30 + lib/venus/filters/addsearch.xslt | 70 + lib/venus/filters/coral_cdn_filter.py | 18 + .../filters/delDupName/byline_author.xslt | 29 + lib/venus/filters/delDupName/p_by_name.xslt | 17 + lib/venus/filters/delDupName/p_from.xslt | 15 + lib/venus/filters/detitle.xslt | 25 + lib/venus/filters/excerpt.py | 109 + lib/venus/filters/h1title.xslt | 30 + lib/venus/filters/html2xhtml.plugin | 6 + lib/venus/filters/mememe.plugin | 496 + lib/venus/filters/minhead.py | 36 + lib/venus/filters/notweets.py | 6 + lib/venus/filters/regexp_sifter.py | 44 + lib/venus/filters/stripAd/feedburner.sed | 1 + lib/venus/filters/stripAd/google_ad_map.sed | 1 + lib/venus/filters/stripAd/yahoo.sed | 1 + lib/venus/filters/xhtml2html.plugin | 31 + lib/venus/filters/xpath_sifter.py | 23 + lib/venus/planet.py | 72 + lib/venus/planet/__init__.py | 40 + lib/venus/planet/config.py | 400 + lib/venus/planet/csv_config.py | 29 + lib/venus/planet/expunge.py | 67 + lib/venus/planet/foaf.py | 197 + lib/venus/planet/idindex.py | 99 + lib/venus/planet/opml.py | 154 + lib/venus/planet/reconstitute.py | 334 + lib/venus/planet/scrub.py | 132 + lib/venus/planet/shell/__init__.py | 67 + lib/venus/planet/shell/_genshi.py | 143 + lib/venus/planet/shell/dj.py | 50 + lib/venus/planet/shell/plugin.py | 64 + lib/venus/planet/shell/py.py | 22 + lib/venus/planet/shell/sed.py | 19 + lib/venus/planet/shell/tmpl.py | 272 + lib/venus/planet/shell/xslt.py | 78 + lib/venus/planet/spider.py | 482 + lib/venus/planet/splice.py | 167 + .../planet/vendor/compat_logging/__init__.py | 1196 + .../planet/vendor/compat_logging/config.py | 299 + .../planet/vendor/compat_logging/handlers.py | 728 + lib/venus/planet/vendor/feedparser.py | 3612 +++ lib/venus/planet/vendor/html5lib/__init__.py | 15 + lib/venus/planet/vendor/html5lib/constants.py | 816 + .../vendor/html5lib/filters/__init__.py | 0 
.../planet/vendor/html5lib/filters/_base.py | 10 + .../html5lib/filters/inject_meta_charset.py | 63 + .../planet/vendor/html5lib/filters/lint.py | 88 + .../vendor/html5lib/filters/optionaltags.py | 175 + .../vendor/html5lib/filters/whitespace.py | 41 + .../planet/vendor/html5lib/html5parser.py | 1985 ++ .../planet/vendor/html5lib/inputstream.py | 602 + .../vendor/html5lib/liberalxmlparser.py | 147 + lib/venus/planet/vendor/html5lib/sanitizer.py | 202 + .../vendor/html5lib/serializer/__init__.py | 3 + .../html5lib/serializer/htmlserializer.py | 218 + .../html5lib/serializer/xhtmlserializer.py | 9 + lib/venus/planet/vendor/html5lib/tokenizer.py | 1009 + .../vendor/html5lib/treebuilders/__init__.py | 65 + .../vendor/html5lib/treebuilders/_base.py | 330 + .../vendor/html5lib/treebuilders/dom.py | 203 + .../vendor/html5lib/treebuilders/etree.py | 266 + .../html5lib/treebuilders/simpletree.py | 205 + .../vendor/html5lib/treebuilders/soup.py | 158 + .../vendor/html5lib/treewalkers/__init__.py | 52 + .../vendor/html5lib/treewalkers/_base.py | 154 + .../planet/vendor/html5lib/treewalkers/dom.py | 37 + .../vendor/html5lib/treewalkers/etree.py | 112 + .../html5lib/treewalkers/genshistream.py | 67 + .../vendor/html5lib/treewalkers/pulldom.py | 52 + .../vendor/html5lib/treewalkers/simpletree.py | 72 + .../vendor/html5lib/treewalkers/soup.py | 36 + lib/venus/planet/vendor/html5lib/utils.py | 36 + lib/venus/planet/vendor/htmltmpl.py | 1421 + lib/venus/planet/vendor/httplib2/__init__.py | 917 + lib/venus/planet/vendor/httplib2/iri2uri.py | 110 + lib/venus/planet/vendor/portalocker.py | 93 + lib/venus/planet/vendor/timeoutsocket.py | 424 + lib/venus/spider.py | 22 + lib/venus/splice.py | 18 + lib/venus/themes/asf/config.ini | 21 + lib/venus/themes/asf/default.css | 533 + lib/venus/themes/asf/index.html.xslt | 339 + lib/venus/themes/asf/personalize.js | 297 + lib/venus/themes/classic_fancy/config.ini | 20 + .../themes/classic_fancy/index.html.tmpl | 126 + lib/venus/themes/classic_fancy/planet.css | 150 + lib/venus/themes/common/atom.xml.xslt | 80 + lib/venus/themes/common/foafroll.xml.xslt | 39 + .../themes/common/images/feed-icon-10x10.png | Bin 0 -> 469 bytes lib/venus/themes/common/images/foaf.png | Bin 0 -> 1393 bytes lib/venus/themes/common/images/logo.png | Bin 0 -> 5413 bytes lib/venus/themes/common/images/opml.png | Bin 0 -> 804 bytes lib/venus/themes/common/images/planet.png | Bin 0 -> 426 bytes lib/venus/themes/common/images/tcosm11.gif | Bin 0 -> 203 bytes lib/venus/themes/common/images/venus.ico | Bin 0 -> 894 bytes lib/venus/themes/common/images/venus.png | Bin 0 -> 570 bytes lib/venus/themes/common/opml.xml.xslt | 40 + lib/venus/themes/common/rss10.xml.tmpl | 37 + lib/venus/themes/common/rss20.xml.tmpl | 33 + lib/venus/themes/common/validate.html.xslt | 146 + lib/venus/themes/django/bland.css | 39 + lib/venus/themes/django/config.ini | 11 + lib/venus/themes/django/index.html.dj | 49 + lib/venus/themes/genshi_fancy/config.ini | 20 + .../themes/genshi_fancy/index.html.genshi | 95 + lib/venus/themes/genshi_fancy/planet.css | 150 + lib/venus/themes/mobile/config.ini | 24 + lib/venus/themes/mobile/mobile.html.xslt | 199 + lib/venus/themes/musings/config.ini | 18 + lib/venus/themes/musings/default.css | 402 + lib/venus/themes/musings/index.html.xslt | 293 + lib/venus/themes/musings/personalize.js | 220 + 149 files changed, 63148 insertions(+) create mode 100644 lib/venus/.htaccess create mode 100644 lib/venus/AUTHORS create mode 100644 lib/venus/LICENCE create mode 100644 lib/venus/README create 
mode 100644 lib/venus/THANKS create mode 100644 lib/venus/TODO create mode 100644 lib/venus/docs/config.html create mode 100644 lib/venus/docs/contributing.html create mode 100644 lib/venus/docs/docs.css create mode 100644 lib/venus/docs/docs.js create mode 100644 lib/venus/docs/etiquette.html create mode 100644 lib/venus/docs/filters.html create mode 100644 lib/venus/docs/img/shadowAlpha.png create mode 100644 lib/venus/docs/index.html create mode 100644 lib/venus/docs/installation.html create mode 100644 lib/venus/docs/migration.html create mode 100644 lib/venus/docs/normalization.html create mode 100644 lib/venus/docs/templates.html create mode 100644 lib/venus/docs/venus.svg create mode 100644 lib/venus/examples/filters/categories/categories.xslt create mode 100644 lib/venus/examples/filters/guess-language/README create mode 100644 lib/venus/examples/filters/guess-language/en.data create mode 100644 lib/venus/examples/filters/guess-language/fr.data create mode 100644 lib/venus/examples/filters/guess-language/guess-language.py create mode 100755 lib/venus/examples/filters/guess-language/learn-language.py create mode 100644 lib/venus/examples/filters/guess-language/trigram.py create mode 100644 lib/venus/examples/filters/xpath-sifter/xpath-sifter.ini create mode 100644 lib/venus/examples/foaf-based.ini create mode 100644 lib/venus/examples/images/edd.png create mode 100644 lib/venus/examples/images/jdub.png create mode 100644 lib/venus/examples/images/keybuk.png create mode 100644 lib/venus/examples/images/thom.png create mode 100644 lib/venus/examples/opml-top100.ini create mode 100644 lib/venus/examples/planet-schmanet.ini create mode 100755 lib/venus/expunge.py create mode 100644 lib/venus/filters/addsearch.genshi create mode 100644 lib/venus/filters/addsearch.xslt create mode 100644 lib/venus/filters/coral_cdn_filter.py create mode 100644 lib/venus/filters/delDupName/byline_author.xslt create mode 100644 lib/venus/filters/delDupName/p_by_name.xslt create mode 100644 lib/venus/filters/delDupName/p_from.xslt create mode 100644 lib/venus/filters/detitle.xslt create mode 100644 lib/venus/filters/excerpt.py create mode 100644 lib/venus/filters/h1title.xslt create mode 100644 lib/venus/filters/html2xhtml.plugin create mode 100644 lib/venus/filters/mememe.plugin create mode 100644 lib/venus/filters/minhead.py create mode 100644 lib/venus/filters/notweets.py create mode 100644 lib/venus/filters/regexp_sifter.py create mode 100644 lib/venus/filters/stripAd/feedburner.sed create mode 100644 lib/venus/filters/stripAd/google_ad_map.sed create mode 100644 lib/venus/filters/stripAd/yahoo.sed create mode 100644 lib/venus/filters/xhtml2html.plugin create mode 100644 lib/venus/filters/xpath_sifter.py create mode 100755 lib/venus/planet.py create mode 100644 lib/venus/planet/__init__.py create mode 100644 lib/venus/planet/config.py create mode 100755 lib/venus/planet/csv_config.py create mode 100644 lib/venus/planet/expunge.py create mode 100644 lib/venus/planet/foaf.py create mode 100644 lib/venus/planet/idindex.py create mode 100755 lib/venus/planet/opml.py create mode 100644 lib/venus/planet/reconstitute.py create mode 100644 lib/venus/planet/scrub.py create mode 100644 lib/venus/planet/shell/__init__.py create mode 100644 lib/venus/planet/shell/_genshi.py create mode 100644 lib/venus/planet/shell/dj.py create mode 100644 lib/venus/planet/shell/plugin.py create mode 100644 lib/venus/planet/shell/py.py create mode 100644 lib/venus/planet/shell/sed.py create mode 100644 lib/venus/planet/shell/tmpl.py 
create mode 100644 lib/venus/planet/shell/xslt.py create mode 100644 lib/venus/planet/spider.py create mode 100644 lib/venus/planet/splice.py create mode 100644 lib/venus/planet/vendor/compat_logging/__init__.py create mode 100644 lib/venus/planet/vendor/compat_logging/config.py create mode 100644 lib/venus/planet/vendor/compat_logging/handlers.py create mode 100755 lib/venus/planet/vendor/feedparser.py create mode 100644 lib/venus/planet/vendor/html5lib/__init__.py create mode 100644 lib/venus/planet/vendor/html5lib/constants.py create mode 100644 lib/venus/planet/vendor/html5lib/filters/__init__.py create mode 100644 lib/venus/planet/vendor/html5lib/filters/_base.py create mode 100644 lib/venus/planet/vendor/html5lib/filters/inject_meta_charset.py create mode 100644 lib/venus/planet/vendor/html5lib/filters/lint.py create mode 100644 lib/venus/planet/vendor/html5lib/filters/optionaltags.py create mode 100644 lib/venus/planet/vendor/html5lib/filters/whitespace.py create mode 100644 lib/venus/planet/vendor/html5lib/html5parser.py create mode 100644 lib/venus/planet/vendor/html5lib/inputstream.py create mode 100644 lib/venus/planet/vendor/html5lib/liberalxmlparser.py create mode 100644 lib/venus/planet/vendor/html5lib/sanitizer.py create mode 100644 lib/venus/planet/vendor/html5lib/serializer/__init__.py create mode 100644 lib/venus/planet/vendor/html5lib/serializer/htmlserializer.py create mode 100644 lib/venus/planet/vendor/html5lib/serializer/xhtmlserializer.py create mode 100644 lib/venus/planet/vendor/html5lib/tokenizer.py create mode 100755 lib/venus/planet/vendor/html5lib/treebuilders/__init__.py create mode 100755 lib/venus/planet/vendor/html5lib/treebuilders/_base.py create mode 100644 lib/venus/planet/vendor/html5lib/treebuilders/dom.py create mode 100755 lib/venus/planet/vendor/html5lib/treebuilders/etree.py create mode 100755 lib/venus/planet/vendor/html5lib/treebuilders/simpletree.py create mode 100644 lib/venus/planet/vendor/html5lib/treebuilders/soup.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/__init__.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/_base.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/dom.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/etree.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/genshistream.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/pulldom.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/simpletree.py create mode 100644 lib/venus/planet/vendor/html5lib/treewalkers/soup.py create mode 100644 lib/venus/planet/vendor/html5lib/utils.py create mode 100644 lib/venus/planet/vendor/htmltmpl.py create mode 100644 lib/venus/planet/vendor/httplib2/__init__.py create mode 100644 lib/venus/planet/vendor/httplib2/iri2uri.py create mode 100644 lib/venus/planet/vendor/portalocker.py create mode 100644 lib/venus/planet/vendor/timeoutsocket.py create mode 100755 lib/venus/spider.py create mode 100755 lib/venus/splice.py create mode 100644 lib/venus/themes/asf/config.ini create mode 100644 lib/venus/themes/asf/default.css create mode 100644 lib/venus/themes/asf/index.html.xslt create mode 100644 lib/venus/themes/asf/personalize.js create mode 100644 lib/venus/themes/classic_fancy/config.ini create mode 100644 lib/venus/themes/classic_fancy/index.html.tmpl create mode 100644 lib/venus/themes/classic_fancy/planet.css create mode 100644 lib/venus/themes/common/atom.xml.xslt create mode 100644 
lib/venus/themes/common/foafroll.xml.xslt create mode 100644 lib/venus/themes/common/images/feed-icon-10x10.png create mode 100644 lib/venus/themes/common/images/foaf.png create mode 100644 lib/venus/themes/common/images/logo.png create mode 100644 lib/venus/themes/common/images/opml.png create mode 100644 lib/venus/themes/common/images/planet.png create mode 100644 lib/venus/themes/common/images/tcosm11.gif create mode 100644 lib/venus/themes/common/images/venus.ico create mode 100644 lib/venus/themes/common/images/venus.png create mode 100644 lib/venus/themes/common/opml.xml.xslt create mode 100644 lib/venus/themes/common/rss10.xml.tmpl create mode 100644 lib/venus/themes/common/rss20.xml.tmpl create mode 100644 lib/venus/themes/common/validate.html.xslt create mode 100644 lib/venus/themes/django/bland.css create mode 100644 lib/venus/themes/django/config.ini create mode 100644 lib/venus/themes/django/index.html.dj create mode 100644 lib/venus/themes/genshi_fancy/config.ini create mode 100644 lib/venus/themes/genshi_fancy/index.html.genshi create mode 100644 lib/venus/themes/genshi_fancy/planet.css create mode 100644 lib/venus/themes/mobile/config.ini create mode 100644 lib/venus/themes/mobile/mobile.html.xslt create mode 100644 lib/venus/themes/musings/config.ini create mode 100644 lib/venus/themes/musings/default.css create mode 100644 lib/venus/themes/musings/index.html.xslt create mode 100644 lib/venus/themes/musings/personalize.js diff --git a/lib/venus/.htaccess b/lib/venus/.htaccess new file mode 100644 index 0000000..7a73b05 --- /dev/null +++ b/lib/venus/.htaccess @@ -0,0 +1,22 @@ +# Replace index + +IndexOptions DescriptionWidth=80 +IndexOptions +SuppressHTMLPreamble +IndexIgnore header.html footer.html index.atom +HeaderName /code/venus/header.html +ReadmeName /code/venus/footer.html + +AddDefaultCharset utf-8 + +# Redirect missing tgz and zip files to tarify.cgi + +RewriteEngine on + +RewriteCond %{REQUEST_FILENAME} !-s +RewriteRule (.*)\.tgz$ tarify.cgi?dir=$1 + +RewriteCond %{REQUEST_FILENAME} !-s +RewriteRule (.*)\.zip$ tarify.cgi?dir=$1 + +RewriteCond %{REQUEST_FILENAME} !-s +RewriteRule (.*).atom$ bzr-feed.cgi?dir=$1 diff --git a/lib/venus/AUTHORS b/lib/venus/AUTHORS new file mode 100644 index 0000000..4de27da --- /dev/null +++ b/lib/venus/AUTHORS @@ -0,0 +1,7 @@ +Sam Ruby <rubys@intertwingly.net> + +This codebase represents a radical refactoring of Planet 2.0, which lists +the following authors: + +Scott James Remnant <scott@netsplit.com> +Jeff Waugh <jdub@perkypants.org> diff --git a/lib/venus/LICENCE b/lib/venus/LICENCE new file mode 100644 index 0000000..1090fa3 --- /dev/null +++ b/lib/venus/LICENCE @@ -0,0 +1,84 @@ +Planet is released under the same licence as Python, here it is: + + +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting Mathematisch Centrum (CWI) in the Netherlands as a successor of a language called ABC. Guido is Python's principal author, although it includes many contributions from others. The last version released from CWI was Python 1.2. In 1995, Guido continued his work on Python at the Corporation for National Research Initiatives (CNRI) in Reston, Virginia where he released several versions of the software. Python 1.6 was the last of the versions released by CNRI. In 2000, Guido and the Python core development team moved to BeOpen.com to form the BeOpen PythonLabs team. Python 2.0 was the first and only release from BeOpen.com. 
+ +Following the release of Python 1.6, and after Guido van Rossum left CNRI to work with commercial software developers, it became clear that the ability to use Python with software available under the GNU Public License (GPL) was very desirable. CNRI and the Free Software Foundation (FSF) interacted to develop enabling wording changes to the Python license. Python 1.6.1 is essentially the same as Python 1.6, with a few minor bug fixes, and with a different license that enables later versions to be GPL-compatible. Python 2.1 is a derivative work of Python 1.6.1, as well as of Python 2.0. + +After Python 2.0 was released by BeOpen.com, Guido van Rossum and the other PythonLabs developers joined Digital Creations. All intellectual property added from this point on, starting with Python 2.1 and its alpha and beta releases, is owned by the Python Software Foundation (PSF), a non-profit modeled after the Apache Software Foundation. See http://www.python.org/psf/ for more information about the PSF. + +Thanks to the many outside volunteers who have worked under Guido's direction to make these releases possible. + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +PSF LICENSE AGREEMENT +--------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using Python 2.1.1 software in source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python 2.1.1 alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001 Python Software Foundation; All Rights Reserved" are retained in Python 2.1.1 alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on or incorporates Python 2.1.1 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python 2.1.1. + +4. PSF is making Python 2.1.1 available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 2.1.1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.1.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.1.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. + +8. 
By copying, installing or otherwise using Python 2.1.1, Licensee agrees to be bound by the terms and conditions of this License Agreement. + +BEOPEN.COM TERMS AND CONDITIONS FOR PYTHON 2.0 +---------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the Individual or Organization ("Licensee") accessing and otherwise using this software in source or binary form and its associated documentation ("the Software"). + +2. Subject to the terms and conditions of this BeOpen Python License Agreement, BeOpen hereby grants Licensee a non-exclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use the Software alone or in any derivative version, provided, however, that the BeOpen Python License is retained in the Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all respects by the law of the State of California, excluding conflict of law provisions. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between BeOpen and Licensee. This License Agreement does not grant permission to use BeOpen trademarks or trade names in a trademark sense to endorse or promote products or services of Licensee, or any third party. As an exception, the "BeOpen Python" logos available at http://www.pythonlabs.com/logos.html may be used according to the permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee agrees to be bound by the terms and conditions of this License Agreement. + +CNRI OPEN SOURCE GPL-COMPATIBLE LICENSE AGREEMENT +------------------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National Research Initiatives, having an office at 1895 Preston White Drive, Reston, VA 20191 ("CNRI"), and the Individual or Organization ("Licensee") accessing and otherwise using Python 1.6.1 software in source or binary form and its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, CNRI hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python 1.6.1 alone or in any derivative version, provided, however, that CNRI's License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) 1995-2001 Corporation for National Research Initiatives; All Rights Reserved" are retained in Python 1.6.1 alone or in any derivative version prepared by Licensee. Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the following text (omitting the quotes): "Python 1.6.1 is made available subject to the terms and conditions in CNRI's License Agreement. This Agreement together with Python 1.6.1 may be located on the Internet using the following unique, persistent identifier (known as a handle): 1895.22/1013. This Agreement may also be obtained from a proxy server on the Internet using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on or incorporates Python 1.6.1 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal intellectual property law of the United States, including without limitation the federal copyright law, and, to the extent such U.S. federal law does not apply, by the law of the Commonwealth of Virginia, excluding Virginia's conflict of law provisions. Notwithstanding the foregoing, with regard to derivative works based on Python 1.6.1 that incorporate non-separable material that was previously distributed under the GNU General Public License (GPL), the law of the Commonwealth of Virginia shall govern this License Agreement only as to issues arising under or with respect to Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between CNRI and Licensee. This License Agreement does not grant permission to use CNRI trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, installing or otherwise using Python 1.6.1, Licensee agrees to be bound by the terms and conditions of this License Agreement. 
+ + ACCEPT + +CWI PERMISSIONS STATEMENT AND DISCLAIMER +---------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Stichting Mathematisch Centrum or CWI not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. + +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. \ No newline at end of file diff --git a/lib/venus/README b/lib/venus/README new file mode 100644 index 0000000..59924c3 --- /dev/null +++ b/lib/venus/README @@ -0,0 +1,20 @@ +Planet +------ + +Planet is a flexible feed aggregator. It downloads news feeds published by +web sites and aggregates their content together into a single combined feed, +latest news first. This version of Planet is named Venus as it is the +second major version. The first version is still in wide use and is +also actively being maintained. + +It uses Mark Pilgrim's Universal Feed Parser to read from CDF, RDF, RSS and +Atom feeds; Leonard Richardson's Beautiful Soup to correct markup issues; +and either Tomas Styblo's templating engine or Daniel Viellard's implementation +of XSLT to output static files in any format you can dream up. + +To get started, check out the documentation in the docs directory. 
If you have +any questions or comments, please don't hesitate to use the planet mailing list: + + http://lists.planetplanet.org/mailman/listinfo/devel + +Keywords: feed, blog, aggregator, RSS, RDF, Atom, OPML, Python diff --git a/lib/venus/THANKS b/lib/venus/THANKS new file mode 100644 index 0000000..eb7c72f --- /dev/null +++ b/lib/venus/THANKS @@ -0,0 +1,41 @@ +DeWitt Clinton - Mac OSX +Mary Gardiner - PythonPath +Elias Torres - FOAF OnlineAccounts +Jacques Distler - Template patches +Michael Koziarski - HTTP Auth fix +Brian Ewins - Win32 / Portalocker +Joe Gregorio - python versioning for filters, verbose tests, spider_threads +Harry Fuecks - Pipe characters in file names, filter bug +Eric van der Vlist - Filters to add language, category information +Chris Dolan - mkdir cache; default template_dirs; fix xsltproc +David Sifry - rss 2.0 xslt template based on http://atom.geekhood.net/ +Morten Frederiksen - Support WordPress LinkManager OPML +Harry Fuecks - default item date to feed date +Antonio Cavedoni - Django templates +Morten Frederiksen - expungeCache +Lenny Domnitser - Coral CDN support for URLs with non-standard ports +Amit Chakradeo - Allow read-only files to be overwritten +Matt Brubeck - fix new_channel +Aristotle Pagaltzis - ensure byline_author filter doesn't drop foreign markup + +This codebase represents a radical refactoring of Planet 2.0, which lists +the following contributors: + +Patches and Bug Fixes +--------------------- + +Chris Dolan - fixes, exclude filtering, duplicate culling +David Edmondson - filtering +Lucas Nussbaum - locale configuration +David Pashley - cache code profiling and recursion fixing +Gediminas Paulauskas - days per page + + +Spycyroll Maintainers +--------------------- + +Vattekkat Satheesh Babu +Richard Jones +Garth Kidd +Eliot Landrum +Bryan Richard diff --git a/lib/venus/TODO b/lib/venus/TODO new file mode 100644 index 0000000..3efd52a --- /dev/null +++ b/lib/venus/TODO @@ -0,0 +1,7 @@ +TODO +==== + + * Allow display normalisation to specified timezone + + Some Planet admins would like their feed to be displayed in the local + timezone, instead of UTC. diff --git a/lib/venus/docs/config.html b/lib/venus/docs/config.html new file mode 100644 index 0000000..0ed6e59 --- /dev/null +++ b/lib/venus/docs/config.html @@ -0,0 +1,164 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Configuration</title> +</head> +<body> + +<h2>Configuration</h2> +<p>Configuration files are in <a href="http://docs.python.org/lib/module- +ConfigParser.html">ConfigParser</a> format which basically means the same +format as INI files, i.e., they consist of a series of +<code>[sections]</code>, in square brackets, with each section containing a +list of <code>name:value</code> pairs (or <code>name=value</code> pairs, if +you prefer).</p> +<p>You are welcome to place your entire configuration into one file. +Alternately, you may factor out the templating into a "theme", and +the list of subscriptions into one or more "reading lists".</p> +<h3 id="planet"><code>[planet]</code></h3> +<p>This is the only required section, which is a bit odd as none of the +parameters listed below are required. 
Even so, you really do want to +provide many of these, especially ones that identify your planet and +either (or both) of <code>template_files</code> and <code>theme</code>.</p> +<p>Below is a complete list of predefined planet configuration parameters, +including <del>ones not (yet) implemented by Venus</del> and <ins>ones that +are either new or implemented differently by Venus</ins>.</p> + +<blockquote> +<dl class="compact code"> +<dt>name</dt> +<dd>Your planet's name</dd> +<dt>link</dt> +<dd>Link to the main page</dd> +<dt>owner_name</dt> +<dd>Your name</dd> +<dt>owner_email</dt> +<dd>Your e-mail address</dd> + +</dl> +<dl class="compact code"> + +<dt>cache_directory</dt> +<dd>Where cached feeds are stored</dd> +<dt>output_dir</dt> +<dd>Directory to place output files</dd> + +</dl> +<dl class="compact code"> + +<dt><ins>output_theme</ins></dt> +<dd>Directory containing a <code>config.ini</code> file which is merged +with this one. This is typically used to specify templating and bill of +material information.</dd> +<dt>template_files</dt> +<dd>Space-separated list of output template files</dd> +<dt><ins>template_directories</ins></dt> +<dd>Space-separated list of directories in which <code>template_files</code> +can be found</dd> +<dt><ins>bill_of_materials</ins></dt> +<dd>Space-separated list of files to be copied as is directly from the <code>template_directories</code> to the <code>output_dir</code></dd> +<dt>filter</dt> +<dd>Regular expression that must be found in the textual portion of the entry</dd> +<dt>exclude</dt> +<dd>Regular expression that must <b>not</b> be found in the textual portion of the entry</dd> +<dt><ins>filters</ins></dt> +<dd>Space-separated list of <a href="filters.html">filters</a> to apply to +each entry</dd> +<dt><ins>filter_directories</ins></dt> +<dd>Space-separated list of directories in which <code>filters</code> +can be found</dd> + +</dl> +<dl class="compact code"> + +<dt>items_per_page</dt> +<dd>How many items to put on each page. <ins>Whereas Planet 2.0 allows this to +be overridden on a per template basis, Venus currently takes the maximum value +for this across all templates.</ins></dd> +<dt><del>days_per_page</del></dt> +<dd>How many complete days of posts to put on each page This is the absolute, hard limit (over the item limit)</dd> +<dt>date_format</dt> +<dd><a href="http://docs.python.org/lib/module-time.html#l2h-2816">strftime</a> format for the default 'date' template variable</dd> +<dt>new_date_format</dt> +<dd><a href="http://docs.python.org/lib/module-time.html#l2h-2816">strftime</a> format for the 'new_date' template variable <ins>only applies to htmltmpl templates</ins></dd> +<dt><del>encoding</del></dt> +<dd>Output encoding for the file, Python 2.3+ users can use the special "xml" value to output ASCII with XML character references</dd> +<dt><del>locale</del></dt> +<dd>Locale to use for (e.g.) strings in dates, default is taken from your system</dd> +<dt>activity_threshold</dt> +<dd>If non-zero, all feeds which have not been updated in the indicated +number of days will be marked as inactive</dd> + +</dl> +<dl class="compact code"> + +<dt>log_level</dt> +<dd>One of <code>DEBUG</code>, <code>INFO</code>, <code>WARNING</code>, <code>ERROR</code> or <code>CRITICAL</code></dd> +<dt><ins>log_format</ins></dt> +<dd><a href="http://docs.python.org/lib/node422.html">format string</a> to +use for logging output. 
Note: this configuration value is processed +<a href="http://docs.python.org/lib/ConfigParser-objects.html">raw</a></dd> +<dt>feed_timeout</dt> +<dd>Number of seconds to wait for any given feed</dd> +<dt>new_feed_items</dt> +<dd>Maximum number of items to include in the output from any one feed</dd> +<dt><ins>spider_threads</ins></dt> +<dd>The number of threads to use when spidering. When set to 0, the default, +no threads are used and spidering follows the traditional algorithm.</dd> +<dt><ins>http_cache_directory</ins></dt> +<dd>If <code>spider_threads</code> is specified, you can also specify a +directory to be used for an additional HTTP cache to front end the Venus +cache. If specified as a relative path, it is evaluated relative to the +<code>cache_directory</code>.</dd> +<dt><ins>cache_keep_entries</ins></dt> +<dd>Used by <code>expunge</code> to determine how many entries should be +kept for each source when expunging old entries from the cache directory. +This may be overriden on a per subscription feed basis.</dd> +</dl> +<p>Additional options can be found in +<a href="normalization.html#overrides">normalization level overrides</a>.</p> +</blockquote> + +<h3 id="default"><code>[DEFAULT]</code></h3> +<p>Values placed in this section are used as default values for all sections. +While it is true that few values make sense in all sections; in most cases +unused parameters cause few problems.</p> + +<h3 id="subscription"><code>[</code><em>subscription</em><code>]</code></h3> +<p>All sections other than <code>planet</code>, <code>DEFAULT</code>, or are +named in <code>[planet]</code>'s <code>filters</code> or +<code>templatefiles</code> parameters +are treated as subscriptions and typically take the form of a +<acronym title="Uniform Resource Identifier">URI</acronym>.</p> +<p>Parameters placed in this section are passed to templates. While +you are free to include as few or as many parameters as you like, most of +the predefined themes presume that at least <code>name</code> is defined.</p> +<p>The <code>content_type</code> parameter can be defined to indicate that +this subscription is a <em>reading list</em>, i.e., is an external list +of subscriptions. At the moment, three formats of reading lists are supported: +<code>opml</code>, <code>foaf</code>, <code>csv</code>, and +<code>config</code>. In the future, +support for formats like <code>xoxo</code> could be added.</p> +<p><a href="normalization.html#overrides">Normalization overrides</a> can +also be defined here.</p> + +<h3 id="template"><code>[</code><em>template</em><code>]</code></h3> +<p>Sections which are listed in <code>[planet] template_files</code> are +processed as <a href="templates.html">templates</a>. With Planet 2.0, +it is possible to override parameters like <code>items_per_page</code> +on a per template basis, but at the current time Planet Venus doesn't +implement this.</p> +<p><ins><a href="filters.html">Filters</a> can be defined on a per-template basis, and will be used to post-process the output of the template.</ins></p> + +<h3 id="filter"><code>[</code><em>filter</em><code>]</code></h3> +<p>Sections which are listed in <code>[planet] filters</code> are +processed as <a href="filters.html">filters</a>.</p> +<p>Parameters which are listed in this section are passed to the filter +in a language specific manner. 
+</body> +</html> diff --git a/lib/venus/docs/contributing.html b/lib/venus/docs/contributing.html new file mode 100644 index 0000000..2cf95e1 --- /dev/null +++ b/lib/venus/docs/contributing.html @@ -0,0 +1,67 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Contributing</title> +</head> +<body> +<h2>Contributing</h2> +<p>If you make changes to Venus, you have no obligation to share them. +And unlike systems based on <code>CVS</code> or <code>subversion</code>, +there is no notion of “committers” — everybody is +a peer.</p> +<p>If you should choose to share your changes, the steps outlined below may +increase the chances of your code being picked up.</p> + +<h3>Documentation and Tests</h3> +<p>For best results, include both documentation and tests in your +contribution.</p> +<p>Documentation can be found in the <code>docs</code> directory. It is +straight XHTML.</p> +<p>Test cases can be found in the +<a href="http://localhost/~rubys/venus/tests/">tests</a> directory, and +make use of the +<a href="http://docs.python.org/lib/module-unittest.html">Python Unit testing framework</a>. To run them, simply enter:</p> +<blockquote><pre>python runtests.py</pre></blockquote> + +<h3>Bzr</h3> +<p>If you have done a <a href="index.html">bzr get</a>, you have already set up +a repository. The only additional step you might need to do is to introduce +yourself to <a href="http://bazaar-vcs.org/">bzr</a>. Type in the following, +after replacing the <b>bold text</b> with your information:</p> + +<blockquote><pre>bzr whoami '<b>Your Name</b> <<b>youremail</b>@<b>example.com</b>>'</pre></blockquote> + +<p>Then, simply make the changes you like. When you are done, type:</p> + +<blockquote><pre>bzr st</pre></blockquote> + +<p>This will tell you which files you have modified, and which ones you may +have added. If you add files and you want them to be included, simply do a:</p> + +<blockquote><pre>bzr add file1 file2...</pre></blockquote> + +<p>You can also do a <code>bzr diff</code> to see if there are any changes +which you made that you don't want included. I can't tell you how many +debug print statements I have caught this way.</p> + +<p>Next, type:</p> + +<blockquote><pre>bzr commit</pre></blockquote> + +<p>This will allow you to enter a comment describing your change. If your +repository is already on your web server, simply let others know where they +can find it. If not, you can simply ftp or scp the files to your web server +— no additional software needs to be installed on that machine.</p> + +<h3>Telling others</h3> +<p>Once you have a change worth sharing, post a message on the +<a href="http://lists.planetplanet.org/mailman/listinfo/devel">mailing list</a>.</p> +<p>Also, consider setting up a <a href="http://bzr.mfd-consult.dk/bzr-feed/">bzr-feed</a> for your repository, so people who wish to do so can automatically +be notified of every change.</p> +<p>There is now even a nascent <a href="http://planet.intertwingly.net/venus/">planet</a> being formed which combines these feeds of changes. 
You can <a href="http://planet.intertwingly.net/venus/atom.xml">subscribe</a> to it too.</p> +</body> +</html> diff --git a/lib/venus/docs/docs.css b/lib/venus/docs/docs.css new file mode 100644 index 0000000..5ac258a --- /dev/null +++ b/lib/venus/docs/docs.css @@ -0,0 +1,100 @@ +body { + background-color: #fff; + color: #333; + font-family: 'Lucida Grande', Verdana, Geneva, Lucida, Helvetica, sans-serif; + font-size: small; + margin: 40px; + padding: 0; +} + +a:link, a:visited { + background-color: transparent; + color: #333; + text-decoration: none !important; + border-bottom: 1px dotted #333 !important; +} + +a:hover { + background-color: transparent; + color: #934; + text-decoration: none !important; + border-bottom: 1px dotted #993344 !important; +} + +pre, code { + background-color: #FFF; + color: #00F; + font-size: large +} + +h1 { + margin: 8px 0 10px 20px; + padding: 0; + font-variant: small-caps; + letter-spacing: 0.1em; + font-family: "Book Antiqua", Georgia, Palatino, Times, "Times New Roman", serif; +} + +h2 { + clear: both; +} + +ul, ul.outer > li { + margin: 14px 0 10px 0; +} + +.z { + float:left; + background: url(img/shadowAlpha.png) no-repeat bottom right !important; + margin: -15px 0 20px -15px !important; +} + +.z .logo { + color: magenta; +} + +.z p { + margin: 14px 0 10px 15px !important; +} + +.z .sectionInner { + width: 730px; + background: none !important; + padding: 0 !important; + } + +.z .sectionInner .sectionInner2 { + border: 1px solid #a9a9a9; + padding: 4px; + margin: -6px 6px 6px -6px !important; +} + +ins { + background-color: #FFF; + color: #F0F; + text-decoration: none; +} + +dl.compact { + margin-bottom: 1em; + margin-top: 1em; +} + +dl.compact > dt { + clear: left; + float: left; + margin-bottom: 0; + padding-right: 8px; + margin-top: 0; + list-style-type: none; +} + +dl.compact > dd { + margin-bottom: 0; + margin-top: 0; + margin-left: 10em; +} + +th, td { + font-size: small; +} diff --git a/lib/venus/docs/docs.js b/lib/venus/docs/docs.js new file mode 100644 index 0000000..e5944f8 --- /dev/null +++ b/lib/venus/docs/docs.js @@ -0,0 +1,54 @@ +window.onload=function() { + var vindex = document.URL.lastIndexOf('venus/'); + if (vindex<0) vindex = document.URL.lastIndexOf('planet/'); + var base = document.URL.substring(0,vindex+6); + + var body = document.getElementsByTagName('body')[0]; + var div = document.createElement('div'); + div.setAttribute('class','z'); + var h1 = document.createElement('h1'); + var span = document.createElement('span'); + span.appendChild(document.createTextNode('\u2640')); + span.setAttribute('class','logo'); + h1.appendChild(span); + h1.appendChild(document.createTextNode(' Planet Venus')); + + var inner2=document.createElement('div'); + inner2.setAttribute('class','sectionInner2'); + inner2.appendChild(h1); + + var p = document.createElement('p'); + p.appendChild(document.createTextNode("Planet Venus is an awesome \u2018river of news\u2019 feed reader. 
It downloads news feeds published by web sites and aggregates their content together into a single combined feed, latest news first.")); + inner2.appendChild(p); + + p = document.createElement('p'); + var a = document.createElement('a'); + a.setAttribute('href',base); + a.appendChild(document.createTextNode('Download')); + p.appendChild(a); + p.appendChild(document.createTextNode(" \u00b7 ")); + a = document.createElement('a'); + a.setAttribute('href',base+'docs/index.html'); + a.appendChild(document.createTextNode('Documentation')); + p.appendChild(a); + p.appendChild(document.createTextNode(" \u00b7 ")); + a = document.createElement('a'); + a.setAttribute('href',base+'tests/'); + a.appendChild(document.createTextNode('Unit tests')); + p.appendChild(a); + p.appendChild(document.createTextNode(" \u00b7 ")); + a = document.createElement('a'); + a.setAttribute('href','http://lists.planetplanet.org/mailman/listinfo/devel'); + a.appendChild(document.createTextNode('Mailing list')); + p.appendChild(a); + inner2.appendChild(p); + + var inner1=document.createElement('div'); + inner1.setAttribute('class','sectionInner'); + inner1.setAttribute('id','inner1'); + inner1.appendChild(inner2); + + div.appendChild(inner1); + + body.insertBefore(div, body.firstChild); +} diff --git a/lib/venus/docs/etiquette.html b/lib/venus/docs/etiquette.html new file mode 100644 index 0000000..a567e77 --- /dev/null +++ b/lib/venus/docs/etiquette.html @@ -0,0 +1,48 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Etiquette</title> +</head> +<body> +<h2>Etiquette</h2> +<p>You would think that people who publish syndication feeds do it with the +intent to be syndicated. But the truth is that we live in a world where +<a href="http://en.wikipedia.org/wiki/Deep_linking">deep linking</a> can +cause people to complain. Nothing is safe. 
But that doesn’t +stop us from doing links.</p> + +<p>These concerns tend to increase when you profit, either directly via ads or +indirectly via search engine rankings, from the content of others.</p> + +<p>While there are no hard and fast rules that apply here, here are a +few things you can do to mitigate the concern:</p> + +<ul> +<li>Aggressively use robots.txt, meta tags, and the google/livejournal +atom namespace to mark your pages as not to be indexed by search +engines.</li> +<blockquote><p><dl> +<dt><a href="http://www.robotstxt.org/">robots.txt</a>:</dt> +<dd><p><code>User-agent: *<br/> +Disallow: /</code></p></dd> +<dt>index.html:</dt> +<dd><p><code><<a href="http://www.robotstxt.org/wc/meta-user.html">meta name="robots"</a> content="noindex,nofollow"/></code></p></dd> +<dt>atom.xml:</dt> +<dd><p><code><feed xmlns:indexing="<a href="http://community.livejournal.com/lj_dev/696793.html">urn:atom-extension:indexing</a>" indexing:index="no"></code></p> +<p><code><access:restriction xmlns:access="<a href="http://www.bloglines.com/about/specs/fac-1.0">http://www.bloglines.com/about/specs/fac-1.0</a>" relationship="deny"/></code></p></dd> +</dl></p></blockquote> +<li><p>Ensure that all <a href="http://nightly.feedparser.org/docs/reference-entry-source.html#reference.entry.source.rights">copyright</a> and <a href="http://nightly.feedparser.org/docs/reference-entry-license.html">licensing</a> information is propagated to the +combined feed(s) that you produce.</p></li> + +<li><p>Add no advertising. Consider filtering out ads, lest you +be accused of using someone’s content to help your friends profit.</p></li> + +<li><p>Most importantly, if anyone does object to their content being included, +quickly and without any complaint, remove them.</p></li> +</ul> +</body> +</html> diff --git a/lib/venus/docs/filters.html b/lib/venus/docs/filters.html new file mode 100644 index 0000000..2348005 --- /dev/null +++ b/lib/venus/docs/filters.html @@ -0,0 +1,105 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Filters</title> +</head> +<body> +<h2>Filters and Plugins</h2> +<p>Filters and plugins are simple Unix pipes. Input comes in +<code>stdin</code>, parameters come from the config file, and output goes to +<code>stdout</code>. Anything written to <code>stderr</code> is logged as an +ERROR message. If no <code>stdout</code> is produced, the entry is not written +to the cache or processed further; in fact, if the entry had previously been +written to the cache, it will be removed.</p> + +<p>There are two types of filters supported by Venus: input and template.</p> +<p>Input to an input filter is an aggressively +<a href="normalization.html">normalized</a> entry. For +example, if a feed is RSS 1.0 with 10 items, the filter will be called ten +times, each with a single Atom 1.0 entry, with all textConstructs +expressed as XHTML, and everything encoded as UTF-8.</p> +<p>Input to a template filter will be the output produced by the template.</p> + +<p>You will find a small set of example filters in the <a +href="../filters">filters</a> directory. The <a +href="../filters/coral_cdn_filter.py">coral cdn filter</a> will change links +to images in the entry itself. The filters in the <a +href="../filters/stripAd/">stripAd</a> subdirectory will strip specific +types of advertisements that you may find in feeds.</p>
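[Editorial note: the pipe protocol described above is small enough to show in full. The following is a minimal sketch of an input filter, not one of the bundled filters shipped in this patch; the keyword it checks for is purely illustrative. It reads one normalized Atom entry from stdin and either echoes it to stdout or, by producing no output, asks Venus to drop the entry.]

    #!/usr/bin/env python
    # Minimal input-filter sketch: one normalized Atom entry arrives on stdin.
    import sys

    KEYWORD = 'advertisement'   # hypothetical: suppress entries mentioning this word

    entry = sys.stdin.read()

    # Writing the entry back to stdout keeps it (possibly modified);
    # producing no output at all causes the entry to be dropped from the cache.
    if KEYWORD not in entry.lower():
        sys.stdout.write(entry)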
+ +<p>The <a href="../filters/excerpt.py">excerpt</a> filter adds metadata (in +the form of a <code>planet:excerpt</code> element) to the feed itself. You +can see examples of how parameters are passed to this program in either +<a href="../tests/data/filter/excerpt-images.ini">excerpt-images</a> or +<a href="../examples/opml-top100.ini">opml-top100.ini</a>. +Alternately, parameters may be passed +<abbr title="Uniform Resource Identifier">URI</abbr> style, for example: +<a href="../tests/data/filter/excerpt-images2.ini">excerpt-images2</a>. +</p> + +<p>The <a href="../filters/xpath_sifter.py">xpath sifter</a> is a variation of +the above, including or excluding feeds based on the presence (or absence) of +data specified by <a href="http://www.w3.org/TR/xpath20/">xpath</a> +expressions. Again, parameters can be passed as +<a href="../tests/data/filter/xpath-sifter.ini">config options</a> or +<a href="../tests/data/filter/xpath-sifter2.ini">URI style</a>. +</p> + +<p>The <a href="../filters/regexp_sifter.py">regexp sifter</a> operates just +like the xpath sifter, except it uses +<a href="http://docs.python.org/lib/re-syntax.html">regular expressions</a> +instead of XPath expressions.</p> + +<h3>Notes</h3> + +<ul> +<li>Any filters listed in the <code>[planet]</code> section of your config.ini +will be invoked on all feeds. Filters listed in individual +<code>[feed]</code> sections will only be invoked on those feeds. +Filters listed in <code>[template]</code> sections will be invoked on the +output of that template.</li> + +<li>Input filters are executed when a feed is fetched, and the results are +placed into the cache. Changing a configuration file alone is not sufficient to +change the contents of the cache — typically that only occurs after +a feed is modified.</li> + +<li>Filters are simply invoked in the order they are listed in the +configuration file (think unix pipes). Planet-wide filters are executed before +feed-specific filters.</li> + +<li>The file extension of the filter is significant. <code>.py</code> invokes +python. <code>.xslt</code> invokes XSLT. <code>.sed</code> and +<code>.tmpl</code> (a.k.a. htmltmpl) are also options. Other languages, like +perl or ruby or class/jar (java), aren't supported at the moment, but these +would be easy to add.</li> + +<li>If the filter name contains a redirection character (<code>></code>), +then the output stream is +<a href="http://en.wikipedia.org/wiki/Tee_(Unix)">tee</a>d; one branch flows +through the specified filter and the output is placed into the named file; the +other unmodified branch continues onto the next filter, if any. +One use case for this function is to use +<a href="../filters/xhtml2html.plugin">xhtml2html</a> to produce both an XHTML +and an HTML output stream from one source.</li> + +<li>Templates written using htmltmpl or django currently only have access to a +fixed set of fields, whereas XSLT and genshi templates have access to +everything.</li> + +<li>Plugins differ from filters in that while filters are forked, plugins are +<a href="http://docs.python.org/lib/module-imp.html">imported</a>. This +means that plugins are limited to Python and are run in-process. 
Plugins +therefore have direct access to planet internals like configuration and +logging facitilies, as well as access to the bundled libraries like the +<a href="http://feedparser.org/docs/">Universal Feed Parser</a> and +<a href="http://code.google.com/p/html5lib/">html5lib</a>; but it also +means that functions like <code>os.abort()</code> can't be recovered +from.</li> +</ul> +</body> +</html> diff --git a/lib/venus/docs/img/shadowAlpha.png b/lib/venus/docs/img/shadowAlpha.png new file mode 100644 index 0000000000000000000000000000000000000000..a2561df971728d988424100c74c817916eca1979 GIT binary patch literal 3403 zcmeAS@N?(olHy`uVBq!ia0y~yV738bR}L1Sh{z?w93Z7#;u=xnT$Gwvl9`{U5R#dj z$`F!Ks$i<%mYSqsWME*TU}$J%1Vly(x&~$j21QvmX+Ul4C7!;n>{pmr1x4l8+mz-4 zg*Xd5B8wRqxP?HN@zUM8KR`j2bVpxD28NCO+<y{T85npLd%8G=RNQ)dD=_P}gNVb$ z=QmoMI7<Fo^ZLE%OK~<Axa&}TZh_;s>HqZdXMAQjaPeGm)a##I4DP>8^|Q}*osX?x zu(<br=bMK+&sH%uEZ%$mdCeKIdAmO_6zti)bKT!vZx1ma_!yV2eXj24gQv^)vo`!c zP`33!<$7O`zL)PyvmP=ksM;_rX<=`e@_}K3kQ_tMA!Y}!3I-2Oeg@4^$<aU<O$npf zVYEybEf_~@htVo>w4E^8SQ>2<4nWJ;;$Ch1?$dLgm)?6;Yr9_CdHMW*W(@x<zwcc8 z)#rR_{m)tFcb?<mXJEf^s5o}%&s(#KBkS*6>+ip*R06EH_T4pml(Y0_%+Z_b^VZki z+Ig-L)GH{z-FHJSJv;l^O{dMW8|Geryoiy(edpWebG3Q>oX-o7q}^hCpz*y@X6IS? ZpGWQ1{0Pup3+%oyc)I$ztaD0e0swh%>OlYi literal 0 HcmV?d00001 diff --git a/lib/venus/docs/index.html b/lib/venus/docs/index.html new file mode 100644 index 0000000..c461d7f --- /dev/null +++ b/lib/venus/docs/index.html @@ -0,0 +1,55 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Documentation</title> +</head> +<body> +<h2>Table of Contents</h2> +<ul class="outer"> +<li><a href="installation.html">Getting started</a></li> +<li>Basic Features +<ul> +<li><a href="config.html">Configuration</a></li> +<li><a href="templates.html">Templates</a></li> +</ul> +</li> +<li>Advanced Features +<ul> +<li><a href="venus.svg">Architecture</a></li> +<li><a href="normalization.html">Normalization</a></li> +<li><a href="filters.html">Filters and Plugins</a></li> +</ul> +</li> +<li>Other +<ul> +<li><a href="migration.html">Migration from Planet 2.0</a></li> +<li><a href="contributing.html">Contributing</a></li> +<li><a href="etiquette.html">Etiquette</a></li> +</ul> +</li> +<li>Reference +<ul> +<li><a href="http://www.planetplanet.org/">Planet</a></li> +<li><a href="http://feedparser.org/docs/">Universal Feed Parser</a></li> +<li><a href="http://code.google.com/p/html5lib/">html5lib</a></li> +<li><a href="http://htmltmpl.sourceforge.net/">htmltmpl</a></li> +<li><a href="http://bitworking.org/projects/httplib2/">httplib2</a></li> +<li><a href="http://www.w3.org/TR/xslt">XSLT</a></li> +<li><a href="http://www.gnu.org/software/sed/manual/html_mono/sed.html">sed</a></li> +<li><a href="http://www.djangoproject.com/documentation/templates/">Django templates</a></li> +</ul> +</li> +<li>Credits and License +<ul> +<li><a href="../AUTHORS">Authors</a></li> +<li><a href="../THANKS">Contributors</a></li> +<li><a href="../LICENCE">License</a></li> +</ul> +</li> +</ul> +</body> +</html> diff --git a/lib/venus/docs/installation.html b/lib/venus/docs/installation.html new file mode 100644 index 0000000..d8edf98 --- /dev/null +++ b/lib/venus/docs/installation.html @@ -0,0 +1,129 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 
plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Installation</title> +</head> +<body> +<h2>Installation</h2> +<p>Venus has been tested on Linux, Mac OS X, and Windows.</p> + +<p>You'll need at least Python 2.2 installed on your system; we recommend +Python 2.4, though, as there may be bugs in the earlier libraries.</p> + +<p>Everything Pythonesque Planet needs to provide basic operation should be +included in the distribution. Some optional features may require +additional libraries, for example:</p> +<ul> +<li>Usage of XSLT requires either +<a href="http://xmlsoft.org/XSLT/xsltproc2.html">xsltproc</a> +or <a href="http://xmlsoft.org/XSLT/python.html">python-libxslt</a>.</li> +<li>The current interface to filters written in non-templating languages +(e.g., Python) uses the +<a href="http://docs.python.org/lib/module-subprocess.html">subprocess</a> +module which was introduced in Python 2.4.</li> +<li>Usage of FOAF as a reading list requires +<a href="http://librdf.org/">librdf</a>.</li> +</ul> + +<h3>General Instructions</h3> + +<p> +These instructions apply to any platform. Check the instructions +below for details specific to your platform. +</p> + +<ol> +<li><p>If you are reading this online, you will need to +<a href="../index.html">download</a> and extract the files into a folder somewhere. +You can place this wherever you like; <code>~/planet</code> +and <code>~/venus</code> are good +choices, but so's anywhere else you prefer.</p></li> +<li><p>This is very important: from within that directory, type the following +command:</p> +<blockquote><code>python runtests.py</code></blockquote> +<p>This should take anywhere from one to ten seconds to execute. No network +connection is required, and the script cleans up after itself. If the +script completes with an "OK", you are good to go. Otherwise, stopping here +and inquiring on the +<a href="http://lists.planetplanet.org/mailman/listinfo/devel">mailing list</a> + is a good idea, as it can save you lots of frustration down the road.</p></li> +<li><p>Make a copy of one of the <code>ini</code> files in the +<a href="../examples">examples</a> subdirectory, +and put it wherever you like; I like to use the Planet's name (so +<code>~/planet/debian</code>), but it's really up to you.</p></li> +<li><p>Edit the <code>config.ini</code> file in this directory to taste; +it's pretty well documented, so you shouldn't have any problems here. Pay +particular attention to the <code>output_dir</code> option, which should be +readable by your web server. If the directory you specify in your +<code>cache_dir</code> exists, make sure that it is empty.</p></li> +<li><p>Run it: <code>python planet.py pathto/config.ini</code></p> +<p>You'll want to add this to cron; make sure you run it from the +right directory.</p></li> +<li><p>(Optional)</p> +<p>Tell us about it! We'd love to link to you on planetplanet.org :-)</p></li> +<li><p>(Optional)</p> +<p>Build your own themes, templates, or filters! And share!</p></li> +</ol> + +<h3 id="macosx">Mac OS X and Fink Instructions</h3> + +<p> +The <a href="http://fink.sourceforge.net/">Fink Project</a> packages +various open source software for Mac OS X. This makes it a little easier +to get started with projects like Planet Venus. 
+</p> + +<p> +Note: in the following, we recommend explicitly +using <code>python2.4</code>. As of this writing, Fink is starting to +support <code>python2.5</code>, but the XML libraries, for example, are +not yet ported to the newer Python, so Venus will be less featureful. +</p> + +<ol> + <li><p>Install the Xcode development tools from your Mac OS X install + disks</p></li> + <li><p><a href="http://fink.sourceforge.net/download/">Download</a> + and install Fink</p></li> + <li><p>Tell Fink to install the Planet Venus prerequisites:<br /> + <code>fink install python24 celementtree-py24 bzr-py24 libxslt-py24 + libxml2-py24</code></p></li> + <li><p><a href="../index.html">Download</a> and extract the Venus files into a + folder somewhere</p></li> + <li><p>Run the tests: <code>python2.4 runtests.py</code><br /> This + will warn you that the RDF library is missing, but that's + OK.</p></li> + <li><p>Continue with the general steps above, starting with Step 3. You + may want to explicitly specify <code>python2.4</code>.</p></li> +</ol> + +<h3 id="ubuntu">Ubuntu Linux (Edgy Eft) instructions</h3> + +<p>Before starting, issue the following command:</p> + +<blockquote><pre>sudo apt-get install bzr python2.4-librdf</pre></blockquote> + +<h3 id="windows">Windows instructions</h3> + +<p> + htmltmpl templates (and Django too, since it currently piggybacks on + the htmltmpl implementation) on Windows require + the <a href="http://sourceforge.net/projects/pywin32/">pywin32</a> + module. +</p> + +<h3 id="python22">Python 2.2 instructions</h3> + +<p>If you are running Python 2.2, you may also need to install <a href="http://pyxml.sourceforge.net/">pyxml</a>. If the +following runs without error, you do <b>not</b> have this problem.</p> +<blockquote><pre>python -c "__import__('xml.dom.minidom').dom.minidom.parseString('<entry xml:lang=\"en\"/>')"</pre></blockquote> +<p>Installation of pyxml varies by platform. 
For Ubuntu Linux (Dapper Drake), issue the following command:</p> + +<blockquote><pre>sudo apt-get install python2.2-xml</pre></blockquote> + +</body> +</html> diff --git a/lib/venus/docs/migration.html b/lib/venus/docs/migration.html new file mode 100644 index 0000000..545a7f6 --- /dev/null +++ b/lib/venus/docs/migration.html @@ -0,0 +1,42 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Migration</title> +</head> +<body> +<h2>Migration from Planet 2.0</h2> +<p>The intent is that existing Planet 2.0 users should be able to reuse +their existing <code>config.ini</code> and <code>.tmpl</code> files, +but the reality is that users will need to be aware of the following:</p> +<ul> +<li>You will need to start over with a new cache directory as the format +of the cache has changed dramatically.</li> +<li>Existing <code>.tmpl</code> and <code>.ini</code> files should work, +though some <a href="config.html">configuration</a> options (e.g., +<code>days_per_page</code>) have not yet been implemented.</li> +<li>No testing has been done on Python 2.1, and it is presumed not to work.</li> +<li>To take advantage of all features, you should install the optional +XML and RDF libraries described on +the <a href="installation.html">Installation</a> page.</li> +</ul> + +<p> +Common changes to config.ini include: +</p> +<ul> + <li><p>Filename changes:</p> +<pre> +examples/fancy/index.html.tmpl => themes/classic_fancy/index.html.tmpl +examples/atom.xml.tmpl => themes/common/atom.xml.xslt +examples/rss20.xml.tmpl => themes/common/rss20.xml.tmpl +examples/rss10.xml.tmpl => themes/common/rss10.xml.tmpl +examples/opml.xml.tmpl => themes/common/opml.xml.xslt +examples/foafroll.xml.tmpl => themes/common/foafroll.xml.xslt +</pre></li> +</ul> +</body> +</html> diff --git a/lib/venus/docs/normalization.html b/lib/venus/docs/normalization.html new file mode 100644 index 0000000..fb6d01d --- /dev/null +++ b/lib/venus/docs/normalization.html @@ -0,0 +1,107 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Normalization</title> +</head> +<body> +<h2>Normalization</h2> +<p>Venus builds on, and extends, the <a +href="http://www.feedparser.org/">Universal Feed Parser</a> and <a +href="http://code.google.com/p/html5lib/">html5lib</a> to +convert all feeds into Atom 1.0, with well-formed XHTML, and encoded as UTF-8, +meaning that you don't have to worry about funky feeds, tag soup, or character +encoding.</p> +<h3>Encoding</h3> +<p>Input data in feeds may be encoded in a variety of formats, most commonly +ASCII, ISO-8859-1, WIN-1252, and UTF-8. Additionally, many feeds make use of +the wide range of +<a href="http://www.w3.org/TR/html401/sgml/entities.html">character entity +references</a> provided by HTML. 
Each is converted to UTF-8, an encoding +which is a proper superset of ASCII, supports the entire range of Unicode +characters, and is one of +<a href="http://www.w3.org/TR/2006/REC-xml-20060816/#charsets">only two</a> +encodings required to be supported by all conformant XML processors.</p> +<p>Encoding problems are one of the more common feed errors, and every +attempt is made to correct common errors, such as the inclusion of +the so-called +<a href="http://www.fourmilab.ch/webtools/demoroniser/">moronic</a> versions +of smart-quotes. In rare cases where individual characters cannot be +converted to valid UTF-8 or into +<a href="http://www.w3.org/TR/xml/#charsets">characters allowed in XML 1.0 +documents</a>, such characters will be replaced with the Unicode +<a href="http://www.fileformat.info/info/unicode/char/fffd/index.htm">Replacement character</a>, with a title that describes the original character whenever possible.</p> +<p>In order to support the widest range of inputs, use of Python 2.3 or later, +as well as the installation of the Python <code>iconvcodec</code>, is +recommended.</p> +<h3>HTML</h3> +<p>A number of different normalizations of HTML are performed. For starters, +the HTML is +<a href="http://www.feedparser.org/docs/html-sanitization.html">sanitized</a>, +meaning that HTML tags and attributes that could introduce JavaScript or +other security risks are removed.</p> +<p>Then, +<a href="http://www.feedparser.org/docs/resolving-relative-links.html">relative +links are resolved</a> within the HTML. This is also done for links +in other areas of the feed.</p> +<p>Finally, unmatched tags are closed. This is done with a +<a href="http://code.google.com/p/html5lib/">knowledge of the semantics of HTML</a>. Additionally, a +<a href="http://golem.ph.utexas.edu/~distler/blog/archives/000165.html#sanitizespec">large +subset of MathML</a>, as well as a +<a href="http://www.w3.org/TR/SVGMobile/">tiny profile of SVG</a>, +is also supported.</p> +<h3>Atom 1.0</h3> +<p>The Universal Feed Parser also +<a href="http://www.feedparser.org/docs/content-normalization.html">normalizes the content of feeds</a>. This involves a +<a href="http://www.feedparser.org/docs/reference.html">large number of elements</a>; the best place to start is to look at +<a href="http://www.feedparser.org/docs/annotated-examples.html">annotated examples</a>. Among other things, a wide variety of +<a href="http://www.feedparser.org/docs/date-parsing.html">date formats</a> +are converted into +<a href="http://www.ietf.org/rfc/rfc3339.txt">RFC 3339</a> formatted dates.</p> +<p>If no <a href="http://www.feedparser.org/docs/reference-entry-id.html">ids</a> are found in entries, attempts are made to synthesize one using (in order):</p> +<ul> +<li><a href="http://www.feedparser.org/docs/reference-entry-link.html">link</a></li> +<li><a href="http://www.feedparser.org/docs/reference-entry-title.html">title</a></li> +<li><a href="http://www.feedparser.org/docs/reference-entry-summary.html">summary</a></li> +<li><a href="http://www.feedparser.org/docs/reference-entry-content.html">content</a></li> +</ul> +<p>If no <a href="http://www.feedparser.org/docs/reference-feed-updated.html">updated</a> +dates are found in an entry, the updated date from +the feed is used. If no updated date is found in either the feed or +the entry, the current time is substituted.</p> +<h3 id="overrides">Overrides</h3> +<p>All of the above describes what Venus does automatically, either directly +or through its dependencies. 
There are a number of errors which cannot +be corrected automatically, and for these, there are configuration parameters +that can be used to help.</p> +<ul> +<li><code>ignore_in_feed</code> allows you to list any number of elements +or attributes which are to be ignored in feeds. This is often handy in the +case of feeds where the <code>author</code>, <code>id</code>, +<code>updated</code> or <code>xml:lang</code> values can't be trusted.</li> +<li><code>title_type</code>, <code>summary_type</code>, +<code>content_type</code> allow you to override the +<a href="http://www.feedparser.org/docs/reference-entry-title_detail.html#reference.entry.title_detail.type"><code>type</code></a> +attributes on these elements.</li> +<li><code>name_type</code> does something similar for +<a href="http://www.feedparser.org/docs/reference-entry-author_detail.html#reference.entry.author_detail.name">author names</a>.</li> +<li><code>future_dates</code> allows you to specify how to deal with dates which are in the future. +<ul style="margin:0"> +<li><code>ignore_date</code> will cause the date to be ignored (and will therefore default to the time the entry was first seen) until the feed is updated and the time indicated is past, at which point the entry will be updated with the new date.</li> +<li><code>ignore_entry</code> will cause the entire entry containing the future date to be ignored until the date is past.</li> +<li>Anything else (i.e., the default) will leave the date as is, causing the entries that contain these dates to sort to the top of the planet until the time passes.</li> +</ul> +</li> +<li><code>xml_base</code> will adjust the <code>xml:base</code> values in effect for each of the text constructs in the feed (things like <code>title</code>, <code>summary</code>, and <code>content</code>). Other elements in the feed (most notably <code>link</code>) are not affected by this value. +<ul style="margin:0"> +<li><code>feed_alternate</code> will replace the <code>xml:base</code> in effect with the value of the <code>alternate</code> <code>link</code> found either in the enclosed <code>source</code> or enclosing <code>feed</code> element.</li> +<li><code>entry_alternate</code> will replace the <code>xml:base</code> in effect with the value of the <code>alternate</code> <code>link</code> found in this entry.</li> +<li>Any other value will be treated as a <a href="http://www.ietf.org/rfc/rfc3986.txt">URI reference</a>. These values may be relative or absolute. 
If relative, the <code>xml:base</code> values in each text construct will each be adjusted separately using the specified value.</li> +</ul> +</li> +</ul> +</body> +</html> diff --git a/lib/venus/docs/templates.html b/lib/venus/docs/templates.html new file mode 100644 index 0000000..b9fd9c1 --- /dev/null +++ b/lib/venus/docs/templates.html @@ -0,0 +1,184 @@ +<!DOCTYPE html PUBLIC + "-//W3C//DTD XHTML 1.1 plus MathML 2.0 plus SVG 1.1//EN" + "http://www.w3.org/2002/04/xhtml-math-svg/xhtml-math-svg.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> +<script type="text/javascript" src="docs.js"></script> +<link rel="stylesheet" type="text/css" href="docs.css"/> +<title>Venus Templates</title> +</head> +<body> +<h2>Templates</h2> +<p>Template names take the form +<em>name</em><code>.</code><em>ext</em><code>.</code><em>type</em>, where +<em>name</em><code>.</code><em>ext</em> identifies the name of the output file +to be created in the <code>output_dir</code>, and <em>type</em> +indicates which language processor to use for the template.</p> +<p>As with <a href="filters.html">filters</a>, templates may be written +in a variety of languages and are based on the standard Unix pipe convention +of producing <code>stdout</code> from <code>stdin</code>, but in practice +two languages are used more than others:</p> +<h3>htmltmpl</h3> +<p>Many find <a href="http://htmltmpl.sourceforge.net/">htmltmpl</a> +easier to get started with, as you can take a simple example of your +output file, sprinkle in a few <code><TMPL_VAR></code>s and +<code><TMPL_LOOP></code>s, and you are done. Eventually, however, +you may find that your template involves <code><TMPL_IF></code> +blocks inside attribute values, and you may find the result difficult +to read and create correctly.</p> +<p>It is also important to note that htmltmpl-based templates do not +have access to the full set of information available in the feed, just +the following (rather substantial) subset:</p> + +<blockquote> +<table border="1" cellpadding="5" cellspacing="0"> +<tr><th>VAR</th><th>type</th><th>source</th></tr> +<tr><td>author</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-author.html">author</a></td></tr> +<tr><td>author_name</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-author_detail.html#reference.feed.author_detail.name">author_detail.name</a></td></tr> +<tr><td>generator</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-generator.html">generator</a></td></tr> +<tr><td>id</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-id.html">id</a></td></tr> +<tr><td>icon</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-icon.html">icon</a></td></tr> +<tr><td>last_updated_822</td><td>Rfc822</td><td><a href="http://feedparser.org/docs/reference-feed-icon.html">updated_parsed</a></td></tr> +<tr><td>last_updated_iso</td><td>Rfc3399</td><td><a href="http://feedparser.org/docs/reference-feed-icon.html">updated_parsed</a></td></tr> +<tr><td>last_updated</td><td>PlanetDate</td><td><a href="http://feedparser.org/docs/reference-feed-icon.html">updated_parsed</a></td></tr> +<tr><td>link</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-link.html">link</a></td></tr> +<tr><td>logo</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-logo.html">logo</a></td></tr> +<tr><td>rights</td><td>String</td><td><a 
href="http://feedparser.org/docs/reference-feed-rights_detail.html#reference.feed.rights_detail.value">rights_detail.value</a></td></tr> +<tr><td>subtitle</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-subtitle_detail.html#reference.feed.subtitle_detail.value">subtitle_detail.value</a></td></tr> +<tr><td>title</td><td>String</td><td><a href="http://feedparser.org/docs/reference-feed-title_detail.html#reference.feed.title_detail.value">title_detail.value</a></td></tr> +<tr><td>title_plain</td><td>Plain</td><td><a href="http://feedparser.org/docs/reference-feed-title_detail.html#reference.feed.title_detail.value">title_detail.value</a></td></tr> +<tr><td rowspan="2">url</td><td rowspan="2">String</td><td><a href="http://feedparser.org/docs/reference-feed-links.html#reference.feed.links.href">links[rel='self'].href</a></td></tr> +<tr><td><a href="http://feedparser.org/docs/reference-headers.html">headers['location']</a></td></tr> +</table> +</blockquote> + +<p>Note: when multiple sources are listed, the last one wins</p> +<p>In addition to these variables, Planet Venus makes available two +arrays, <code>Channels</code> and <code>Items</code>, with one entry +per subscription and per output entry respectively. The data values +within the <code>Channels</code> array exactly match the above list. +The data values within the <code>Items</code> array are as follows:</p> + +<blockquote> +<table border="1" cellpadding="5" cellspacing="0"> +<tr><th>VAR</th><th>type</th><th>source</th></tr> +<tr><td>author</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-author.html">author</a></td></tr> +<tr><td>author_email</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-author_detail.html#reference.entry.author_detail.email">author_detail.email</a></td></tr> +<tr><td>author_name</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-author_detail.html#reference.entry.author_detail.name">author_detail.name</a></td></tr> +<tr><td>author_uri</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-author_detail.html#reference.entry.author_detail.href">author_detail.href</a></td></tr> +<tr><td>content_language</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-content.html#reference.entry.content.language">content[0].language</a></td></tr> +<tr><td rowspan="2">content</td><td rowspan="2">String</td><td><a href="http://feedparser.org/docs/reference-entry-summary_detail.html#reference.entry.summary_detail.value">summary_detail.value</a></td></tr> +<tr><td><a href="http://feedparser.org/docs/reference-entry-content.html#reference.entry.content.value">content[0].value</a></td></tr> +<tr><td rowspan="2">date</td><td rowspan="2">PlanetDate</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +<tr><td><a href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td rowspan="2">date_822</td><td rowspan="2">Rfc822</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +<tr><td><a href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td rowspan="2">date_iso</td><td rowspan="2">Rfc3399</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +<tr><td><a 
href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td><ins>enclosure_href</ins></td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-enclosures.html#reference.entry.enclosures.href">enclosures[0].href</a></td></tr> +<tr><td><ins>enclosure_length</ins></td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-enclosures.html#reference.entry.enclosures.length">enclosures[0].length</a></td></tr> +<tr><td><ins>enclosure_type</ins></td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-enclosures.html#reference.entry.enclosures.type">enclosures[0].type</a></td></tr> +<tr><td><ins>guid_isPermaLink</ins></td><td>String</td><td><a href="http://blogs.law.harvard.edu/tech/rss#ltguidgtSubelementOfLtitemgt">isPermaLink</a></td></tr> +<tr><td>id</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-id.html">id</a></td></tr> +<tr><td>link</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-links.html#reference.entry.links.href">links[rel='alternate'].href</a></td></tr> +<tr><td>new_channel</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-id.html">id</a></td></tr> +<tr><td rowspan="2">new_date</td><td rowspan="2">NewDate</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +<tr><td><a href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td>rights</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-rights_detail.html#reference.entry.rights_detail.value">rights_detail.value</a></td></tr> +<tr><td>title_language</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-title_detail.html#reference.entry.title_detail.language">title_detail.language</a></td></tr> +<tr><td>title_plain</td><td>Plain</td><td><a href="http://feedparser.org/docs/reference-entry-title_detail.html#reference.entry.title_detail.value">title_detail.value</a></td></tr> +<tr><td>title</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-title_detail.html#reference.entry.title_detail.value">title_detail.value</a></td></tr> +<tr><td>summary_language</td><td>String</td><td><a href="http://feedparser.org/docs/reference-entry-summary_detail.html#reference.entry.summary_detail.language">summary_detail.language</a></td></tr> +<tr><td>updated</td><td>PlanetDate</td><td><a href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td>updated_822</td><td>Rfc822</td><td><a href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td>updated_iso</td><td>Rfc3399</td><td><a href="http://feedparser.org/docs/reference-entry-updated_parsed.html">updated_parsed</a></td></tr> +<tr><td>published</td><td>PlanetDate</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +<tr><td>published_822</td><td>Rfc822</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +<tr><td>published_iso</td><td>Rfc3399</td><td><a href="http://feedparser.org/docs/reference-entry-published_parsed.html">published_parsed</a></td></tr> +</table> +</blockquote> +<p>Note: variables above which start with +<code>new_</code> are only set if their values differ from the previous +Item.</p> + +<h3>django</h3> + +<p> + If you have 
the <a href="http://www.djangoproject.com/">Django</a> + framework installed, + <a href="http://www.djangoproject.com/documentation/templates/" + >Django templates</a> are automatically available to Venus + projects. You will have to save them with a <code>.html.dj</code> + extension in your themes. The variable set is the same as the one + from htmltmpl, above. In the Django template context you'll have + access to <code>Channels</code> and <code>Items</code>, and you'll be + able to iterate through them. +</p> + +<p> + You also have access to the <code>Config</code> dictionary, which contains + the Venus configuration variables from your <code>.ini</code> file. +</p> + +<p> + If you lose your way and want to introspect all the variables in the + context, there's the useful <code>{% debug %}</code> template tag. +</p> + +<p> + In the <code>themes/django/</code> directory you'll find a sample Venus theme + that uses Django templates and might be a starting point for + your own custom themes. +</p> + +<p> + All the standard Django template tags and filters are supposed to + work, with the notable exception of the <code>date</code> filter on + the updated and published dates of an item (it works on the main + <code>{{ date }}</code> variable). +</p> + +<p> + Please note that Django, and therefore Venus' Django support, + requires at least Python 2.3. +</p> + +<h3>xslt</h3> +<p><a href="http://www.w3.org/TR/xslt">XSLT</a> is a paradox: it actually +makes some simple things easier to do than htmltmpl, and certainly can +make more difficult things possible; but it is fair to say that many +find XSLT less approachable than htmltmpl.</p> +<p>But in any case, the XSLT support is easier to document as the +input is a <a href="normalization.html">highly normalized</a> feed, +with a few extension elements.</p> +<ul> +<li><code>atom:feed</code> will have the following child elements: +<ul> +<li>A <code>planet:source</code> element per subscription, with the same child elements as <a href="http://www.atomenabled.org/developers/syndication/atom-format-spec.php#element.source"><code>atom:source</code></a>, as well as +an additional child element in the planet namespace for each +<a href="config.html#subscription">configuration parameter</a> that applies to +this subscription.</li> +<li><a href="http://www.feedparser.org/docs/reference-version.html"><code>planet:format</code></a> indicating the format and version of the source feed.</li> +<li><a href="http://www.feedparser.org/docs/reference-bozo.html"><code>planet:bozo</code></a> which is either <code>true</code> or <code>false</code>.</li> +</ul> +</li> +<li><code>atom:updated</code> and <code>atom:published</code> will have +a <code>planet:format</code> attribute containing the referenced date +formatted according to the <code>[planet] date_format</code> specified +in the configuration.</li> +</ul> + +<h3>genshi</h3> +<p>Genshi approaches the power of XSLT, but with a syntax that many Python +programmers find more natural, succinct and expressive. 
Genshi templates +have access to the full range of <a href="http://feedparser.org/docs/reference.html">feedparser</a> values, with the following additions:</p> +<ul> +<li>In addition to a <code>feed</code> element which describes the feed +for your planet, there is also a <code>feeds</code> element which contains +the description for each subscription.</li> +<li>All <code>feed</code>, <code>feeds</code>, and <code>source</code> elements have a child <code>config</code> element which contains the config.ini entries associated with that feed.</li> +<li>All text construct detail elements (<code>subtitle</code>, <code>rights</code>, <code>title</code>, <code>summary</code>, <code>content</code>) also contain a <code>stream</code> element which contains the value as a Genshi stream.</li> +<li>Each of the <code>entries</code> has a <code>new_date</code> and <code>new_feed</code> value which indicates if this entry's date or feed differs from the preceeding entry.</li> +</ul> +</body> +</html> diff --git a/lib/venus/docs/venus.svg b/lib/venus/docs/venus.svg new file mode 100644 index 0000000..3ae3e63 --- /dev/null +++ b/lib/venus/docs/venus.svg @@ -0,0 +1,109 @@ +<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1280 1024" xmlns:xlink="http://www.w3.org/1999/xlink">> + <defs> + <g id="feed"> + <path d="M10,15l75,0l0,75l-75,0z" fill="#F80" + stroke-linejoin="round" stroke-width="20" stroke="#F80"/> + <circle cx="15" cy="82" r="6" fill="#FFF"/> + <path d="M35,82s0-20-20-20 M55,82s0-40-40-40 M75,82s0-60-60-60" + stroke-linecap="round" stroke-width="12" stroke="#FFF" fill="none"/> + </g> + + <g id="entry"> + <g fill="none"> + <ellipse stroke="#689" rx="3" ry="22"/> + <ellipse stroke="#eb4" rx="3" ry="22" transform="rotate(-66)"/> + <ellipse stroke="#8ac" rx="3" ry="22" transform="rotate(66)"/> + <circle stroke="#451" r="22"/> + </g> + <g fill="#689" stroke="#FFF"> + <circle fill="#8ac" r="6.5"/> + <circle cy="-22" r="4.5"/> + <circle cx="-20" cy="9" r="4.5"/> + <circle cx="20" cy="9" r="4.5"/> + </g> + </g> + <g id="node" stroke="none"> + <circle r="18" fill="#049"/> + <path d="M-14,7a16,16,0,0,1,22-21a15,15,0,0,0-14,2a3,3,0,1,1-5,5 + a15,15,0,0,0-3,14" fill="#FFF"/> + </g> + <path d="M-14-6a44,62,0,0,0,28,0l0,12a44,62,0,0,0-28,0z" + fill="#049" id="arc"/> + </defs> + + <rect height="1024" width="1280" fill="#0D0"/> + + <use xlink:href="#feed" x="220" y="30"/> + <use xlink:href="#feed" x="150" y="60"/> + <use xlink:href="#feed" x="100" y="100"/> + <use xlink:href="#feed" x="60" y="150"/> + <use xlink:href="#feed" x="30" y="220"/> + + <g fill="#F00" stroke-linejoin="round" stroke-width="12" stroke="#F88"> + <path d="M50,800l0,180l1000,0l0-180z" fill="#FFF"/> + <path d="M150,330l400,0l0,300l-400,0z"/> + <path d="M750,200l200,0 l0,110l100,0l0,60l-100,0 l0,40l100,0l0,60l-100,0 + l0,40l100,0l0,60l-100,0 l0,130l70,70l-340,0l70,-70z"/> + </g> + + <path d="M1080,360l100,0l0,-70l-30,-30l-70,0z" fill="#FFF"/> + <path d="M1180,290l-30,0l0,-30" fill="none" stroke="#000"/> + <use xlink:href="#feed" x="1080" y="380"/> + + <g transform="translate(1080,500)"> + <use xlink:href="#arc" transform="translate(76,50) rotate(90)"/> + <use xlink:href="#arc" transform="translate(50,35) rotate(-30)"/> + <use xlink:href="#arc" transform="translate(50,65) rotate(30)"/> + <use xlink:href="#node" transform="translate(24,50)"/> + <use xlink:href="#node" transform="translate(76,80)"/> + <use xlink:href="#node" transform="translate(76,20)"/> + </g> + + <path d="M260,150s100,60,90,280 M170,270s150,0,180,120 + 
M200,200s150,0,150,200l0,450m-100,-70l100,70l100,-70 + M850,807l0,-200m-70,70l70,-70l70,70" + stroke="#000" fill="none" stroke-width="40"/> + + <ellipse cx="350" cy="368" fill="#FFF" rx="80" ry="30"/> + <ellipse cx="850" cy="238" fill="#FFF" rx="80" ry="30"/> + <g font-size="32" fill="#FFF" text-anchor="middle"> + <text x="350" y="380" fill="#F00">Spider</text> + <text x="350" y="460">Universal Feed Parser</text> + <text x="350" y="530">html5lib</text> + <text x="350" y="600">Reconstitute</text> + <text x="350" y="750">Filter(s)</text> + <text x="850" y="250" fill="#F00">Splice</text> + <text x="950" y="350">Template</text> + <text x="950" y="450">Template</text> + <text x="950" y="550">Template</text> + <text x="1126" y="330" fill="#000">HTML</text> + </g> + + + <use xlink:href="#entry" x="100" y="900"/> + <use xlink:href="#entry" x="180" y="950"/> + <use xlink:href="#entry" x="200" y="850"/> + <use xlink:href="#entry" x="290" y="920"/> + <use xlink:href="#entry" x="400" y="900"/> + <use xlink:href="#entry" x="470" y="840"/> + <use xlink:href="#entry" x="500" y="930"/> + <use xlink:href="#entry" x="570" y="870"/> + <use xlink:href="#entry" x="620" y="935"/> + <use xlink:href="#entry" x="650" y="835"/> + <use xlink:href="#entry" x="690" y="900"/> + <use xlink:href="#entry" x="720" y="835"/> + <use xlink:href="#entry" x="730" y="950"/> + <use xlink:href="#entry" x="760" y="900"/> + <use xlink:href="#entry" x="790" y="835"/> + <use xlink:href="#entry" x="800" y="950"/> + <use xlink:href="#entry" x="830" y="900"/> + <use xlink:href="#entry" x="860" y="835"/> + <use xlink:href="#entry" x="870" y="950"/> + <use xlink:href="#entry" x="900" y="900"/> + <use xlink:href="#entry" x="930" y="835"/> + <use xlink:href="#entry" x="940" y="950"/> + <use xlink:href="#entry" x="970" y="900"/> + <use xlink:href="#entry" x="1000" y="835"/> + <use xlink:href="#entry" x="1010" y="950"/> + +</svg> diff --git a/lib/venus/examples/filters/categories/categories.xslt b/lib/venus/examples/filters/categories/categories.xslt new file mode 100644 index 0000000..fa95464 --- /dev/null +++ b/lib/venus/examples/filters/categories/categories.xslt @@ -0,0 +1,82 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE xsl:stylesheet [ +<!ENTITY categoryTerm "WebSemantique"> +]> +<!-- + + This transformation is released under the same licence as Python + see http://www.intertwingly.net/code/venus/LICENCE. + + Author: Eric van der Vlist <vdv@dyomedea.com> + + This transformation is meant to be used as a filter that determines if + Atom entries are relevant to a specific topic and adds the corresonding + <category/> element when it is the case. + + This is done by a simple keyword matching mechanism. + + To customize this filter to your needs: + + 1) Replace WebSemantique by your own category name in the definition of + the categoryTerm entity above. + 2) Review the "upper" and "lower" variables that are used to convert text + nodes to lower case and replace common ponctuation signs into spaces + to check that they meet your needs. + 3) Define your own list of keywords in <d:keyword/> elements. Note that + the leading and trailing spaces are significant: "> rdf <" will match rdf + as en entier word while ">rdf<" would match the substring "rdf" and + "> rdf<" would match words starting by rdf. Also note that the test is done + after conversion to lowercase. 
+ + To use it with venus, just add this filter to the list of filters, for instance: + +filters= categories.xslt guess_language.py + +--> +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" + xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://www.w3.org/2005/Atom" + xmlns:d="http://ns.websemantique.org/data/" exclude-result-prefixes="d atom" version="1.0"> + <xsl:variable name="upper" + >,.;AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZzÀà ÃáÂâÃãÄäÅåÆæÇçÈèÉéÊêËëÌìÃÃÎîÃïÃðÑñÒòÓóÔôÕõÖöØøÙùÚúÛûÜüÃýÞþ</xsl:variable> + <xsl:variable name="lower" + > aabbccddeeffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzzaaaaaaaaaaaaææcceeeeeeeeiiiiiiiiððnnooooooooooøøuuuuuuuuyyþþ</xsl:variable> + <d:keywords> + <d:keyword> wiki semantique </d:keyword> + <d:keyword> wikis semantiques </d:keyword> + <d:keyword> web semantique </d:keyword> + <d:keyword> websemantique </d:keyword> + <d:keyword> semantic web</d:keyword> + <d:keyword> semweb</d:keyword> + <d:keyword> rdf</d:keyword> + <d:keyword> owl </d:keyword> + <d:keyword> sparql </d:keyword> + <d:keyword> topic map</d:keyword> + <d:keyword> doap </d:keyword> + <d:keyword> foaf </d:keyword> + <d:keyword> sioc </d:keyword> + <d:keyword> ontology </d:keyword> + <d:keyword> ontologie</d:keyword> + <d:keyword> dublin core </d:keyword> + </d:keywords> + <xsl:template match="@*|node()"> + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + <xsl:template match="atom:entry/atom:updated"> + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + <xsl:variable name="concatenatedText"> + <xsl:for-each select="../atom:title|../atom:summary|../atom:content|../atom:category/@term"> + <xsl:text> </xsl:text> + <xsl:value-of select="translate(., $upper, $lower)"/> + </xsl:for-each> + <xsl:text> </xsl:text> + </xsl:variable> + <xsl:if test="document('')/*/d:keywords/d:keyword[contains($concatenatedText, .)]"> + <category term="WebSemantique"/> + </xsl:if> + </xsl:template> + <xsl:template match="atom:category[@term='&categoryTerm;']"/> +</xsl:stylesheet> diff --git a/lib/venus/examples/filters/guess-language/README b/lib/venus/examples/filters/guess-language/README new file mode 100644 index 0000000..b1c1c2e --- /dev/null +++ b/lib/venus/examples/filters/guess-language/README @@ -0,0 +1,37 @@ +This filter is released under the same licence as Python +see http://www.intertwingly.net/code/venus/LICENCE. + +Author: Eric van der Vlist <vdv@dyomedea.com> + +This filter guesses whether an Atom entry is written +in English or French. It should be trivial to chose between +two other languages, easy to extend to more than two languages +and useful to pass these languages as Venus configuration +parameters. + +The code used to guess the language is the one that has been +described by Douglas Bagnall as the Python recipe titled +"Language detection using character trigrams" +http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/326576. + +To add support for a new language, this language must first be +"learned" using learn-language.py. This learning phase is nothing +more than saving a pickled version of the Trigram object for this +language. + +To learn Finnish, you would execute: + +$ ./learn-language.py http://gutenberg.net/dirs/1/0/4/9/10492/10492-8.txt fi.data + +where http://gutenberg.net/dirs/1/0/4/9/10492/10492-8.txt is a text +representative of the Finnish language and "fi.data" is the name of the +data file for "fi" (ISO code for Finnish). 
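+
+For reference, the learning step is roughly equivalent to the sketch
+below. This assumes, as in the original recipe, that the Trigram class
+in the bundled trigram.py accepts a file name or URL, and that the
+.data files are ordinary text-mode pickles; check trigram.py and
+learn-language.py for the exact interface.
+
+  import pickle
+  from trigram import Trigram
+
+  # Build a trigram frequency model from a representative text ...
+  model = Trigram('http://gutenberg.net/dirs/1/0/4/9/10492/10492-8.txt')
+
+  # ... and save it next to the other language models. guess-language.py
+  # later unpickles these .data files and compares each entry's text
+  # against every language it has data for.
+  f = open('fi.data', 'w')
+  pickle.dump(model, f)
+  f.close()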
+ +To install this filter, copy this directory under the Venus +filter directory and declare it in your filters list, for instance: + +filters= categories.xslt guess-language/guess-language.py + +NOTE: this filter depends on Amara +(http://uche.ogbuji.net/tech/4suite/amara/) + diff --git a/lib/venus/examples/filters/guess-language/en.data b/lib/venus/examples/filters/guess-language/en.data new file mode 100644 index 0000000..a7c5ca6 --- /dev/null +++ b/lib/venus/examples/filters/guess-language/en.data @@ -0,0 +1,15131 @@ +(itrigram +Trigram +p1 +(dp2 +S'length' +p3 +F5471.4726536829185 +sS'lut' +p4 +(dp5 +V b +p6 +(dp7 +Va +I7 +sVe +I568 +sVi +I11 +sVl +I17 +sVo +I20 +sVr +I56 +sVu +I188 +sVy +I148 +ssV c +p8 +(dp9 +Va +I189 +sVe +I20 +sVd +I2 +sVi +I12 +sVh +I74 +sVl +I16 +sVo +I463 +sVr +I22 +sVu +I2 +ssVgu +p10 +(dp11 +Va +I9 +sVe +I5 +sVi +I9 +sVm +I1 +sVl +I2 +sVn +I3 +sVr +I1 +sVt +I1 +ssV a +p12 +(dp13 +V +I330 +sVc +I85 +sVb +I70 +sVd +I47 +sVg +I73 +sVf +I83 +sVi +I3 +sVm +I148 +sVl +I216 +sVn +I858 +sVp +I48 +sVs +I282 +sVr +I101 +sVu +I15 +sVt +I203 +sVw +I28 +sVv +I7 +ssV f +p14 +(dp15 +Va +I109 +sVe +I95 +sVi +I76 +sVl +I16 +sVo +I371 +sVr +I155 +sVu +I23 +sVt +I2 +ssV g +p16 +(dp17 +Va +I16 +sVe +I45 +sVi +I68 +sVl +I8 +sVo +I89 +sVr +I81 +sVu +I11 +ssV d +p18 +(dp19 +Va +I90 +sVe +I275 +sVi +I178 +sVo +I142 +sVr +I24 +sVu +I18 +sVw +I1 +ssV e +p20 +(dp21 +Va +I48 +sVc +I3 +sVd +I12 +sVf +I12 +sVi +I7 +sVm +I13 +sVl +I14 +sVn +I107 +sVq +I17 +sVp +I1 +sVs +I26 +sVr +I5 +sVt +I31 +sVv +I132 +sVy +I10 +sVx +I117 +ssV j +p22 +(dp23 +Ve +I11 +sVu +I41 +sVo +I11 +ssV k +p24 +(dp25 +Vi +I22 +sVe +I14 +sVn +I90 +ssV h +p26 +(dp27 +Va +I525 +sVe +I739 +sVi +I320 +sVo +I168 +sVu +I33 +sVy +I2 +ssV i +p28 +(dp29 +Vd +I17 +sVg +I3 +sVf +I62 +sVm +I87 +sVl +I14 +sVn +I599 +sVs +I287 +sVr +I3 +sVt +I254 +ssV n +p30 +(dp31 +Va +I19 +sVu +I5 +sVe +I98 +sVi +I10 +sVo +I438 +ssV o +p32 +(dp33 +Vc +I14 +sVb +I32 +sVd +I2 +sVf +I741 +sVl +I9 +sVn +I225 +sVp +I36 +sVs +I1 +sVr +I90 +sVu +I70 +sVt +I44 +sVw +I47 +sVv +I20 +ssV l +p34 +(dp35 +Va +I80 +sVe +I154 +sVi +I107 +sVo +I115 +sVs +I4 +sVu +I1 +ssV m +p36 +(dp37 +Va +I319 +sVe +I281 +sVg +I1 +sVi +I107 +sVo +I229 +sVu +I149 +sVy +I347 +ssV r +p38 +(dp39 +Vi +I18 +sVu +I12 +sVe +I378 +sVa +I20 +sVo +I25 +ssV s +p40 +(dp41 +Va +I111 +sVc +I31 +sVe +I206 +sVi +I105 +sVh +I339 +sVm +I7 +sVl +I7 +sVo +I297 +sVp +I70 +sVu +I197 +sVt +I85 +sVw +I6 +sVy +I5 +ssV p +p42 +(dp43 +Va +I102 +sVe +I104 +sVi +I7 +sVh +I2 +sVl +I63 +sVo +I84 +sVr +I205 +sVu +I22 +ssV q +p44 +(dp45 +Vu +I40 +ssV v +p46 +(dp47 +Va +I23 +sVi +I40 +sVe +I95 +sVo +I4 +ssV w +p48 +(dp49 +Va +I243 +sVe +I172 +sVi +I425 +sVh +I382 +sVo +I157 +sVr +I39 +ssV t +p50 +(dp51 +Va +I56 +sVe +I60 +sVi +I69 +sVh +I1425 +sVm +I1 +sVo +I997 +sVr +I66 +sVu +I4 +sVw +I21 +ssV u +p52 +(dp53 +Vg +I1 +sVi +I1 +sVn +I132 +sVp +I26 +sVs +I50 +sVr +I10 +sVt +I4 +ssV z +p54 +(dp55 +Vi +I1 +ssVgn +p56 +(dp57 +Va +I2 +sV +I7 +sVe +I3 +sVi +I6 +sV, +I2 +sVo +I4 +sVs +I1 +ssV x +p58 +(dp59 +V +I1 +ssV y +p60 +(dp61 +Vi +I2 +sVe +I38 +sVo +I469 +ssV-Y +p62 +(dp63 +Vo +I3 +ssVlk +p64 +(dp65 +Vi +I5 +sV +I2 +sVs +I3 +sVe +I3 +ssVgi +p66 +(dp67 +Vb +I1 +sVe +I4 +sVn +I111 +sVr +I29 +sVt +I5 +sVv +I45 +sVz +I2 +ssVgh +p68 +(dp69 +V +I61 +sVb +I1 +sVe +I3 +sVi +I2 +sV, +I7 +sVl +I11 +sVt +I170 +sV; +I2 +ssV B +p70 +(dp71 +Va +I5 +sVE +I1 +sVi +I1 +sVo +I2 +sVR +I2 +sVu +I12 +sVy +I6 +sVU +I2 +sVe +I6 +ssV C +p72 +(dp73 +Va +I15 +sVA +I8 +sVe +I1 +sVh +I58 +sVl +I2 +sVO +I31 +sV. 
+I6 +sVr +I2 +sVo +I33 +ssV-g +p74 +(dp75 +Vr +I1 +ssV A +p76 +(dp77 +V +I3 +sVr +I1 +sVB +I1 +sVd +I8 +sVg +I1 +sVf +I3 +sVS +I3 +sVm +I4 +sVl +I14 +sVN +I2 +sVs +I11 +sVR +I2 +sVu +I6 +sVT +I1 +sVw +I1 +sVn +I5 +sVL +I5 +sVt +I11 +ssV F +p78 +(dp79 +Va +I2 +sVo +I7 +sVi +I1 +sVl +I1 +sVO +I4 +sV. +I1 +sVI +I1 +sVr +I89 +sVT +I2 +sVR +I2 +ssV G +p80 +(dp81 +VE +I2 +sVi +I1 +sVo +I4 +sVI +I1 +sVu +I17 +sVU +I9 +sVe +I1 +ssV D +p82 +(dp83 +VA +I3 +sVe +I32 +sVi +I3 +sVO +I3 +sVI +I3 +sVu +I2 +sVo +I7 +sVE +I29 +ssV E +p84 +(dp85 +VB +I2 +sVD +I1 +sVv +I4 +sVl +I1 +sVn +I4 +sVd +I9 +sVT +I6 +sVX +I1 +sVV +I2 +sVx +I2 +sVN +I1 +sVt +I9 +ssV J +p86 +(dp87 +Va +I60 +sVu +I1 +sVo +I18 +sVO +I17 +ssVtw +p88 +(dp89 +Vi +I1 +sVa +I2 +sVe +I18 +sVo +I17 +ssVtt +p90 +(dp91 +Va +I28 +sVe +I129 +sVi +I11 +sVl +I36 +sVr +I6 +sVy +I4 +ssV I +p92 +(dp93 +V +I766 +sVF +I3 +sVI +I2 +sVs +I2 +sVf +I25 +sVM +I1 +sVL +I1 +sVN +I7 +sVS +I1 +sVt +I27 +sVV +I1 +sVX +I1 +sV; +I1 +sVn +I12 +sV, +I5 +sVT +I1 +ssVtr +p94 +(dp95 +Va +I39 +sVe +I58 +sVi +I31 +sVo +I23 +sVu +I33 +sVy +I18 +ssVts +p96 +(dp97 +V +I63 +sV" +I1 +sVe +I4 +sV* +I1 +sV, +I20 +sV. +I10 +sV; +I3 +ssV L +p98 +(dp99 +Va +I96 +sVA +I35 +sVE +I1 +sVI +I5 +sVo +I22 +sVi +I1 +sVe +I6 +ssV M +p100 +(dp101 +Va +I62 +sVA +I2 +sVC +I1 +sVe +I1 +sVG +I1 +sVi +I32 +sVO +I1 +sV. +I2 +sVy +I35 +sVR +I40 +sVu +I2 +sVo +I12 +sVY +I1 +sVE +I4 +sVr +I96 +sVU +I1 +sVI +I1 +ssV R +p102 +(dp103 +VE +I5 +sVe +I81 +sVo +I1 +sV. +I4 +ssV S +p104 +(dp105 +VO +I1 +sVA +I4 +sVE +I1 +sV +I1 +sVi +I64 +sVh +I42 +sVM +I2 +sVm +I6 +sVo +I5 +sV. +I20 +sVp +I1 +sVu +I90 +sVt +I24 +sVH +I3 +sVI +I2 +sVU +I24 +sVe +I10 +sVT +I1 +ssV P +p106 +(dp107 +Va +I7 +sVA +I1 +sVe +I1 +sVl +I3 +sVO +I1 +sV. +I1 +sVR +I9 +sVU +I4 +sVo +I7 +sVr +I29 +ssVtm +p108 +(dp109 +Va +I2 +sV +I4 +sV" +I1 +sVe +I12 +sVo +I3 +ssV V +p110 +(dp111 +Va +I1 +sV +I1 +sVe +I66 +sVI +I3 +sVo +I1 +sV. +I4 +sVE +I50 +ssV W +p112 +(dp113 +VA +I4 +sVe +I27 +sVi +I11 +sVh +I33 +sVo +I1 +sVH +I1 +ssVth +p114 +(dp115 +Va +I378 +sV +I240 +sVe +I1023 +sV' +I4 +sVf +I4 +sVi +I255 +sVh +I1 +sVl +I3 +sVo +I105 +sV. +I9 +sVs +I10 +sVr +I15 +sV! +I1 +sVy +I5 +sVu +I1 +sV; +I2 +sV: +I1 +sV, +I10 +ssVti +p116 +(dp117 +Va +I6 +sVc +I42 +sVb +I4 +sVe +I29 +sVf +I21 +sVm +I71 +sVl +I41 +sVo +I338 +sVn +I114 +sVs +I18 +sVr +I16 +sVt +I9 +sVv +I27 +ssVtf +p118 +(dp119 +Vu +I18 +ssV [ +p120 +(dp121 +VE +I1 +sVD +I1 +sVg +I1 +sV* +I3 +sVM +I1 +sV1 +I4 +sV3 +I2 +sV2 +I3 +sVt +I3 +sVx +I1 +ssVG- +p122 +(dp123 +V +I1 +sVt +I4 +sVT +I1 +ssVte +p124 +(dp125 +Va +I11 +sV +I126 +sVc +I5 +sVe +I11 +sVd +I149 +sVg +I2 +sVf +I5 +sVm +I28 +sVl +I69 +sVp +I1 +sVn +I150 +sV. +I8 +sVs +I24 +sVr +I287 +sVv +I8 +sVx +I46 +sV; +I1 +sV: +I1 +sV, +I13 +sV? +I1 +ssV-y +p126 +(dp127 +Vo +I2 +ssVtc +p128 +(dp129 +Vh +I29 +sV. 
+I1 +ssVta +p130 +(dp131 +Vc +I31 +sVb +I19 +sVg +I12 +sVf +I3 +sVi +I30 +sVk +I50 +sVl +I27 +sVn +I61 +sVs +I1 +sVr +I3 +sVu +I2 +sVt +I57 +sVy +I9 +sVx +I3 +sV, +I1 +ssV " +p132 +(dp133 +VA +I3 +sVC +I2 +sVB +I2 +sVD +I2 +sVG +I1 +sVF +I3 +sVI +I22 +sVH +I3 +sVM +I3 +sVO +I2 +sVN +I4 +sVP +I6 +sVS +I6 +sVR +I1 +sVW +I2 +sVY +I2 +sVa +I1 +sVb +I4 +sVd +I1 +sVi +I2 +sVh +I2 +sVo +I2 +sVp +I1 +sVs +I1 +sVr +I1 +sVt +I2 +sVw +I2 +sVy +I2 +ssV # +p134 +(dp135 +V9 +I1 +sV6 +I1 +ssS' ' +p136 +(dp137 +V +I606 +sV# +I1 +sV" +I9 +sV& +I1 +sV( +I4 +sV* +I12 +sV- +I3 +sV2 +I2 +sVA +I19 +sVC +I47 +sVB +I4 +sVE +I8 +sVD +I8 +sVG +I2 +sVF +I17 +sVI +I68 +sVH +I6 +sVJ +I7 +sVM +I56 +sVL +I34 +sVO +I3 +sVN +I2 +sVP +I12 +sVS +I37 +sVR +I15 +sVU +I9 +sVT +I26 +sVW +I20 +sVV +I18 +sVY +I33 +sVX +I32 +sV[ +I12 +sVa +I172 +sVc +I92 +sVb +I78 +sVe +I62 +sVd +I78 +sVg +I25 +sVf +I67 +sVi +I107 +sVh +I118 +sVk +I7 +sVj +I8 +sVm +I103 +sVl +I35 +sVo +I81 +sVn +I40 +sVq +I4 +sVp +I65 +sVs +I128 +sVr +I49 +sVu +I25 +sVt +I140 +sVw +I108 +sVv +I11 +sVy +I34 +ssV & +p138 +(dp139 +Vc +I20 +ssV $ +p140 +(dp141 +V2 +I1 +ssV * +p142 +(dp143 +VB +I1 +sVE +I2 +sV* +I8 +sVn +I1 +sVT +I1 +sVW +I1 +sVe +I1 +ssV: +p144 +(dp145 +Va +I4 +sV +I4 +sVb +I3 +sVF +I1 +sVi +I3 +sVh +I2 +sVM +I3 +sVl +I1 +sVo +I1 +sVm +I2 +sVI +I3 +sVS +I1 +sV" +I3 +sVt +I3 +sVw +I1 +sVy +I1 +sVs +I1 +ssV ( +p146 +(dp147 +Va +I7 +sVC +I1 +sVI +I2 +sVh +I1 +sV* +I1 +sVo +I5 +sVi +I2 +sVs +I1 +sVT +I1 +sV~ +I1 +sV_ +I1 +sVt +I1 +ssV . +p148 +(dp149 +V +I2 +sVs +I1 +sVt +I1 +ssV / +p150 +(dp151 +V +I1 +sVe +I1 +ssV - +p152 +(dp153 +Va +I1 +sV- +I2 +ssV 2 +p154 +(dp155 +V8 +I1 +sV7 +I1 +sV0 +I5 +ssV 3 +p156 +(dp157 +V1 +I1 +sV0 +I1 +ssVGE +p158 +(dp159 +VS +I3 +sVT +I3 +sVN +I1 +ssV 1 +p160 +(dp161 +V9 +I3 +sV0 +I4 +ssV 6 +p162 +(dp163 +V1 +I1 +sV0 +I1 +ssVWI +p164 +(dp165 +VS +I1 +ssV 4 +p166 +(dp167 +V0 +I1 +ssVg +p168 +(dp169 +V( +I1 +sVA +I2 +sVC +I2 +sVE +I4 +sVD +I1 +sVF +I1 +sVI +I1 +sVH +I1 +sVM +I5 +sVL +I5 +sVP +I1 +sVS +I2 +sVR +I3 +sVV +I1 +sVa +I60 +sVc +I12 +sVb +I25 +sVe +I14 +sVd +I10 +sVg +I4 +sVf +I16 +sVi +I48 +sVh +I50 +sVm +I57 +sVl +I11 +sVo +I35 +sVn +I7 +sVq +I1 +sVp +I9 +sVs +I26 +sVr +I4 +sVu +I6 +sVt +I113 +sVw +I33 +sVv +I5 +sVy +I8 +ssVg/ +p170 +(dp171 +VC +I1 +ssVg. +p172 +(dp173 +V +I22 +ssV 8 +p174 +(dp175 +V0 +I2 +ssVGL +p176 +(dp177 +VI +I1 +ssV < +p178 +(dp179 +Vh +I1 +ssV = +p180 +(dp181 +V +I1 +ssVAS +p182 +(dp183 +V +I1 +sVC +I2 +sV- +I1 +ssVgg +p184 +(dp185 +Ve +I8 +sVl +I1 +ssVGu +p186 +(dp187 +Ve +I2 +sVt +I15 +ssVt: +p188 +(dp189 +V +I2 +ssVt; +p190 +(dp191 +V +I32 +ssVt9 +p192 +(dp193 +V0 +I1 +sV6 +I1 +ssV-- +p194 +(dp195 +V +I2 +sV" +I1 +sV- +I3 +sVF +I1 +sVI +I14 +sVM +I2 +sVS +I1 +sVU +I1 +sVT +I2 +sVY +I3 +sVa +I2 +sVb +I2 +sVe +I1 +sVd +I1 +sVg +I1 +sVf +I1 +sVi +I2 +sVh +I2 +sVm +I1 +sVn +I1 +sVp +I1 +sVs +I4 +sVt +I5 +sVw +I1 +sVv +I1 +sVy +I2 +ssVCl +p196 +(dp197 +Va +I2 +ssVge +p198 +(dp199 +V! +I1 +sV +I49 +sVd +I32 +sVf +I1 +sV) +I1 +sVm +I8 +sV, +I7 +sVo +I1 +sVn +I37 +sVs +I7 +sVr +I35 +sVt +I44 +sV; +I5 +sV. +I8 +ssVt. 
+p200 +(dp201 +V +I99 +sV" +I2 +ssVt/ +p202 +(dp203 +Va +I1 +sVe +I1 +ssVt, +p204 +(dp205 +V +I187 +sV" +I5 +ssV-I +p206 +(dp207 +V +I14 +sVS +I1 +ssVt* +p208 +(dp209 +V +I1 +ssVVO +p210 +(dp211 +VU +I1 +ssVt) +p212 +(dp213 +V +I2 +sV, +I1 +ssVGo +p214 +(dp215 +Va +I1 +sVd +I2 +sVo +I2 +ssVt' +p216 +(dp217 +Vs +I3 +ssVt" +p218 +(dp219 +V) +I1 +ssVt +p220 +(dp221 +V# +I1 +sV( +I3 +sV1 +I2 +sV< +I1 +sVA +I1 +sVC +I10 +sVB +I1 +sVE +I1 +sVD +I1 +sVG +I15 +sVF +I18 +sVI +I114 +sVH +I3 +sVM +I18 +sVL +I10 +sVO +I1 +sVP +I2 +sVS +I10 +sVR +I10 +sVT +I3 +sVW +I1 +sVY +I1 +sV[ +I1 +sVa +I170 +sVc +I46 +sVb +I98 +sVe +I35 +sVd +I44 +sVg +I21 +sVf +I69 +sVi +I167 +sVh +I180 +sVk +I18 +sVj +I3 +sVm +I104 +sVl +I50 +sVo +I179 +sVn +I42 +sVq +I6 +sVp +I50 +sVs +I123 +sVr +I35 +sVu +I17 +sVt +I253 +sVw +I153 +sVv +I10 +sVy +I60 +ssVt! +p222 +(dp223 +V +I6 +sV" +I7 +ssVEM +p224 +(dp225 +VE +I1 +sVN +I1 +ssVzi +p226 +(dp227 +Vp +I2 +sVn +I3 +ssV3] +p228 +(dp229 +V +I2 +ssVze +p230 +(dp231 +V +I5 +sVs +I1 +sVm +I1 +sVd +I6 +sV. +I1 +ssVM. +p232 +(dp233 +V +I3 +ssVza +p234 +(dp235 +V +I2 +sVr +I1 +ssVgo +p236 +(dp237 +V +I12 +sV: +I1 +sVe +I1 +sVi +I18 +sV, +I4 +sVo +I30 +sVn +I12 +sVu +I8 +sVt +I9 +sVv +I5 +sV. +I3 +ssVaw +p238 +(dp239 +Va +I25 +sV +I11 +sVe +I1 +sV' +I4 +sVi +I4 +sVk +I2 +sV, +I2 +sV. +I1 +sVs +I2 +sVy +I1 +sVn +I5 +ssVEN +p240 +(dp241 +V +I2 +sVC +I1 +sVB +I7 +sVE +I1 +sVD +I4 +sVT +I3 +ssVlw +p242 +(dp243 +Va +I27 +ssVEI +p244 +(dp245 +VT +I1 +ssVyw +p246 +(dp247 +Vh +I2 +ssVgm +p248 +(dp249 +Ve +I8 +sVo +I3 +ssVF +p250 +(dp251 +VA +I1 +sVD +I1 +sVM +I1 +sVS +I1 +sVT +I1 +sVW +I1 +sVY +I3 +ssVgl +p252 +(dp253 +Va +I9 +sVy +I6 +sVe +I7 +sVi +I2 +sVo +I2 +ssVbh +p254 +(dp255 +Vo +I3 +ssVmt +p256 +(dp257 +Vh +I4 +ssVmf +p258 +(dp259 +Vo +I11 +ssV@l +p260 +(dp261 +Vo +I1 +ssV@p +p262 +(dp263 +Vo +I1 +ssVt@ +p264 +(dp265 +Vp +I1 +ssVg, +p266 +(dp267 +V +I40 +ssVrd +p268 +(dp269 +Va +I4 +sV +I51 +sVe +I13 +sVi +I13 +sV, +I5 +sVo +I1 +sV. +I5 +sV) +I1 +sVs +I19 +sVn +I1 +sV! +I3 +sV; +I4 +sV: +I2 +sVl +I7 +ssVe' +p270 +(dp271 +Vs +I8 +ssVMe +p272 +(dp273 +Vl +I4 +ssV-e +p274 +(dp275 +Vn +I1 +ssVMa +p276 +(dp277 +Vi +I46 +sVc +I1 +sVr +I15 +sVm +I2 +ssVms +p278 +(dp279 +V +I18 +sV, +I1 +sVe +I13 +sVt +I9 +sVp +I1 +ssV-d +p280 +(dp281 +Vi +I1 +sVa +I3 +sVo +I1 +ssVe! +p282 +(dp283 +V +I16 +sV" +I2 +ssVMo +p284 +(dp285 +Vt +I10 +sVn +I2 +ssVMi +p286 +(dp287 +Vs +I29 +sVc +I2 +sVd +I1 +ssVSp +p288 +(dp289 +Ve +I1 +ssVMu +p290 +(dp291 +Vc +I2 +ssV-f +p292 +(dp293 +Va +I1 +sVi +I1 +sVo +I1 +ssVbj +p294 +(dp295 +Vu +I1 +sVe +I21 +ssVMr +p296 +(dp297 +Vs +I31 +sV. +I67 +ssV-a +p298 +(dp299 +V +I1 +sVn +I2 +ssVEB +p300 +(dp301 +VC +I2 +ssVMy +p302 +(dp303 +V +I37 +ssVO +p304 +(dp305 +Vw +I1 +sVI +I1 +sVH +I1 +sVM +I21 +sVL +I16 +sVO +I1 +sVS +I2 +sVR +I2 +sVT +I3 +sVW +I1 +sVY +I2 +ssVME +p306 +(dp307 +V +I5 +sVR +I2 +sVD +I2 +sV, +I1 +ssVMG +p308 +(dp309 +VE +I1 +ssVMA +p310 +(dp311 +VI +I3 +sVY +I1 +sVL +I2 +sVG +I3 +sVD +I1 +ssVtx +p312 +(dp313 +Vt +I3 +ssVMC +p314 +(dp315 +VI +I1 +ssVMB +p316 +(dp317 +VE +I1 +ssV31 +p318 +(dp319 +V, +I1 +ssVty +p320 +(dp321 +V +I101 +sV" +I2 +sVi +I1 +sV) +I1 +sV- +I2 +sV, +I21 +sVl +I2 +sVp +I1 +sV; +I5 +sV. 
+I7 +ssVMI +p322 +(dp323 +VS +I1 +sVT +I3 +ssVMU +p324 +(dp325 +V +I1 +sVS +I1 +sV" +I1 +ssV3* +p326 +(dp327 +VE +I1 +ssV; +p328 +(dp329 +Va +I90 +sVo +I3 +sVb +I62 +sVe +I1 +sVD +I1 +sVf +I12 +sVI +I27 +sVh +I21 +sVm +I2 +sVL +I1 +sVO +I1 +sVn +I9 +sVi +I9 +sVs +I8 +sV" +I7 +sVt +I9 +sVw +I9 +sVy +I11 +sVR +I1 +sVd +I1 +ssVMP +p330 +(dp331 +VL +I1 +ssV K +p332 +(dp333 +VI +I1 +sVi +I1 +sVe +I3 +sVn +I1 +ssVf; +p334 +(dp335 +V +I7 +ssVp- +p336 +(dp337 +V- +I1 +ssVMY +p338 +(dp339 +V +I1 +ssV H +p340 +(dp341 +Va +I7 +sVA +I3 +sVe +I64 +sVi +I7 +sVo +I10 +sVI +I7 +sVu +I4 +sVE +I4 +ssV"d +p342 +(dp343 +Vo +I1 +ssVFR +p344 +(dp345 +VO +I2 +ssVtu +p346 +(dp347 +Va +I25 +sVb +I1 +sVd +I5 +sVi +I1 +sVl +I8 +sVo +I1 +sVn +I20 +sVp +I3 +sVr +I64 +sVt +I3 +ssVFT +p348 +(dp349 +VP +I2 +ssVix +p350 +(dp351 +Vi +I2 +sV +I4 +sVe +I10 +sVt +I4 +ssV N +p352 +(dp353 +Va +I1 +sVE +I2 +sVo +I9 +sVU +I1 +sVO +I7 +sVe +I2 +ssVg? +p354 +(dp355 +V +I1 +sV" +I2 +ssV' +p356 +(dp357 +Vp +I1 +ssV O +p358 +(dp359 +V +I1 +sVC +I1 +sVF +I6 +sVh +I1 +sVf +I1 +sVN +I1 +sVR +I10 +sVu +I4 +sVT +I2 +sVn +I5 +sV. +I1 +ssVS* +p360 +(dp361 +V* +I1 +sVV +I1 +ssVtp +p362 +(dp363 +V +I2 +ssVS- +p364 +(dp365 +VI +I1 +ssVS, +p366 +(dp367 +V +I1 +ssVS" +p368 +(dp369 +V. +I1 +ssVS +p370 +(dp371 +Vc +I1 +sVb +I1 +sVe +I1 +sVd +I1 +sVF +I2 +sVf +I1 +sVv +I1 +sVO +I4 +sVS +I1 +sVo +I1 +sVV +I2 +sVE +I2 +ssV't +p372 +(dp373 +V +I1 +ssVFO +p374 +(dp375 +VR +I6 +ssVtn +p376 +(dp377 +Vi +I4 +sVe +I6 +ssVFr +p378 +(dp379 +Vi +I4 +sVe +I87 +sVo +I2 +ssV"N +p380 +(dp381 +Vo +I4 +ssVm! +p382 +(dp383 +V +I2 +ssVm +p384 +(dp385 +V( +I3 +sVC +I3 +sVI +I12 +sVM +I6 +sVL +I4 +sVS +I1 +sVW +I1 +sVa +I57 +sVc +I6 +sVb +I9 +sVe +I15 +sVd +I6 +sVg +I14 +sVf +I19 +sVi +I17 +sVh +I26 +sVk +I2 +sVm +I14 +sVl +I3 +sVo +I15 +sVn +I14 +sVp +I8 +sVs +I43 +sVr +I7 +sVu +I3 +sVt +I53 +sVw +I14 +sVv +I5 +sVy +I10 +ssVto +p386 +(dp387 +V +I900 +sVg +I12 +sVf +I2 +sVm +I2 +sV- +I5 +sVl +I18 +sVo +I54 +sV. +I1 +sVp +I2 +sVr +I31 +sVu +I1 +sVt +I6 +sVw +I35 +sVn +I18 +sV; +I1 +sV: +I1 +sV, +I1 +sV? +I1 +ssVm- +p388 +(dp389 +V +I1 +sV- +I2 +ssVm, +p390 +(dp391 +V +I52 +ssVO. +p392 +(dp393 +V +I1 +ssVm. +p394 +(dp395 +V +I28 +sV" +I1 +ssVm) +p396 +(dp397 +V. +I1 +ssVtl +p398 +(dp399 +Vy +I53 +sVe +I53 +ssVFa +p400 +(dp401 +Vc +I1 +sVr +I1 +ssVd! +p402 +(dp403 +V +I7 +sV" +I2 +ssV-v +p404 +(dp405 +Ve +I1 +ssVvi +p406 +(dp407 +Vc +I15 +sVe +I9 +sVd +I7 +sVg +I1 +sVl +I8 +sVo +I25 +sVn +I71 +sVs +I26 +sVr +I1 +sVt +I14 +sVv +I2 +ssVFi +p408 +(dp409 +Vl +I1 +ssVu? +p410 +(dp411 +V +I1 +ssVm> +p412 +(dp413 +V +I1 +ssVFl +p414 +(dp415 +Ve +I1 +ssVm; +p416 +(dp417 +V +I6 +ssVFo +p418 +(dp419 +Vr +I7 +ssVoq +p420 +(dp421 +Vu +I6 +ssVir +p422 +(dp423 +Va +I9 +sV +I81 +sVc +I11 +sVe +I64 +sVi +I15 +sVm +I8 +sVl +I29 +sVo +I2 +sV, +I6 +sVs +I31 +sVr +I3 +sVu +I1 +sVt +I11 +sVy +I2 +sV; +I1 +sV. +I1 +ssV T +p424 +(dp425 +Ve +I2 +sVi +I1 +sVh +I69 +sVo +I12 +sVr +I1 +sVu +I2 +sVO +I49 +sVH +I11 +ssV9. +p426 +(dp427 +V9 +I1 +ssViu +p428 +(dp429 +Vc +I1 +sVm +I8 +sVs +I1 +ssV U +p430 +(dp431 +Vp +I9 +sVS +I1 +sVN +I2 +sVn +I8 +ssVSu +p432 +(dp433 +Vs +I79 +sVr +I1 +sVm +I8 +sVc +I2 +ssVSt +p434 +(dp435 +Va +I6 +sVr +I18 +ssV99 +p436 +(dp437 +V7 +I1 +sV6 +I1 +ssVSi +p438 +(dp439 +Vs +I1 +sVr +I59 +sVl +I2 +sVn +I3 +ssVSh +p440 +(dp441 +Ve +I42 +ssVSo +p442 +(dp443 +Vm +I3 +sV, +I1 +sVl +I1 +ssVSm +p444 +(dp445 +Va +I5 +sVi +I6 +ssVG. 
+p446 +(dp447 +V; +I1 +ssV90 +p448 +(dp449 +V +I2 +ssV93 +p450 +(dp451 +V* +I1 +ssVvo +p452 +(dp453 +Vc +I3 +sVi +I7 +sVk +I7 +sVl +I3 +sVu +I23 +sVt +I4 +sVw +I2 +ssV94 +p454 +(dp455 +V6 +I1 +ssV97 +p456 +(dp457 +V1 +I1 +sV +I1 +ssV96 +p458 +(dp459 +V +I2 +ssVme +p460 +(dp461 +Va +I34 +sV +I265 +sVe +I9 +sVd +I39 +sVf +I1 +sV- +I2 +sVm +I12 +sV, +I52 +sVo +I1 +sVn +I143 +sVs +I75 +sVr +I26 +sV! +I4 +sVt +I30 +sVw +I3 +sV; +I11 +sV. +I37 +sV? +I3 +sVl +I5 +ssV-r +p462 +(dp463 +Vo +I7 +ssVmg +p464 +(dp465 +Ve +I1 +ssV Y +p466 +(dp467 +Ve +I3 +sVO +I8 +sVo +I56 +ssVma +p468 +(dp469 +V +I3 +sVc +I7 +sVz +I1 +sVd +I28 +sVg +I13 +sVi +I23 +sVk +I53 +sV' +I1 +sVm +I4 +sVl +I14 +sVn +I115 +sVs +I6 +sVr +I52 +sVt +I48 +sVy +I75 +sV. +I1 +sV, +I1 +ssVmb +p470 +(dp471 +Vi +I1 +sVa +I3 +sVr +I1 +sVe +I14 +sVl +I7 +ssVmm +p472 +(dp473 +Va +I12 +sVu +I3 +sVe +I34 +sVi +I4 +sVo +I12 +ssVml +p474 +(dp475 +Vy +I6 +sVe +I1 +ssVmo +p476 +(dp477 +Vd +I4 +sVm +I15 +sVo +I1 +sVn +I45 +sVs +I50 +sVr +I91 +sVu +I13 +sVt +I69 +sVv +I13 +ssVmn +p478 +(dp479 +Vi +I3 +sV +I2 +sVs +I1 +sVe +I1 +ssVmi +p480 +(dp481 +Va +I7 +sVe +I1 +sVd +I5 +sVg +I31 +sVl +I40 +sVn +I72 +sVs +I49 +sVr +I10 +sVt +I24 +sVx +I5 +ssVvr +p482 +(dp483 +Ve +I1 +ssVSU +p484 +(dp485 +VC +I1 +sVS +I23 +ssVST +p486 +(dp487 +VA +I2 +sV +I1 +sVR +I2 +sVE +I1 +ssVmu +p488 +(dp489 +Vc +I54 +sVs +I100 +sVl +I2 +sVn +I3 +ssVRW +p490 +(dp491 +VI +I1 +ssVSI +p492 +(dp493 +VR +I2 +sVB +I1 +sVO +I2 +ssVSH +p494 +(dp495 +VE +I3 +ssVSO +p496 +(dp497 +VN +I18 +ssVmp +p498 +(dp499 +Va +I28 +sVe +I16 +sVh +I3 +sVl +I31 +sVo +I23 +sVr +I18 +sVu +I13 +sVt +I17 +ssVSM +p500 +(dp501 +VA +I2 +ssVUT +p502 +(dp503 +VI +I1 +sV +I5 +sV* +I1 +sVE +I7 +ssVSC +p504 +(dp505 +VI +I2 +sVL +I1 +ssV"p +p506 +(dp507 +Vu +I1 +ssVSA +p508 +(dp509 +VM +I4 +sVN +I23 +ssVmy +p510 +(dp511 +V +I292 +sVs +I56 +sV; +I1 +ssV.9 +p512 +(dp513 +V3 +I1 +ssVSE +p514 +(dp515 +VQ +I1 +sV +I2 +sVN +I2 +sV. +I1 +ssVLe +p516 +(dp517 +Va +I1 +sVt +I5 +ssV"r +p518 +(dp519 +Ve +I1 +ssVYO +p520 +(dp521 +VU +I8 +ssVLa +p522 +(dp523 +Vk +I1 +sVd +I76 +sVn +I19 +ssVLo +p524 +(dp525 +Vn +I22 +ssVf: +p526 +(dp527 +V +I1 +ssV"s +p528 +(dp529 +Vm +I1 +ssVGU +p530 +(dp531 +VT +I10 +ssVRP +p532 +(dp533 +VO +I1 +ssVf +p534 +(dp535 +V2 +I1 +sV8 +I1 +sVD +I1 +sVF +I15 +sVI +I14 +sVH +I1 +sVM +I17 +sVL +I12 +sVP +I1 +sVS +I5 +sVR +I6 +sVT +I1 +sVV +I1 +sVa +I75 +sVc +I28 +sVb +I11 +sVe +I14 +sVd +I9 +sVg +I10 +sVf +I16 +sVi +I49 +sVh +I134 +sVk +I1 +sVj +I1 +sVm +I70 +sVl +I16 +sVo +I18 +sVn +I9 +sVq +I3 +sVp +I15 +sVs +I45 +sVr +I17 +sVu +I5 +sVt +I119 +sVw +I22 +sVv +I6 +sVy +I55 +ssVf! +p536 +(dp537 +V +I1 +ssVdm +p538 +(dp539 +Vi +I14 +ssVUS +p540 +(dp541 +VA +I23 +sVI +I1 +sVE +I1 +sVT +I1 +ssV 9 +p542 +(dp543 +V0 +I1 +ssVf, +p544 +(dp545 +V +I22 +ssVf- +p546 +(dp547 +Vc +I1 +sVw +I1 +ssVf. 
+p548 +(dp549 +V +I11 +ssVs; +p550 +(dp551 +V +I34 +ssVs: +p552 +(dp553 +V +I6 +ssVLD +p554 +(dp555 +V +I3 +ssVLE +p556 +(dp557 +VT +I1 +ssV," +p558 +(dp559 +V +I22 +ssVe" +p560 +(dp561 +V +I1 +ssV, +p562 +(dp563 +V" +I19 +sV& +I19 +sV1 +I1 +sV2 +I2 +sVA +I2 +sVC +I4 +sVB +I1 +sVE +I3 +sVD +I2 +sVF +I4 +sVI +I121 +sVH +I1 +sVM +I3 +sVL +I2 +sVO +I2 +sVP +I1 +sVS +I3 +sVR +I3 +sVT +I1 +sV[ +I1 +sVa +I430 +sVc +I26 +sVb +I111 +sVe +I16 +sVd +I15 +sVg +I4 +sVf +I56 +sVi +I96 +sVh +I73 +sVk +I3 +sVj +I2 +sVm +I59 +sVl +I12 +sVo +I53 +sVn +I18 +sVq +I1 +sVp +I11 +sVs +I46 +sVr +I3 +sVu +I2 +sVt +I180 +sVw +I142 +sVv +I3 +sVy +I28 +ssVLA +p564 +(dp565 +VI +I1 +sVR +I1 +sVD +I35 +ssVLL +p566 +(dp567 +V +I2 +ssV,- +p568 +(dp569 +V- +I19 +ssVYe +p570 +(dp571 +Vt +I3 +ssVLI +p572 +(dp573 +VA +I1 +sV +I1 +sVC +I7 +sVE +I1 +sVG +I1 +sVM +I3 +sVS +I1 +sVT +I3 +ssVs* +p574 +(dp575 +V +I1 +sV* +I1 +ssVs) +p576 +(dp577 +V +I1 +sV; +I1 +ssVLU +p578 +(dp579 +VS +I1 +sVD +I2 +ssVs. +p580 +(dp581 +V +I119 +sV" +I3 +ssV,0 +p582 +(dp583 +V0 +I3 +ssVs, +p584 +(dp585 +V +I191 +sV" +I1 +ssVs" +p586 +(dp587 +V. +I1 +ssVs! +p588 +(dp589 +V! +I1 +sV +I10 +ssVs +p590 +(dp591 +V" +I5 +sV( +I1 +sV* +I1 +sVC +I2 +sVD +I1 +sVF +I5 +sVI +I40 +sVM +I21 +sVL +I8 +sVP +I3 +sVS +I12 +sVR +I4 +sVV +I7 +sV[ +I1 +sVa +I235 +sVc +I71 +sVb +I62 +sVe +I62 +sVd +I53 +sVg +I31 +sVf +I62 +sVi +I126 +sVh +I96 +sVk +I3 +sVj +I15 +sVm +I111 +sVl +I32 +sVo +I127 +sVn +I85 +sVq +I6 +sVp +I53 +sVs +I129 +sVr +I36 +sVu +I17 +sVt +I181 +sVw +I87 +sVv +I24 +sVy +I28 +ssVs' +p592 +(dp593 +V +I1 +sVs +I6 +ssVr! +p594 +(dp595 +V +I6 +ssVfr +p596 +(dp597 +Vi +I36 +sVa +I11 +sVe +I11 +sVo +I110 +ssVo? +p598 +(dp599 +V +I1 +sV" +I1 +ssVft +p600 +(dp601 +Vy +I1 +sVp +I2 +sVe +I36 +sVw +I2 +sV +I12 +ssVfu +p602 +(dp603 +Vs +I12 +sVr +I8 +sVt +I4 +sVl +I58 +sVn +I4 +ssVL +p604 +(dp605 +VP +I2 +sVD +I1 +sVo +I1 +sV6 +I1 +ssVu! +p606 +(dp607 +V) +I1 +sV +I3 +ssVfy +p608 +(dp609 +Vi +I4 +sV +I9 +sV, +I1 +ssVL, +p610 +(dp611 +V +I1 +ssVI; +p612 +(dp613 +V +I1 +ssV?0 +p614 +(dp615 +V0 +I1 +ssVfa +p616 +(dp617 +Vc +I16 +sVi +I19 +sVm +I24 +sVl +I4 +sVn +I7 +sVs +I10 +sVr +I18 +sVu +I6 +sVt +I20 +sVv +I14 +ssVSS +p618 +(dp619 +VI +I1 +sV +I3 +ssVr? +p620 +(dp621 +V +I3 +sV" +I4 +ssVfe +p622 +(dp623 +Va +I20 +sV +I14 +sVc +I72 +sVe +I47 +sV' +I3 +sVm +I1 +sV, +I10 +sV. +I5 +sVs +I9 +sVr +I41 +sV! +I1 +sVt +I4 +sVw +I15 +sV? +I1 +sVl +I10 +ssVff +p624 +(dp625 +Va +I10 +sV +I9 +sVe +I73 +sVi +I14 +sV, +I1 +sVo +I9 +ssVUD +p626 +(dp627 +VI +I2 +ssVfi +p628 +(dp629 +Va +I1 +sVc +I25 +sVe +I22 +sVd +I4 +sVg +I1 +sVf +I2 +sVl +I10 +sVn +I30 +sVs +I2 +sVr +I31 +sVt +I10 +sVv +I2 +sVx +I10 +ssVfl +p630 +(dp631 +Vi +I10 +sVe +I3 +sVu +I11 +sVa +I6 +sVo +I1 +ssVH +p632 +(dp633 +VD +I1 +sVO +I1 +ssVfo +p634 +(dp635 +Vr +I454 +sVu +I20 +sVl +I13 +sVo +I3 +sVn +I3 +ssVCY +p636 +(dp637 +V +I23 +sV. +I5 +ssVg' +p638 +(dp639 +Vs +I7 +ssVsy +p640 +(dp641 +V +I13 +sVm +I2 +sVl +I3 +sV, +I3 +sV; +I1 +sV. +I3 +ssVEv +p642 +(dp643 +Ve +I4 +ssVYo +p644 +(dp645 +Vu +I61 +ssVY. +p646 +(dp647 +V +I5 +ssVss +p648 +(dp649 +Va +I14 +sV +I133 +sV: +I1 +sVe +I40 +sVi +I87 +sVm +I3 +sV, +I17 +sVo +I5 +sV. +I10 +sV! 
+I3 +sVu +I19 +sVw +I1 +sV; +I8 +sVn +I1 +sVl +I2 +ssVY +p650 +(dp651 +VB +I1 +sVE +I1 +sVD +I12 +sVK +I1 +sVM +I1 +sVO +I3 +sVS +I24 +sVT +I7 +ssVsp +p652 +(dp653 +Va +I5 +sVe +I67 +sVi +I31 +sVl +I8 +sVo +I34 +sVr +I1 +sVu +I2 +ssVsw +p654 +(dp655 +Va +I1 +sVe +I11 +sVo +I1 +ssVsu +p656 +(dp657 +Va +I18 +sVc +I70 +sVb +I30 +sVe +I2 +sVd +I5 +sVg +I2 +sVf +I11 +sVi +I6 +sVm +I1 +sVl +I3 +sVn +I3 +sVp +I31 +sVs +I13 +sVr +I89 +ssVst +p658 +(dp659 +Va +I98 +sV +I307 +sV: +I1 +sVe +I75 +sV! +I1 +sVi +I61 +sV- +I2 +sVm +I2 +sV, +I24 +sVo +I42 +sV. +I5 +sVs +I2 +sVr +I52 +sVu +I7 +sVy +I5 +sV; +I5 +sVn +I2 +sVl +I6 +ssVsk +p660 +(dp661 +V +I8 +sVe +I2 +sV) +I1 +sV, +I2 +sV. +I1 +sVi +I3 +ssVsi +p662 +(dp663 +Vc +I5 +sVb +I39 +sVd +I31 +sVg +I19 +sVm +I3 +sVl +I19 +sVo +I56 +sVn +I82 +sVp +I3 +sVs +I40 +sVr +I17 +sVt +I64 +sVv +I10 +sVx +I5 +sVz +I2 +ssVsh +p664 +(dp665 +Va +I65 +sV +I45 +sVe +I214 +sVi +I34 +sVm +I10 +sV, +I3 +sVo +I83 +sVn +I1 +sVr +I3 +sVu +I2 +sVy +I4 +sVl +I1 +ssVso +p666 +(dp667 +V +I139 +sVc +I9 +sVb +I1 +sVf +I7 +sVm +I81 +sVl +I44 +sVo +I38 +sVn +I73 +sVr +I23 +sVu +I4 +sV. +I5 +sV, +I4 +ssVsn +p668 +(dp669 +V1 +I4 +sVe +I7 +ssVsm +p670 +(dp671 +Va +I7 +sVi +I7 +sVe +I3 +sVo +I1 +ssVsl +p672 +(dp673 +Vi +I13 +sVy +I9 +sVe +I2 +sVa +I3 +ssVsc +p674 +(dp675 +Va +I18 +sVe +I3 +sVi +I9 +sVh +I12 +sVl +I5 +sVo +I17 +sVr +I13 +sVu +I1 +ssVsb +p676 +(dp677 +Va +I17 +sVe +I1 +ssVsa +p678 +(dp679 +Vc +I5 +sVb +I7 +sVd +I8 +sVg +I5 +sVf +I3 +sVi +I25 +sVk +I3 +sVm +I12 +sVl +I5 +sVn +I82 +sVp +I6 +sVr +I12 +sVu +I1 +sVt +I34 +sVw +I10 +sVv +I4 +sVy +I28 +ssVUB +p680 +(dp681 +VL +I2 +ssVsg +p682 +(dp683 +Va +I1 +sVu +I1 +ssVsf +p684 +(dp685 +Vy +I2 +sVi +I10 +sVe +I1 +sVa +I6 +ssVse +p686 +(dp687 +V! +I2 +sV +I179 +sV) +I1 +sV, +I32 +sV. +I15 +sV; +I3 +sV: +I2 +sV? +I2 +sVa +I3 +sVc +I12 +sVe +I85 +sVd +I68 +sVf +I1 +sVm +I6 +sVl +I126 +sVn +I99 +sVq +I8 +sVp +I10 +sVs +I23 +sVr +I54 +sVt +I12 +sVw +I1 +sVv +I14 +sVx +I2 +ssVsd +p688 +(dp689 +Va +I8 +ssVy) +p690 +(dp691 +V +I2 +sV. +I1 +ssVh: +p692 +(dp693 +V +I1 +ssVy- +p694 +(dp695 +V- +I2 +sVm +I1 +sVt +I2 +ssVy, +p696 +(dp697 +V +I106 +sV" +I2 +ssVy. +p698 +(dp699 +V +I56 +sV" +I2 +ssVy! +p700 +(dp701 +V +I3 +sV" +I1 +ssVy +p702 +(dp703 +V( +I4 +sV1 +I1 +sVC +I2 +sVB +I1 +sVD +I2 +sVF +I1 +sVI +I9 +sVH +I3 +sVJ +I5 +sVM +I3 +sVL +I1 +sVO +I1 +sVP +I1 +sVS +I72 +sVR +I2 +sVT +I1 +sVV +I1 +sVa +I136 +sVc +I51 +sVb +I72 +sVe +I40 +sVd +I128 +sVg +I26 +sVf +I64 +sVi +I69 +sVh +I79 +sVk +I7 +sVj +I5 +sVm +I49 +sVl +I35 +sVo +I106 +sVn +I21 +sVp +I53 +sVs +I94 +sVr +I46 +sVu +I22 +sVt +I158 +sVw +I76 +sVv +I10 +sVy +I17 +ssV 5 +p704 +(dp705 +V% +I1 +ssVGI +p706 +(dp707 +VN +I3 +sVV +I1 +ssV% +p708 +(dp709 +Vo +I4 +ssVy' +p710 +(dp711 +Vs +I2 +ssVRe +p712 +(dp713 +Va +I1 +sVp +I1 +sVg +I78 +sVf +I1 +ssVy; +p714 +(dp715 +V +I29 +ssV!" +p716 +(dp717 +V +I14 +ssVV. +p718 +(dp719 +V +I4 +ssVy? +p720 +(dp721 +V +I1 +ssVRo +p722 +(dp723 +Vy +I1 +ssVSa +p724 +(dp725 +Vy +I1 +ssVRi +p726 +(dp727 +Vg +I1 +ssVRT +p728 +(dp729 +VI +I1 +sV* +I2 +ssV25 +p730 +(dp731 +V +I1 +ssVld +p732 +(dp733 +V +I277 +sVe +I5 +sV' +I7 +sVi +I3 +sV- +I1 +sV, +I30 +sVo +I3 +sV. +I11 +sVr +I8 +sVn +I1 +sV; +I2 +sV: +I1 +sV? +I1 +ssVle +p734 +(dp735 +V! +I4 +sV +I203 +sV" +I1 +sV' +I2 +sV, +I36 +sV. 
+I14 +sV; +I4 +sV: +I1 +sVa +I92 +sVc +I19 +sVb +I1 +sVe +I1 +sVd +I31 +sVg +I8 +sVf +I12 +sVi +I3 +sVm +I7 +sVn +I49 +sVs +I59 +sVr +I6 +sVt +I54 +sVv +I7 +sVx +I4 +ssV20 +p736 +(dp737 +V0 +I4 +sV% +I1 +ssVlc +p738 +(dp739 +Vu +I3 +sVo +I3 +ssVRR +p740 +(dp741 +VA +I4 +ssVla +p742 +(dp743 +V +I1 +sVc +I24 +sVb +I3 +sVd +I27 +sVg +I4 +sVi +I23 +sVm +I15 +sVn +I36 +sVp +I1 +sVs +I25 +sVr +I46 +sVu +I9 +sVt +I36 +sVw +I12 +sVv +I2 +sVy +I11 +sVx +I1 +ssVln +p744 +(dp745 +Ve +I3 +ssVlo +p746 +(dp747 +Vd +I3 +sVg +I5 +sVi +I1 +sVm +I1 +sVo +I34 +sVn +I56 +sVq +I1 +sVp +I2 +sVs +I13 +sVr +I1 +sVu +I24 +sVt +I2 +sVw +I39 +sVv +I30 +sVy +I2 +ssVll +p748 +(dp749 +Va +I9 +sV +I379 +sV: +I1 +sVe +I23 +sVi +I28 +sV- +I11 +sV, +I23 +sVo +I32 +sVn +I1 +sV) +I1 +sVs +I3 +sV! +I1 +sVu +I3 +sVy +I84 +sV; +I6 +sV. +I34 +sV? +I1 +ssVlm +p750 +(dp751 +V +I2 +sVe +I1 +sVl +I2 +sVo +I7 +sVn +I1 +ssV28 +p752 +(dp753 +V, +I1 +ssV29 +p754 +(dp755 +V. +I1 +ssVli +p756 +(dp757 +Va +I20 +sVc +I35 +sVb +I12 +sVe +I49 +sVd +I1 +sVg +I37 +sVf +I15 +sVh +I1 +sVk +I30 +sVm +I4 +sVo +I8 +sVn +I57 +sVp +I3 +sVs +I21 +sVr +I8 +sVt +I58 +sVv +I11 +ssVlv +p758 +(dp759 +Ve +I18 +ssVRE +p760 +(dp761 +V! +I1 +sVA +I2 +sVC +I1 +sVG +I3 +sV +I3 +sVM +I1 +sVS +I2 +ssVlt +p762 +(dp763 +Va +I1 +sV +I9 +sVe +I15 +sVi +I4 +sVh +I5 +sV, +I1 +sVo +I4 +sV. +I3 +sVs +I3 +sVy +I6 +ssVlu +p764 +(dp765 +Vc +I7 +sVe +I14 +sVd +I17 +sVn +I1 +sVs +I7 +sVr +I1 +sVt +I19 +ssVlr +p766 +(dp767 +Ve +I8 +ssVls +p768 +(dp769 +V +I9 +sVu +I4 +sVe +I6 +sVo +I9 +sVt +I1 +ssVlp +p770 +(dp771 +Vi +I1 +sV +I10 +sV- +I1 +ssVRC +p772 +(dp773 +VY +I28 +sVH +I1 +ssV-" +p774 +(dp775 +V +I1 +ssVRN +p776 +(dp777 +VO +I49 +ssVRO +p778 +(dp779 +VJ +I7 +sVM +I2 +sVT +I1 +sVV +I1 +ssVRI +p780 +(dp781 +VC +I1 +sVB +I1 +sVN +I9 +ssVly +p782 +(dp783 +V +I398 +sVi +I4 +sV- +I1 +sV, +I29 +sV. +I17 +sV; +I9 +sVz +I1 +ssVyi +p784 +(dp785 +Vs +I1 +sVe +I2 +sVn +I38 +ssVym +p786 +(dp787 +Ve +I3 +sVm +I1 +sVo +I9 +sVp +I1 +ssVyl +p788 +(dp789 +Ve +I2 +sVl +I3 +ssVyo +p790 +(dp791 +Vu +I473 +sVn +I18 +ssVt? +p792 +(dp793 +V +I3 +sV" +I1 +ssVya +p794 +(dp795 +Vb +I1 +sVl +I4 +ssV2] +p796 +(dp797 +V +I4 +ssVyb +p798 +(dp799 +Vo +I2 +ssVye +p800 +(dp801 +Va +I13 +sVd +I7 +sVl +I1 +sV, +I1 +sVs +I13 +sVr +I1 +sVt +I21 +ssV.0 +p802 +(dp803 +V4 +I1 +ssVyz +p804 +(dp805 +Ve +I1 +ssVR +p806 +(dp807 +VA +I2 +sVC +I1 +sVB +I2 +sVD +I1 +sVF +I2 +sVI +I2 +sVi +I1 +sVs +I1 +sVO +I1 +sVN +I1 +sVP +I3 +sVS +I1 +sVR +I3 +sVt +I2 +sVv +I1 +sV" +I1 +ssVR, +p808 +(dp809 +V +I2 +sV- +I1 +ssVyp +p810 +(dp811 +Ve +I3 +ssVys +p812 +(dp813 +V +I37 +sVe +I55 +sVi +I2 +sVh +I12 +sV- +I1 +sV, +I2 +sV. +I4 +sVt +I1 +ssVyr +p814 +(dp815 +Vi +I8 +ssVyt +p816 +(dp817 +Vh +I27 +sVe +I1 +ssVm" +p818 +(dp819 +V +I1 +ssVCr +p820 +(dp821 +Ve +I1 +sVu +I1 +ssVl' +p822 +(dp823 +Vs +I1 +ssVl +p824 +(dp825 +V( +I1 +sVE +I1 +sVF +I1 +sVI +I6 +sVM +I1 +sVN +I1 +sVP +I6 +sVS +I2 +sVT +I1 +sVW +I1 +sVa +I48 +sVc +I14 +sVb +I43 +sVe +I10 +sVd +I14 +sVg +I8 +sVf +I26 +sVi +I26 +sVh +I24 +sVk +I7 +sVj +I1 +sVm +I23 +sVl +I11 +sVo +I29 +sVn +I31 +sVq +I2 +sVp +I13 +sVs +I32 +sVr +I13 +sVu +I4 +sVt +I74 +sVw +I13 +sVv +I3 +sVy +I16 +ssVl! +p826 +(dp827 +V +I4 +ssVl. +p828 +(dp829 +V +I48 +ssVl, +p830 +(dp831 +V +I48 +ssVl- +p832 +(dp833 +Vb +I1 +sVd +I1 +sVf +I2 +sV- +I1 +sVo +I2 +sVn +I2 +sVu +I1 +sVt +I1 +ssVl) +p834 +(dp835 +V +I1 +sV. +I1 +ssVAs +p836 +(dp837 +V +I7 +sVs +I3 +sVk +I1 +ssV-M +p838 +(dp839 +Vr +I2 +sVe +I3 +ssVl? 
+p840 +(dp841 +V" +I1 +ssVl: +p842 +(dp843 +V +I2 +ssVl; +p844 +(dp845 +V +I8 +ssVem +p846 +(dp847 +Va +I28 +sV +I27 +sVb +I17 +sVe +I60 +sVi +I4 +sV- +I1 +sV, +I3 +sVo +I21 +sVn +I6 +sVp +I26 +sVs +I12 +sVu +I1 +sVy +I1 +sV; +I1 +sV. +I4 +ssVel +p848 +(dp849 +Va +I11 +sV +I23 +sVc +I3 +sVe +I16 +sVd +I7 +sVf +I116 +sVi +I95 +sVl +I78 +sVo +I8 +sV. +I1 +sVp +I12 +sVs +I5 +sVt +I9 +sVv +I7 +sVy +I111 +ssVeo +p850 +(dp851 +Vp +I2 +sVv +I2 +sVu +I3 +sVn +I1 +ssVen +p852 +(dp853 +V! +I1 +sV +I287 +sV* +I2 +sV- +I1 +sV, +I23 +sV. +I6 +sV; +I3 +sV: +I1 +sV? +I1 +sVa +I11 +sVc +I94 +sVb +I15 +sVe +I47 +sVd +I140 +sVg +I26 +sVi +I21 +sVj +I5 +sVl +I1 +sVo +I20 +sVn +I2 +sVq +I2 +sVs +I57 +sVr +I3 +sVu +I1 +sVt +I388 +sVv +I1 +sVy +I1 +sVz +I2 +ssVei +p854 +(dp855 +Vg +I5 +sVn +I51 +sVp +I1 +sVs +I3 +sVr +I18 +sVt +I17 +sVv +I38 +ssVh, +p856 +(dp857 +V +I30 +ssV8, +p858 +(dp859 +V +I1 +ssVej +p860 +(dp861 +Ve +I2 +sVu +I5 +sVo +I5 +ssVee +p862 +(dp863 +Va +I11 +sV +I62 +sVc +I2 +sVd +I37 +sVi +I15 +sVk +I14 +sVm +I29 +sV, +I1 +sVn +I106 +sVp +I21 +sVs +I4 +sVr +I5 +sVt +I35 +sVl +I43 +ssVed +p864 +(dp865 +V! +I5 +sV +I663 +sVe +I91 +sVd +I1 +sVg +I6 +sVi +I43 +sV- +I2 +sV, +I94 +sVo +I1 +sVn +I4 +sV. +I34 +sVs +I1 +sVu +I13 +sVy +I2 +sV; +I14 +sV: +I2 +sV] +I1 +sV? +I1 +sVl +I4 +ssVeg +p866 +(dp867 +Va +I20 +sV +I1 +sVe +I1 +sVg +I3 +sVi +I84 +sVl +I5 +sVr +I20 +sVu +I5 +ssVef +p868 +(dp869 +Va +I2 +sV +I4 +sVe +I13 +sVf +I12 +sVi +I6 +sV, +I1 +sVo +I59 +sVl +I3 +sVr +I1 +sVu +I19 +sVt +I12 +sVy +I2 +ssVea +p870 +(dp871 +V +I12 +sVc +I24 +sVb +I11 +sVd +I49 +sVg +I9 +sVk +I31 +sVl +I55 +sVn +I30 +sVs +I119 +sVr +I216 +sVu +I5 +sVt +I81 +sVv +I48 +sV; +I1 +sVz +I2 +sV, +I1 +ssVec +p872 +(dp873 +Va +I8 +sVe +I70 +sVi +I17 +sVh +I2 +sVk +I2 +sVl +I14 +sVo +I17 +sV. +I1 +sVr +I1 +sVu +I13 +sVt +I216 +ssVeb +p874 +(dp875 +Vy +I1 +sVa +I1 +sVr +I1 +sVe +I1 +sVt +I3 +ssVEx +p876 +(dp877 +Vc +I1 +sVe +I1 +ssVex +p878 +(dp879 +Va +I14 +sVc +I35 +sVe +I9 +sVi +I9 +sV, +I2 +sVq +I1 +sVp +I55 +sVu +I1 +sVt +I69 +ssV82 +p880 +(dp881 +V +I1 +sV5 +I1 +ssVet +p882 +(dp883 +Va +I7 +sV +I86 +sVc +I14 +sVe +I69 +sVi +I16 +sVh +I38 +sV, +I14 +sVo +I5 +sV. +I16 +sVs +I3 +sVr +I8 +sVu +I27 +sVt +I72 +sVw +I14 +sVy +I20 +sV; +I2 +sVn +I1 +sVl +I2 +ssVew +p884 +(dp885 +Va +I3 +sV +I28 +sVe +I1 +sVi +I2 +sVh +I4 +sV. +I1 +sVs +I8 +sV; +I1 +ssVev +p886 +(dp887 +Vi +I18 +sVa +I3 +sVe +I293 +sVo +I7 +ssVeq +p888 +(dp889 +Vu +I47 +ssVep +p890 +(dp891 +Va +I22 +sV +I10 +sVe +I16 +sVi +I5 +sVl +I16 +sVo +I6 +sVs +I1 +sVr +I17 +sVu +I2 +sVt +I21 +ssVes +p892 +(dp893 +V! +I2 +sV +I212 +sV' +I5 +sV) +I1 +sV- +I1 +sV, +I45 +sV. +I16 +sV; +I7 +sV: +I2 +sV? +I2 +sV] +I2 +sVa +I1 +sVc +I12 +sVe +I63 +sVd +I4 +sVi +I32 +sVh +I1 +sVo +I23 +sVp +I29 +sVs +I204 +sVu +I2 +sVt +I127 +ssVer +p894 +(dp895 +V! +I5 +sV +I839 +sV' +I31 +sV- +I9 +sV, +I166 +sV. +I84 +sV; +I30 +sV: +I3 +sV? +I7 +sVa +I51 +sVc +I17 +sVe +I240 +sVd +I4 +sVg +I17 +sVf +I28 +sVi +I142 +sVh +I15 +sVj +I1 +sVm +I23 +sVl +I9 +sVo +I9 +sVn +I91 +sVp +I4 +sVs +I184 +sVr +I11 +sVt +I46 +sVw +I10 +sVv +I33 +sVy +I142 +ssVrt +p896 +(dp897 +Va +I38 +sV@ +I1 +sVe +I19 +sV +I62 +sVi +I62 +sVh +I20 +sVf +I4 +sVm +I4 +sV, +I13 +sVo +I3 +sVn +I4 +sVs +I8 +sVu +I24 +sVy +I18 +sV; +I2 +sV. +I2 +sVl +I4 +ssVru +p898 +(dp899 +Vc +I4 +sVb +I3 +sVe +I15 +sVd +I6 +sVg +I1 +sVi +I1 +sVm +I1 +sVl +I4 +sVn +I7 +sVp +I5 +sVs +I9 +sVt +I7 +ssVrv +p900 +(dp901 +Va +I6 +sVi +I6 +sVe +I22 +ssVE. 
+p902 +(dp903 +V +I1 +ssVrp +p904 +(dp905 +Vr +I11 +sVe +I1 +sVl +I1 +sVo +I4 +ssVs] +p906 +(dp907 +V +I2 +ssVrr +p908 +(dp909 +Va +I7 +sVe +I18 +sVi +I48 +sVo +I8 +sVu +I4 +sVy +I35 +ssVrs +p910 +(dp911 +Va +I13 +sV +I92 +sVe +I81 +sVd +I4 +sV' +I1 +sVi +I11 +sVh +I1 +sV, +I39 +sVo +I18 +sV. +I42 +sV) +I1 +sVu +I17 +sVt +I44 +sV; +I4 +sV? +I1 +ssVE! +p912 +(dp913 +V* +I1 +ssVry +p914 +(dp915 +V +I191 +sVb +I1 +sVi +I20 +sV, +I13 +sVo +I1 +sV. +I7 +sVt +I15 +sVw +I1 +sV; +I2 +sV? +I1 +ssVe] +p916 +(dp917 +V +I1 +ssVre +p918 +(dp919 +V! +I3 +sV +I363 +sV) +I1 +sV, +I66 +sV. +I23 +sV; +I13 +sV? +I2 +sVa +I183 +sVc +I68 +sVb +I2 +sVe +I64 +sVd +I194 +sVg +I19 +sVf +I49 +sVi +I1 +sVh +I7 +sVj +I10 +sVm +I50 +sVl +I54 +sVo +I2 +sVn +I43 +sVq +I21 +sVp +I44 +sVs +I183 +sVr +I2 +sVt +I63 +sVw +I4 +sVv +I35 +sVy +I1 +ssVey +p920 +(dp921 +V +I39 +sVe +I10 +sVm +I8 +sV, +I3 +sVo +I10 +sV. +I2 +sV; +I1 +ssVrg +p922 +(dp923 +V +I13 +sVe +I21 +sVi +I9 +sV, +I1 +sV/ +I1 +sVu +I1 +sVo +I4 +sVy +I1 +ssVra +p924 +(dp925 +Vc +I29 +sVb +I34 +sVd +I7 +sVg +I13 +sVi +I18 +sVm +I4 +sVl +I23 +sVo +I4 +sVn +I36 +sVp +I2 +sVs +I5 +sVr +I5 +sVu +I1 +sVt +I64 +sVw +I8 +sVv +I3 +sVy +I4 +ssVrb +p926 +(dp927 +Vi +I5 +sVa +I3 +sVe +I2 +sVo +I1 +ssVrc +p928 +(dp929 +Ve +I25 +sVi +I4 +sVh +I52 +sVo +I7 +sVu +I10 +sVy +I27 +ssVrl +p930 +(dp931 +V! +I2 +sV +I12 +sVe +I9 +sVd +I21 +sV' +I1 +sVi +I2 +sV, +I8 +sVo +I5 +sV. +I3 +sVs +I1 +sVa +I1 +sVy +I21 +sV; +I1 +ssVrm +p932 +(dp933 +Va +I12 +sV +I17 +sVe +I13 +sVi +I28 +sV, +I8 +sVl +I5 +sV) +I1 +sVs +I6 +sVt +I4 +sV. +I1 +ssVrn +p934 +(dp935 +Va +I8 +sV +I20 +sVe +I35 +sVi +I16 +sV, +I1 +sVo +I65 +sV. +I3 +sVs +I1 +sVt +I3 +ssVro +p936 +(dp937 +V- +I1 +sVa +I5 +sVc +I6 +sVb +I13 +sVd +I8 +sVg +I5 +sVf +I8 +sVi +I1 +sVh +I1 +sVk +I1 +sVj +I28 +sVm +I124 +sVl +I5 +sVo +I27 +sVn +I18 +sVp +I21 +sVs +I9 +sVr +I4 +sVu +I38 +sVt +I35 +sVw +I12 +sVv +I20 +sVy +I5 +ssVrh +p938 +(dp939 +Va +I15 +sVo +I1 +ssVri +p940 +(dp941 +Va +I25 +sVc +I94 +sVb +I19 +sVe +I56 +sVd +I10 +sVg +I28 +sVf +I5 +sVk +I2 +sVm +I4 +sVl +I6 +sVo +I28 +sVn +I98 +sVp +I4 +sVs +I12 +sVu +I3 +sVt +I52 +sVv +I34 +sVz +I9 +ssVrj +p942 +(dp943 +Vo +I1 +ssVrk +p944 +(dp945 +Va +I1 +sV +I7 +sVe +I6 +sV- +I1 +sVl +I5 +sV, +I2 +sVs +I2 +sV. +I2 +ssVe- +p946 +(dp947 +V- +I7 +sV +I1 +sVb +I1 +sVM +I3 +ssVe, +p948 +(dp949 +V +I318 +sV" +I6 +sV- +I3 +ssVe. +p950 +(dp951 +V +I157 +sVc +I1 +sV" +I4 +ssVe) +p952 +(dp953 +V +I3 +sV; +I1 +ssVED +p954 +(dp955 +VI +I3 +sV +I5 +sV, +I1 +ssVEG +p956 +(dp957 +VI +I3 +sVL +I1 +ssVEF +p958 +(dp959 +VO +I1 +ssVEA +p960 +(dp961 +VC +I1 +sVR +I1 +sVD +I1 +sVV +I1 +ssVe +p962 +(dp963 +V" +I2 +sV$ +I1 +sV( +I3 +sV1 +I1 +sV6 +I1 +sVA +I8 +sVC +I25 +sVB +I1 +sVE +I2 +sVD +I2 +sVG +I1 +sVF +I4 +sVI +I36 +sVH +I3 +sVM +I10 +sVL +I3 +sVP +I16 +sVS +I5 +sVR +I4 +sVU +I1 +sVT +I3 +sVW +I1 +sVV +I4 +sV[ +I1 +sVa +I268 +sVc +I141 +sVb +I100 +sVe +I83 +sVd +I102 +sVg +I56 +sVf +I115 +sVi +I245 +sVh +I238 +sVk +I17 +sVj +I8 +sVm +I209 +sVl +I70 +sVo +I254 +sVn +I89 +sVq +I7 +sVp +I109 +sVs +I234 +sVr +I68 +sVu +I29 +sVt +I380 +sVw +I233 +sVv +I28 +sVy +I59 +ssVEC +p964 +(dp965 +VT +I8 +ssV") +p966 +(dp967 +V. +I1 +ssVAn +p968 +(dp969 +Vd +I6 +ssVe? +p970 +(dp971 +V +I13 +sV" +I1 +ssVEY +p972 +(dp973 +V +I1 +ssVEX +p974 +(dp975 +VP +I1 +sVT +I6 +sV? +I1 +ssVe; +p976 +(dp977 +V +I55 +ssVe: +p978 +(dp979 +V +I5 +ssVET +p980 +(dp981 +V +I3 +sVE +I6 +sVT +I1 +ssVEW +p982 +(dp983 +V +I1 +ssVEV +p984 +(dp985 +VE +I2 +ssVEQ +p986 +(dp987 +VU +I1 +ssVES +p988 +(dp989 +V +I4 +sVS +I2 +sVE +I1 +sV, +I1 +sV. 
+I1 +ssVER +p990 +(dp991 +V +I7 +sVC +I1 +sVE +I1 +sVG +I7 +sVI +I7 +sV* +I1 +sV, +I3 +sVN +I49 +sVS +I1 +sVW +I1 +ssVEl +p992 +(dp993 +Ve +I1 +ssVEn +p994 +(dp995 +Vd +I1 +sVg +I3 +ssVEd +p996 +(dp997 +Vw +I9 +ssVNO +p998 +(dp999 +V +I2 +sVT +I5 +sVN +I49 +ssVr: +p1000 +(dp1001 +V +I6 +ssVr; +p1002 +(dp1003 +V +I36 +ssVr' +p1004 +(dp1005 +Vs +I31 +ssVr +p1006 +(dp1007 +V( +I1 +sV3 +I1 +sV4 +I1 +sVA +I8 +sVC +I6 +sVB +I1 +sVE +I1 +sVF +I8 +sVI +I21 +sVH +I1 +sVJ +I49 +sVM +I15 +sVL +I7 +sVS +I20 +sVR +I9 +sV[ +I2 +sVa +I152 +sVc +I53 +sVb +I42 +sVe +I46 +sVd +I55 +sVg +I19 +sVf +I75 +sVi +I59 +sVh +I104 +sVk +I8 +sVj +I3 +sVm +I116 +sVl +I47 +sVo +I88 +sVn +I20 +sVq +I1 +sVp +I47 +sVs +I100 +sVr +I34 +sVu +I21 +sVt +I189 +sVw +I79 +sVv +I8 +sVy +I15 +sVz +I1 +ssVGi +p1008 +(dp1009 +Vv +I1 +ssVr, +p1010 +(dp1011 +V +I181 +sV" +I2 +sV- +I9 +ssVEt +p1012 +(dp1013 +Ve +I11 +ssVr. +p1014 +(dp1015 +V +I158 +sV" +I3 +sV0 +I1 +ssVr@ +p1016 +(dp1017 +Vl +I1 +ssVr) +p1018 +(dp1019 +V, +I1 +ssVr* +p1020 +(dp1021 +V* +I1 +ssVw' +p1022 +(dp1023 +Vs +I5 +ssVNC +p1024 +(dp1025 +VI +I1 +sVE +I1 +sVL +I3 +sVO +I1 +ssVxc +p1026 +(dp1027 +Vi +I2 +sVu +I13 +sVe +I17 +sVl +I3 +sVh +I1 +ssVxa +p1028 +(dp1029 +Vc +I5 +sVm +I4 +sVt +I4 +sVg +I1 +ssV1] +p1030 +(dp1031 +V +I3 +ssVUE +p1032 +(dp1033 +VN +I1 +ssVxe +p1034 +(dp1035 +Vc +I2 +sVr +I5 +sVm +I1 +sVs +I2 +sVd +I12 +ssVxx +p1036 +(dp1037 +V1 +I1 +sVx +I4 +sV] +I1 +ssVjo +p1038 +(dp1039 +Vi +I9 +sVy +I8 +sVk +I1 +sVu +I4 +ssVxp +p1040 +(dp1041 +Vr +I10 +sVe +I27 +sVl +I16 +sVo +I2 +ssVxq +p1042 +(dp1043 +Vu +I1 +ssVxt +p1044 +(dp1045 +V +I31 +sVe +I8 +sV) +I1 +sV, +I6 +sV/ +I2 +sV. +I2 +sVs +I12 +sVr +I10 +sVu +I1 +sVo +I1 +sV9 +I2 +ssVM +p1046 +(dp1047 +VI +I1 +sVE +I1 +sVT +I2 +ssVu, +p1048 +(dp1049 +V +I42 +sV" +I3 +ssVIE +p1050 +(dp1051 +VS +I3 +sVD +I1 +ssVeu +p1052 +(dp1053 +V! +I2 +sVv +I1 +sV, +I5 +sV. +I2 +ssV-F +p1054 +(dp1055 +Vr +I1 +ssV?" +p1056 +(dp1057 +V +I13 +ssV0% +p1058 +(dp1059 +V +I3 +ssV.; +p1060 +(dp1061 +V +I1 +ssV<h +p1062 +(dp1063 +Va +I1 +ssVx] +p1064 +(dp1065 +V +I1 +ssV-i +p1066 +(dp1067 +Vs +I1 +sVn +I8 +ssV? +p1068 +(dp1069 +VA +I3 +sV +I1 +sVB +I1 +sVF +I1 +sVI +I4 +sVH +I1 +sVa +I1 +sVM +I3 +sVN +I1 +sVS +I2 +sVR +I1 +sVU +I1 +sVT +I2 +sVW +I6 +sVY +I2 +ssV"h +p1070 +(dp1071 +Ve +I2 +ssVX? +p1072 +(dp1073 +V0 +I1 +ssVc, +p1074 +(dp1075 +V +I3 +ssVXI +p1076 +(dp1077 +VI +I6 +sV +I3 +sVX +I3 +sVV +I3 +ssVx. +p1078 +(dp1079 +Vx +I1 +sVc +I1 +ssVKe +p1080 +(dp1081 +Ve +I2 +sVn +I1 +ssVx +p1082 +(dp1083 +Va +I1 +sV +I1 +sVd +I1 +sVh +I1 +sV1 +I1 +sVr +I1 +sVw +I1 +sVy +I1 +ssVKn +p1084 +(dp1085 +Vo +I1 +ssVPo +p1086 +(dp1087 +Vs +I1 +sVo +I6 +ssVRG +p1088 +(dp1089 +V" +I1 +sV- +I6 +ssVXX +p1090 +(dp1091 +VI +I10 +sV +I2 +sVX +I10 +sVV +I8 +ssVXP +p1092 +(dp1093 +VR +I1 +ssVx1 +p1094 +(dp1095 +V0 +I1 +ssV$2 +p1096 +(dp1097 +V +I1 +ssVXT +p1098 +(dp1099 +V +I3 +sVS +I3 +ssVY, +p1100 +(dp1101 +V +I1 +ssV11 +p1102 +(dp1103 +V. +I1 +ssV10 +p1104 +(dp1105 +Va +I1 +sV +I1 +sV% +I2 +sV, +I1 +sV. +I2 +sV0 +I1 +sVx +I1 +ssVOf +p1106 +(dp1107 +V +I1 +sVf +I1 +ssV-U +p1108 +(dp1109 +Vn +I1 +ssV19 +p1110 +(dp1111 +V9 +I2 +sV7 +I1 +ssV18 +p1112 +(dp1113 +V2 +I1 +ssVKI +p1114 +(dp1115 +VN +I1 +ssVu +p1116 +(dp1117 +VM +I1 +sVR +I1 +sVa +I33 +sVc +I19 +sVb +I2 +sVe +I4 +sVd +I12 +sVg +I3 +sVf +I3 +sVi +I3 +sVh +I18 +sVk +I11 +sVm +I30 +sVl +I3 +sVo +I8 +sVn +I4 +sVp +I4 +sVs +I12 +sVr +I7 +sVu +I2 +sVt +I21 +sVw +I42 +sVv +I1 +ssV&c +p1118 +(dp1119 +V, +I1 +sV. 
+I19 +ssVAg +p1120 +(dp1121 +Va +I1 +ssV1* +p1122 +(dp1123 +V* +I1 +ssV1, +p1124 +(dp1125 +V +I1 +ssV1. +p1126 +(dp1127 +V +I2 +sVt +I1 +ssVDO +p1128 +(dp1129 +VM +I2 +sVN +I1 +ssV-T +p1130 +(dp1131 +Vh +I2 +sVM +I1 +ssV-p +p1132 +(dp1133 +Vr +I1 +ssVDI +p1134 +(dp1135 +VC +I2 +sVE +I1 +sVD +I1 +sVN +I2 +sVS +I2 +sVR +I1 +sVU +I1 +sVT +I1 +ssVDE +p1136 +(dp1137 +VA +I2 +sV +I29 +sVD +I1 +sVM +I1 +sVN +I1 +sVR +I2 +sVX +I1 +ssVRA +p1138 +(dp1139 +VC +I1 +sVN +I4 +ssVDA +p1140 +(dp1141 +VM +I3 +ssVw: +p1142 +(dp1143 +V +I1 +ssV*n +p1144 +(dp1145 +Vo +I1 +ssVF. +p1146 +(dp1147 +V +I1 +ssVDY +p1148 +(dp1149 +V +I35 +ssV*e +p1150 +(dp1151 +Vx +I1 +ssV/ +p1152 +(dp1153 +VC +I1 +ssVDo +p1154 +(dp1155 +V +I6 +sVn +I1 +ssVk +p1156 +(dp1157 +Va +I13 +sVc +I1 +sVb +I2 +sVe +I3 +sVd +I1 +sVf +I10 +sVI +I4 +sV( +I1 +sVH +I1 +sVm +I9 +sVl +I1 +sVo +I13 +sVq +I1 +sVp +I2 +sVs +I2 +sVu +I1 +sVt +I14 +sVw +I6 +sVh +I3 +sVy +I5 +sVi +I5 +ssVx, +p1158 +(dp1159 +V +I2 +ssVDi +p1160 +(dp1161 +Vs +I1 +sVr +I1 +sVd +I2 +ssV*T +p1162 +(dp1163 +Vh +I4 +sVH +I2 +ssVk) +p1164 +(dp1165 +V, +I1 +ssV*W +p1166 +(dp1167 +VA +I1 +sVe +I1 +ssVk. +p1168 +(dp1169 +V +I6 +ssVk- +p1170 +(dp1171 +Vu +I1 +ssVDa +p1172 +(dp1173 +Vt +I1 +ssVN +p1174 +(dp1175 +VI +I2 +sVU +I1 +sVE +I2 +sVT +I32 +sVV +I8 +ssV*I +p1176 +(dp1177 +Vn +I1 +ssVxi +p1178 +(dp1179 +Vb +I1 +sVe +I6 +sVl +I1 +sVo +I8 +sVn +I2 +sVs +I4 +sVt +I1 +ssV*E +p1180 +(dp1181 +VI +I1 +sVt +I1 +sVN +I2 +ssV*F +p1182 +(dp1183 +VO +I1 +ssVDu +p1184 +(dp1185 +Vr +I2 +ssVKi +p1186 +(dp1187 +Vs +I1 +ssV*B +p1188 +(dp1189 +VE +I1 +ssVPR +p1190 +(dp1191 +VI +I2 +sVE +I2 +sVO +I8 +ssVN' +p1192 +(dp1193 +VT +I1 +ssV*: +p1194 +(dp1195 +V +I1 +ssV7 +p1196 +(dp1197 +V +I1 +ssV-h +p1198 +(dp1199 +Vu +I2 +sVe +I4 +sVo +I2 +ssV~) +p1200 +(dp1201 +V, +I1 +ssV*) +p1202 +(dp1203 +V +I1 +ssV71 +p1204 +(dp1205 +V* +I1 +ssVMN +p1206 +(dp1207 +VI +I1 +ssV.G +p1208 +(dp1209 +VU +I1 +ssV* +p1210 +(dp1211 +VY +I1 +sVc +I1 +sVT +I1 +sVf +I1 +ssV78 +p1212 +(dp1213 +V2 +I1 +ssVD, +p1214 +(dp1215 +V +I3 +ssVQU +p1216 +(dp1217 +VE +I1 +ssVkf +p1218 +(dp1219 +Va +I5 +sVu +I1 +ssVke +p1220 +(dp1221 +V! +I1 +sV +I91 +sVe +I11 +sVd +I27 +sV- +I1 +sVl +I6 +sVn +I22 +sVp +I3 +sVs +I11 +sVr +I1 +sV; +I2 +sV. +I2 +sV, +I4 +ssVki +p1222 +(dp1223 +Vl +I3 +sVn +I63 +ssVkn +p1224 +(dp1225 +Ve +I8 +sVo +I86 +ssVD +p1226 +(dp1227 +VD +I2 +sVM +I1 +sVT +I4 +sVW +I1 +sVt +I2 +ssVkl +p1228 +(dp1229 +Va +I5 +ssVks +p1230 +(dp1231 +V +I20 +sV; +I2 +sV, +I2 +sV' +I1 +sV. +I2 +ssVSe +p1232 +(dp1233 +Vy +I8 +sVn +I2 +ssVkw +p1234 +(dp1235 +Va +I2 +ssVky +p1236 +(dp1237 +V +I3 +ssVs- +p1238 +(dp1239 +V- +I5 +ssVd. +p1240 +(dp1241 +V +I61 +sV" +I1 +ssVd, +p1242 +(dp1243 +V +I171 +sV" +I2 +sV- +I3 +ssVgf +p1244 +(dp1245 +Vo +I19 +ssVWA +p1246 +(dp1247 +VR +I4 +sVN +I1 +ssVBE +p1248 +(dp1249 +V +I1 +sVR +I8 +sVF +I1 +ssVd' +p1250 +(dp1251 +Vs +I12 +ssVd" +p1252 +(dp1253 +V +I1 +ssVd +p1254 +(dp1255 +V" +I1 +sV( +I2 +sVB +I2 +sVE +I1 +sVD +I5 +sVG +I2 +sVF +I5 +sVI +I62 +sVH +I1 +sVJ +I1 +sVM +I18 +sVL +I5 +sVS +I17 +sVR +I4 +sVW +I2 +sV[ +I1 +sVa +I166 +sVc +I41 +sVb +I122 +sVe +I45 +sVd +I32 +sVg +I15 +sVf +I71 +sVi +I115 +sVh +I168 +sVk +I4 +sVj +I5 +sVm +I109 +sVl +I17 +sVo +I97 +sVn +I91 +sVq +I4 +sVp +I29 +sVs +I101 +sVr +I18 +sVu +I20 +sVt +I300 +sVw +I107 +sVv +I7 +sVy +I42 +ssVWH +p1256 +(dp1257 +VA +I1 +ssVd? 
+p1258 +(dp1259 +V +I2 +sV" +I1 +ssVJO +p1260 +(dp1261 +VH +I17 +ssVd: +p1262 +(dp1263 +V +I7 +ssVd; +p1264 +(dp1265 +V +I28 +ssVJE +p1266 +(dp1267 +VC +I7 +ssV-S +p1268 +(dp1269 +Vi +I1 +ssVn- +p1270 +(dp1271 +Vh +I1 +sV- +I4 +sVl +I7 +ssV=T +p1272 +(dp1273 +Vr +I1 +ssVWe +p1274 +(dp1275 +V +I21 +sVr +I2 +sVl +I3 +sVd +I2 +ssVFI +p1276 +(dp1277 +VT +I1 +ssVWo +p1278 +(dp1279 +Vr +I1 +ssVJu +p1280 +(dp1281 +Vn +I1 +ssVWi +p1282 +(dp1283 +Vl +I5 +sVg +I3 +sVt +I3 +ssVWh +p1284 +(dp1285 +Vy +I5 +sVa +I10 +sVe +I19 +sVo +I1 +ssVJo +p1286 +(dp1287 +Vh +I18 +ssVJa +p1288 +(dp1289 +Vm +I54 +sVn +I6 +ssVMR +p1290 +(dp1291 +V +I1 +sVS +I30 +sV. +I9 +ssVAr +p1292 +(dp1293 +Ve +I1 +sVt +I1 +ssVdn +p1294 +(dp1295 +Vi +I2 +sVe +I18 +ssVdo +p1296 +(dp1297 +V! +I1 +sV +I51 +sVe +I20 +sVi +I3 +sVm +I9 +sVl +I3 +sVo +I6 +sV, +I4 +sVp +I2 +sVu +I23 +sVw +I11 +sV. +I1 +sV; +I2 +sVn +I44 +sV? +I1 +ssVdl +p1298 +(dp1299 +Vy +I15 +sVe +I3 +ssVy" +p1300 +(dp1301 +V +I1 +sV. +I1 +ssVIG +p1302 +(dp1303 +VE +I1 +ssVA. +p1304 +(dp1305 +V +I5 +ssVdi +p1306 +(dp1307 +Va +I24 +sVc +I17 +sVe +I15 +sVd +I24 +sVg +I4 +sVf +I14 +sVl +I3 +sVn +I49 +sVs +I128 +sVr +I12 +sVu +I5 +sVt +I19 +sVv +I1 +ssVdf +p1308 +(dp1309 +Vu +I5 +ssVdg +p1310 +(dp1311 +Vi +I4 +sVe +I11 +sVm +I8 +ssVdd +p1312 +(dp1313 +Vi +I5 +sV +I2 +sVr +I6 +sVe +I11 +sV, +I1 +ssVde +p1314 +(dp1315 +Va +I104 +sV +I49 +sVc +I25 +sVb +I4 +sVe +I25 +sVd +I69 +sVg +I12 +sVf +I11 +sVi +I1 +sVj +I2 +sVm +I8 +sVl +I30 +sVn +I34 +sVp +I16 +sVs +I58 +sVr +I180 +sVt +I29 +sVv +I5 +sV. +I2 +sV, +I7 +ssVda +p1316 +(dp1317 +Vb +I5 +sVm +I5 +sVl +I2 +sVn +I11 +sVr +I10 +sVu +I38 +sVt +I9 +sVy +I40 +ssVf? +p1318 +(dp1319 +V +I2 +ssV= +p1320 +(dp1321 +VC +I1 +ssVdy +p1322 +(dp1323 +V +I98 +sVs +I12 +sV, +I3 +sV. +I1 +ssVdv +p1324 +(dp1325 +Va +I12 +sVi +I10 +sVo +I1 +ssVdw +p1326 +(dp1327 +Va +I9 +sVe +I1 +ssVdu +p1328 +(dp1329 +Va +I1 +sV +I1 +sVc +I37 +sVe +I7 +sVi +I1 +sVl +I9 +sVp +I1 +sVr +I7 +sVt +I8 +ssVdr +p1330 +(dp1331 +Va +I8 +sVe +I31 +sVo +I2 +ssVds +p1332 +(dp1333 +V +I30 +sVh +I11 +sV, +I7 +sVo +I6 +sV. +I8 +sV; +I3 +ssVnm +p1334 +(dp1335 +Vi +I1 +ssVqu +p1336 +(dp1337 +Vi +I54 +sVa +I35 +sVe +I32 +ssVGe +p1338 +(dp1339 +Vr +I1 +ssVW +p1340 +(dp1341 +VG +I1 +ssV(h +p1342 +(dp1343 +Vi +I1 +ssVm: +p1344 +(dp1345 +V +I1 +ssVE +p1346 +(dp1347 +Vo +I1 +sVC +I28 +sVE +I1 +sVM +I1 +sVs +I1 +sVO +I4 +sVN +I2 +sVP +I1 +sVS +I6 +sVT +I4 +sVw +I1 +sVV +I7 +sVt +I3 +ssVd] +p1348 +(dp1349 +V +I1 +ssVPL +p1350 +(dp1351 +VI +I1 +ssVPr +p1352 +(dp1353 +Vi +I5 +sVa +I1 +sVe +I2 +sVo +I25 +ssVuv +p1354 +(dp1355 +Vr +I1 +ssVUL +p1356 +(dp1357 +VA +I1 +ssVw! +p1358 +(dp1359 +V +I1 +ssVw +p1360 +(dp1361 +VI +I3 +sVL +I1 +sVN +I1 +sVa +I17 +sVc +I4 +sVb +I7 +sVe +I2 +sVd +I15 +sVg +I4 +sVf +I5 +sVi +I12 +sVh +I20 +sVm +I12 +sVl +I5 +sVo +I5 +sVn +I12 +sVq +I1 +sVp +I2 +sVs +I9 +sVu +I2 +sVt +I25 +sVw +I11 +sVv +I1 +sVy +I6 +ssVw. +p1362 +(dp1363 +V +I5 +ssVw, +p1364 +(dp1365 +V +I26 +ssV.e +p1366 +(dp1367 +Vd +I1 +ssV#9 +p1368 +(dp1369 +V4 +I1 +ssVPa +p1370 +(dp1371 +Vy +I1 +sVr +I5 +sVg +I1 +ssVrf +p1372 +(dp1373 +Vi +I1 +sVe +I22 +sVu +I7 +ssVPe +p1374 +(dp1375 +Vr +I1 +ssVBe +p1376 +(dp1377 +V +I3 +sVs +I2 +sVl +I1 +ssVw; +p1378 +(dp1379 +V +I3 +ssV#6 +p1380 +(dp1381 +V +I1 +ssVPl +p1382 +(dp1383 +Va +I1 +sVe +I2 +ssV02 +p1384 +(dp1385 +V] +I1 +ssV.x +p1386 +(dp1387 +Vx +I1 +ssV00 +p1388 +(dp1389 +V +I2 +sV, +I2 +sV. +I2 +sV1 +I3 +sV0 +I3 +sV2 +I1 +sV= +I1 +ssV01 +p1390 +(dp1391 +V +I1 +sV. +I2 +ssV." +p1392 +(dp1393 +V +I23 +ssV04 +p1394 +(dp1395 +V. 
+I1 +ssVPU +p1396 +(dp1397 +VR +I1 +sVB +I2 +sVN +I1 +ssV- +p1398 +(dp1399 +VH +I2 +ssVuo +p1400 +(dp1401 +Vs +I1 +ssV0= +p1402 +(dp1403 +VT +I1 +ssV0 +p1404 +(dp1405 +Vx +I1 +sVb +I1 +sVm +I1 +sVt +I1 +sVd +I3 +ssVPA +p1406 +(dp1407 +VR +I1 +ssVju +p1408 +(dp1409 +Vs +I28 +sVr +I8 +sVd +I19 +ssVXL +p1410 +(dp1411 +VI +I1 +sV +I1 +ssV0. +p1412 +(dp1413 +V +I1 +sVz +I1 +sVt +I1 +sVG +I1 +ssVPO +p1414 +(dp1415 +VS +I2 +ssV0, +p1416 +(dp1417 +V0 +I3 +ssVlf +p1418 +(dp1419 +Va +I3 +sV +I80 +sVi +I2 +sV- +I2 +sV, +I19 +sVo +I1 +sV. +I8 +sV! +I1 +sV; +I6 +sV: +I1 +sV? +I2 +ssVwe +p1420 +(dp1421 +Va +I10 +sV +I78 +sVe +I29 +sVd +I10 +sVl +I41 +sVn +I7 +sVr +I71 +sVv +I36 +ssVwa +p1422 +(dp1423 +Vb +I1 +sVi +I7 +sVk +I2 +sVl +I4 +sVn +I21 +sVs +I186 +sVr +I100 +sVt +I5 +sVy +I55 +ssVlg +p1424 +(dp1425 +Vi +I1 +sVe +I4 +ssVwo +p1426 +(dp1427 +V +I17 +sVr +I57 +sVu +I70 +sVm +I25 +sVn +I7 +ssVwn +p1428 +(dp1429 +V +I68 +sV, +I11 +sV. +I10 +sVs +I2 +sVr +I1 +sV; +I3 +ssVwl +p1430 +(dp1431 +Ve +I6 +ssVwk +p1432 +(dp1433 +Vw +I2 +ssVwi +p1434 +(dp1435 +Vc +I1 +sVd +I4 +sVf +I19 +sVl +I114 +sVn +I27 +sVs +I49 +sVt +I240 +ssVwh +p1436 +(dp1437 +Vi +I144 +sVa +I69 +sVe +I85 +sVy +I3 +sVo +I92 +ssVP +p1438 +(dp1439 +Vp +I1 +sVd +I1 +ssV27 +p1440 +(dp1441 +V8 +I1 +ssVws +p1442 +(dp1443 +V! +I1 +sV +I10 +sVl +I1 +sV, +I3 +ssVwr +p1444 +(dp1445 +Vi +I20 +sVe +I9 +sVo +I10 +ssV[* +p1446 +(dp1447 +V] +I3 +ssVje +p1448 +(dp1449 +Va +I10 +sVc +I53 +sVs +I1 +ssVP. +p1450 +(dp1451 +V +I1 +ssVwy +p1452 +(dp1453 +Ve +I1 +ssVBa +p1454 +(dp1455 +Vt +I5 +ssV". +p1456 +(dp1457 +V +I3 +ssVuf +p1458 +(dp1459 +Vf +I11 +ssVua +p1460 +(dp1461 +Vd +I14 +sVg +I3 +sVi +I13 +sVl +I31 +sVn +I1 +sVr +I14 +sVt +I17 +ssV0x +p1462 +(dp1463 +V. +I1 +ssVRS +p1464 +(dp1465 +VI +I1 +sV. +I30 +ssV0a +p1466 +(dp1467 +V. +I1 +ssV] +p1468 +(dp1469 +Va +I3 +sV +I6 +sVd +I1 +sVp +I1 +sVt +I1 +sVY +I1 +ssVck +p1470 +(dp1471 +V +I12 +sVe +I3 +sVi +I4 +sVn +I3 +sVy +I3 +sV. +I1 +ssVci +p1472 +(dp1473 +Va +I21 +sVe +I20 +sVd +I7 +sVf +I1 +sVl +I5 +sVo +I9 +sVn +I9 +sVp +I5 +sVs +I5 +sVr +I10 +sVt +I7 +sVv +I2 +ssVch +p1474 +(dp1475 +Va +I40 +sV +I269 +sVe +I43 +sVi +I26 +sVh +I46 +sV- +I1 +sVm +I9 +sV, +I9 +sVo +I22 +sV. +I5 +sVu +I3 +sV; +I1 +ssVco +p1476 +(dp1477 +Vd +I1 +sVm +I128 +sVl +I8 +sVo +I4 +sVn +I233 +sVq +I5 +sVp +I16 +sVs +I3 +sVr +I14 +sVu +I134 +sVv +I8 +ssVcl +p1478 +(dp1479 +Vi +I7 +sVe +I23 +sVu +I20 +sVa +I19 +sVo +I6 +ssVcc +p1480 +(dp1481 +Va +I12 +sVe +I13 +sVi +I1 +sVo +I27 +sVr +I1 +sVu +I10 +ssVca +p1482 +(dp1483 +V +I54 +sVc +I6 +sVb +I2 +sVd +I1 +sV' +I12 +sVm +I18 +sVl +I25 +sVn +I117 +sVp +I11 +sVs +I20 +sVr +I21 +sVu +I21 +sVt +I27 +sV" +I1 +sV; +I2 +sV. +I1 +sV, +I14 +ssVce +p1484 +(dp1485 +Va +I7 +sV +I178 +sV: +I1 +sVe +I7 +sVd +I34 +sV' +I1 +sV) +I1 +sVi +I43 +sV- +I1 +sVm +I5 +sV, +I36 +sVn +I10 +sVp +I18 +sVs +I49 +sVr +I30 +sV! +I1 +sVt +I1 +sV; +I7 +sV. +I26 +sV? +I2 +sVl +I17 +ssVcd +p1486 +(dp1487 +V +I2 +ssV60 +p1488 +(dp1489 +V +I1 +ssV61 +p1490 +(dp1491 +V8 +I1 +ssVcy +p1492 +(dp1493 +V! +I3 +sV +I29 +sV' +I1 +sVi +I1 +sV- +I1 +sV, +I11 +sV. +I3 +sVs +I1 +sV; +I2 +ssVA +p1494 +(dp1495 +Vg +I1 +ssV), +p1496 +(dp1497 +V +I5 +ssV). 
+p1498 +(dp1499 +V +I4 +ssVcs +p1500 +(dp1501 +V +I1 +sVo +I1 +ssVcr +p1502 +(dp1503 +Va +I1 +sVi +I13 +sVe +I30 +sVu +I3 +sVy +I2 +ssVcq +p1504 +(dp1505 +Vu +I16 +ssVcu +p1506 +(dp1507 +Vm +I12 +sVl +I31 +sVn +I1 +sVs +I21 +sVr +I15 +sVt +I5 +ssVct +p1508 +(dp1509 +Va +I10 +sV +I88 +sV" +I1 +sVe +I42 +sV' +I2 +sVi +I97 +sVl +I19 +sVo +I6 +sV, +I13 +sVs +I5 +sVr +I5 +sVu +I15 +sV; +I2 +sV. +I9 +ssVpr +p1510 +(dp1511 +Vi +I37 +sVa +I8 +sVe +I122 +sVu +I6 +sVo +I110 +ssVps +p1512 +(dp1513 +V +I14 +sV- +I1 +sVe +I1 +sV, +I3 +sVt +I2 +ssVpp +p1514 +(dp1515 +Ve +I33 +sVi +I13 +sVl +I12 +sVo +I37 +sVr +I8 +sVy +I18 +ssVC. +p1516 +(dp1517 +V +I6 +ssVpt +p1518 +(dp1519 +Va +I1 +sV +I20 +sVe +I6 +sVi +I21 +sV, +I1 +sVo +I1 +sVs +I2 +sVu +I1 +sV; +I1 +ssVpu +p1520 +(dp1521 +Vb +I3 +sVd +I3 +sVl +I3 +sVn +I7 +sVs +I1 +sVr +I8 +sVt +I17 +ssVpy +p1522 +(dp1523 +V! +I1 +sV +I16 +sV, +I1 +sV. +I5 +sVr +I8 +sV: +I1 +ssVY; +p1524 +(dp1525 +V +I1 +ssV"y +p1526 +(dp1527 +Vo +I2 +ssVpa +p1528 +(dp1529 +Vc +I3 +sVb +I2 +sVg +I1 +sVi +I15 +sVl +I1 +sVn +I10 +sVs +I18 +sVr +I92 +sVt +I18 +sVy +I5 +ssVpd +p1530 +(dp1531 +Va +I1 +ssVpe +p1532 +(dp1533 +Va +I49 +sV +I22 +sVc +I50 +sVe +I5 +sVd +I8 +sVm +I1 +sV, +I4 +sVo +I2 +sVn +I49 +sVs +I4 +sVr +I128 +sVt +I2 +sV; +I1 +sV. +I1 +sV] +I1 +sVl +I3 +ssVph +p1534 +(dp1535 +Vy +I2 +sVi +I1 +sVa +I1 +sV +I1 +ssVpi +p1536 +(dp1537 +Va +I1 +sVc +I1 +sVe +I5 +sVd +I6 +sVl +I2 +sVn +I32 +sVs +I7 +sVr +I17 +sVt +I13 +ssVh; +p1538 +(dp1539 +V +I5 +ssVpo +p1540 +(dp1541 +Vb +I1 +sVi +I21 +sVk +I9 +sVl +I5 +sVo +I11 +sVn +I10 +sVp +I1 +sVs +I94 +sVr +I26 +sVu +I2 +sVt +I3 +sVw +I22 +sVv +I1 +ssVpl +p1542 +(dp1543 +Vy +I14 +sVa +I69 +sVe +I50 +sVi +I19 +sVo +I3 +ssVpm +p1544 +(dp1545 +Ve +I1 +ssVCI +p1546 +(dp1547 +VI +I2 +sVA +I5 +sVm +I1 +sVD +I1 +ssVCH +p1548 +(dp1549 +VA +I1 +sV +I2 +ssVCO +p1550 +(dp1551 +VU +I28 +sVG +I1 +sVN +I3 +ssVc. +p1552 +(dp1553 +V +I3 +sVe +I1 +sV, +I18 +ssVCM +p1554 +(dp1555 +VU +I2 +ssVCL +p1556 +(dp1557 +VA +I1 +sVU +I3 +ssVTr +p1558 +(dp1559 +Vi +I2 +ssVCA +p1560 +(dp1561 +VT +I8 +ssVc +p1562 +(dp1563 +Vd +I3 +sVi +I1 +sVn +I1 +sVp +I1 +sVr +I1 +sVu +I1 +sVT +I1 +sVt +I1 +ssVCE +p1564 +(dp1565 +V +I2 +ssVCD +p1566 +(dp1567 +VI +I2 +ssV*V +p1568 +(dp1569 +Ve +I1 +ssVCR +p1570 +(dp1571 +V +I1 +ssV40 +p1572 +(dp1573 +V0 +I1 +ssVCU +p1574 +(dp1575 +VL +I1 +ssVCT +p1576 +(dp1577 +V +I8 +sV, +I2 +ssVCh +p1578 +(dp1579 +Va +I10 +sVr +I2 +sVu +I46 +ssVCo +p1580 +(dp1581 +Vp +I1 +sVr +I1 +sVm +I3 +sVu +I26 +sVn +I2 +ssVap +p1582 +(dp1583 +Va +I5 +sV +I1 +sVe +I3 +sVi +I3 +sVo +I3 +sVp +I78 +sVs +I15 +sVr +I2 +sVt +I5 +ssVp; +p1584 +(dp1585 +V +I1 +ssVCa +p1586 +(dp1587 +Vs +I1 +sVr +I4 +sVt +I9 +sVn +I3 +ssVCe +p1588 +(dp1589 +Vr +I1 +sVn +I1 +ssVt- +p1590 +(dp1591 +Vr +I1 +sV- +I2 +ssVp +p1592 +(dp1593 +Va +I4 +sVc +I1 +sVb +I1 +sVe +I1 +sVg +I1 +sVf +I5 +sVi +I3 +sVh +I7 +sVm +I3 +sVl +I1 +sVo +I2 +sVp +I3 +sVs +I3 +sVu +I4 +sVt +I8 +sVw +I3 +sVv +I1 +ssVp' +p1594 +(dp1595 +Vs +I2 +ssVp* +p1596 +(dp1597 +V* +I1 +ssVG +p1598 +(dp1599 +VB +I2 +ssVp. +p1600 +(dp1601 +V +I3 +ssV6] +p1602 +(dp1603 +V +I1 +ssVp, +p1604 +(dp1605 +V +I6 +ssVn] +p1606 +(dp1607 +V +I2 +ssVd- +p1608 +(dp1609 +Vh +I3 +sVc +I1 +sV- +I4 +ssVva +p1610 +(dp1611 +Vg +I1 +sVi +I11 +sVl +I23 +sVn +I22 +sVs +I2 +sVr +I6 +sVt +I8 +ssViz +p1612 +(dp1613 +Vi +I1 +sVe +I12 +ssVve +p1614 +(dp1615 +V +I384 +sVd +I68 +sV- +I1 +sVm +I4 +sV, +I18 +sVn +I97 +sVs +I24 +sVr +I360 +sVy +I1 +sVx +I6 +sV; +I1 +sV. 
+I5 +sVl +I14 +ssV"b +p1616 +(dp1617 +Vy +I1 +sVu +I3 +ssVip +p1618 +(dp1619 +Va +I3 +sV +I13 +sV' +I2 +sVi +I2 +sV* +I1 +sVm +I1 +sV, +I3 +sVl +I3 +sVs +I3 +sVt +I5 +sV; +I1 +sV. +I2 +ssVis +p1620 +(dp1621 +V +I557 +sV- +I1 +sV, +I13 +sV. +I3 +sV; +I1 +sV: +I1 +sV? +I1 +sVa +I17 +sVc +I23 +sVb +I1 +sVe +I50 +sVg +I2 +sVf +I18 +sVi +I25 +sVh +I90 +sVk +I7 +sVm +I7 +sVl +I9 +sVo +I6 +sVp +I27 +sVs +I47 +sVu +I5 +sVt +I106 +ssVav +p1622 +(dp1623 +Vi +I45 +sVy +I3 +sVe +I294 +sVa +I2 +sVo +I28 +ssV/C +p1624 +(dp1625 +Va +I1 +sVM +I1 +ssVit +p1626 +(dp1627 +Va +I21 +sV +I234 +sVc +I1 +sV: +I1 +sVe +I58 +sVi +I43 +sVh +I271 +sV- +I1 +sVl +I3 +sVo +I3 +sVn +I3 +sV) +I1 +sVs +I28 +sV! +I3 +sVu +I20 +sVt +I51 +sVy +I70 +sV; +I4 +sV. +I27 +sV, +I27 +sV? +I3 +ssViv +p1628 +(dp1629 +Vi +I15 +sVa +I19 +sVe +I149 +sVo +I1 +ssV-u +p1630 +(dp1631 +Vp +I1 +sVs +I1 +ssVih +p1632 +(dp1633 +Vo +I1 +ssVik +p1634 +(dp1635 +Ve +I32 +ssV X +p1636 +(dp1637 +VI +I5 +sV +I1 +sVL +I2 +sVX +I20 +sVV +I4 +ssVim +p1638 +(dp1639 +Va +I21 +sV +I96 +sVe +I65 +sVi +I5 +sV- +I2 +sVm +I18 +sV, +I25 +sVo +I2 +sV. +I13 +sVp +I63 +sVs +I14 +sV! +I2 +sVu +I1 +sV; +I5 +sV: +I1 +ssVil +p1640 +(dp1641 +V +I10 +sVe +I46 +sVd +I22 +sVf +I1 +sV) +I1 +sVk +I1 +sVl +I234 +sV, +I1 +sVi +I32 +sVs +I5 +sVt +I4 +sVy +I42 +ssVio +p1642 +(dp1643 +Vd +I3 +sVl +I7 +sVn +I430 +sVs +I1 +sVr +I9 +sVu +I51 +ssVin +p1644 +(dp1645 +V! +I1 +sV +I394 +sV" +I1 +sV' +I1 +sV- +I8 +sV, +I11 +sV. +I11 +sV; +I3 +sV: +I2 +sVa +I103 +sVc +I76 +sVe +I98 +sVd +I111 +sVg +I703 +sVf +I33 +sVi +I32 +sVh +I1 +sVk +I36 +sVj +I8 +sVl +I15 +sVn +I6 +sVq +I1 +sVs +I73 +sVu +I22 +sVt +I137 +sVw +I46 +sVv +I16 +sVy +I1 +ssVia +p1646 +(dp1647 +V +I3 +sVb +I13 +sVg +I18 +sV, +I9 +sVl +I22 +sVs +I1 +sVr +I6 +sVt +I26 +sVn +I11 +ssVvy +p1648 +(dp1649 +V +I4 +ssVic +p1650 +(dp1651 +Va +I113 +sV +I11 +sVe +I51 +sVi +I27 +sVh +I122 +sVk +I5 +sVl +I3 +sV, +I2 +sV) +I1 +sVs +I1 +sVu +I26 +sVt +I16 +sVy +I1 +ssVib +p1652 +(dp1653 +Vi +I6 +sVe +I16 +sVu +I13 +sVl +I42 +ssVie +p1654 +(dp1655 +V +I1 +sVc +I6 +sVd +I37 +sVf +I5 +sV- +I4 +sVl +I3 +sVn +I70 +sVs +I34 +sVr +I2 +sVu +I9 +sVt +I24 +sVw +I9 +sVv +I38 +sV. +I1 +ssVid +p1656 +(dp1657 +Va +I2 +sV +I70 +sV: +I2 +sVe +I68 +sVd +I5 +sVi +I5 +sV, +I6 +sVo +I4 +sVn +I2 +sV; +I2 +sV. 
+I1 +sVl +I3 +ssVig +p1658 +(dp1659 +Va +I1 +sVe +I13 +sVi +I7 +sVh +I107 +sVm +I3 +sVo +I1 +sVn +I24 +sVu +I1 +ssVif +p1660 +(dp1661 +V +I65 +sVe +I33 +sVf +I12 +sVi +I20 +sVl +I1 +sVu +I1 +sVt +I2 +sVy +I10 +ssV"D +p1662 +(dp1663 +Vi +I1 +sVe +I1 +ssV"F +p1664 +(dp1665 +Vr +I3 +ssVaz +p1666 +(dp1667 +Vi +I2 +sVa +I1 +sVe +I1 +ssV"A +p1668 +(dp1669 +VS +I1 +sVr +I1 +sVn +I1 +ssV"B +p1670 +(dp1671 +Vu +I2 +ssV"C +p1672 +(dp1673 +Va +I1 +sVe +I1 +ssV"w +p1674 +(dp1675 +Va +I1 +sVh +I1 +ssV"M +p1676 +(dp1677 +Va +I1 +sVy +I2 +ssV/e +p1678 +(dp1679 +Vt +I2 +ssV"O +p1680 +(dp1681 +Vh +I1 +sVf +I1 +ssV"H +p1682 +(dp1683 +Vi +I1 +sVe +I2 +ssV"I +p1684 +(dp1685 +V +I17 +sVt +I4 +sVn +I1 +ssV/a +p1686 +(dp1687 +Vr +I1 +ssV"W +p1688 +(dp1689 +Vh +I2 +ssV"P +p1690 +(dp1691 +VR +I2 +sVr +I4 +ssVI, +p1692 +(dp1693 +V +I6 +ssV"R +p1694 +(dp1695 +Vi +I1 +ssV"S +p1696 +(dp1697 +Va +I1 +sVm +I5 +ssV"i +p1698 +(dp1699 +Vf +I1 +sVn +I1 +ssVI +p1700 +(dp1701 +VD +I1 +sVK +I1 +sVa +I128 +sVc +I79 +sVb +I19 +sVe +I8 +sVd +I33 +sVg +I4 +sVf +I22 +sVi +I4 +sVh +I121 +sVk +I18 +sVm +I39 +sVl +I17 +sVo +I9 +sVn +I22 +sVp +I4 +sVs +I73 +sVr +I23 +sVu +I1 +sVt +I25 +sVw +I78 +sVv +I1 +sVy +I2 +ssVnh +p1702 +(dp1703 +Va +I10 +sVe +I1 +ssV"Y +p1704 +(dp1705 +Vo +I2 +ssVIX +p1706 +(dp1707 +V +I4 +ssV" +p1708 +(dp1709 +Va +I1 +sVA +I2 +sVc +I2 +sVB +I1 +sVd +I1 +sVi +I1 +sVh +I4 +sVS +I3 +sVW +I3 +sVI +I6 +sVs +I19 +sV" +I19 +sVt +I1 +sVw +I2 +sVH +I3 +sVr +I2 +sVb +I1 +sVT +I1 +ssVVE +p1710 +(dp1711 +V +I4 +sVR +I50 +sVN +I2 +ssV*] +p1712 +(dp1713 +V +I3 +ssVVI +p1714 +(dp1715 +VI +I8 +sV +I3 +sVD +I1 +ssVIS +p1716 +(dp1717 +V +I9 +sVC +I1 +sV" +I1 +sVE +I1 +sVS +I1 +sVT +I2 +ssVIR +p1718 +(dp1719 +V +I2 +sVE +I1 +ssVIU +p1720 +(dp1721 +VM +I1 +ssVIT +p1722 +(dp1723 +V +I1 +sVE +I3 +sVI +I2 +sVH +I1 +sVN +I1 +sVY +I4 +ssVIV +p1724 +(dp1725 +V +I4 +sVE +I2 +ssVII +p1726 +(dp1727 +VI +I8 +sV +I17 +sV, +I1 +ssVIM +p1728 +(dp1729 +VI +I3 +sVP +I1 +sVE +I1 +sV. +I1 +ssVIL +p1730 +(dp1731 +VI +I3 +sV +I2 +ssVIO +p1732 +(dp1733 +VN +I4 +ssVIN +p1734 +(dp1735 +VA +I3 +sV +I2 +sVC +I4 +sVE +I7 +sVD +I4 +sVG +I2 +sVT +I2 +ssVIA +p1736 +(dp1737 +VB +I1 +sVL +I1 +sV. +I5 +ssVIC +p1738 +(dp1739 +VI +I5 +sV +I4 +sVU +I1 +sVE +I1 +sVT +I1 +ssVIB +p1740 +(dp1741 +VI +I1 +sVU +I1 +ssVHE +p1742 +(dp1743 +V +I11 +sVR +I15 +sV, +I1 +ssVID +p1744 +(dp1745 +V +I1 +sVE +I2 +ssV": +p1746 +(dp1747 +V +I1 +ssVIF +p1748 +(dp1749 +V +I3 +ssVVa +p1750 +(dp1751 +Vn +I1 +ssVnx +p1752 +(dp1753 +Vi +I12 +ssVVe +p1754 +(dp1755 +Vr +I67 +ssVIs +p1756 +(dp1757 +V +I2 +ssVTY +p1758 +(dp1759 +V +I4 +sV; +I1 +sV, +I1 +ssVVl +p1760 +(dp1761 +V +I1 +ssVIt +p1762 +(dp1763 +Va +I1 +sV +I29 +sVs +I1 +ssVVo +p1764 +(dp1765 +Vl +I1 +ssVIm +p1766 +(dp1767 +Va +I1 +ssVIn +p1768 +(dp1769 +Vd +I1 +sV +I10 +sVt +I1 +sVf +I3 +ssVd) +p1770 +(dp1771 +V, +I1 +ssV-c +p1772 +(dp1773 +Vh +I1 +sVo +I1 +ssVTT +p1774 +(dp1775 +VM +I1 +sVE +I1 +ssVIf +p1776 +(dp1777 +V +I25 +ssV6 +p1778 +(dp1779 +Vi +I1 +sVf +I1 +ssVUp +p1780 +(dp1781 +Vp +I8 +sVo +I1 +ssVDe +p1782 +(dp1783 +V +I25 +sVc +I2 +sVs +I1 +sVp +I1 +sVf +I4 +ssV(~ +p1784 +(dp1785 +V) +I1 +ssVBC +p1786 +(dp1787 +VD +I2 +ssVBL +p1788 +(dp1789 +VI +I2 +ssV(s +p1790 +(dp1791 +Vu +I1 +ssVBO +p1792 +(dp1793 +VU +I1 +ssVBI +p1794 +(dp1795 +VL +I3 +ssV(t +p1796 +(dp1797 +Vh +I1 +ssVTS +p1798 +(dp1799 +V +I1 +sV* +I2 +ssVO? 
+p1800 +(dp1801 +V +I1 +ssVBU +p1802 +(dp1803 +VT +I3 +ssV"t +p1804 +(dp1805 +Vh +I2 +ssV(i +p1806 +(dp1807 +Vf +I2 +ssV(o +p1808 +(dp1809 +Vr +I4 +sVn +I1 +ssVBR +p1810 +(dp1811 +VE +I1 +sVO +I1 +ssV-b +p1812 +(dp1813 +Vy +I1 +sVr +I1 +sVu +I2 +ssVk, +p1814 +(dp1815 +V +I13 +ssVR. +p1816 +(dp1817 +V +I13 +ssVgt +p1818 +(dp1819 +Vh +I11 +ssV4. +p1820 +(dp1821 +V2 +I1 +ssVa" +p1822 +(dp1823 +V +I1 +ssV(_ +p1824 +(dp1825 +V) +I1 +ssVTO +p1826 +(dp1827 +V +I47 +sVO +I1 +sV? +I1 +ssVBo +p1828 +(dp1829 +Vx +I1 +sVt +I1 +ssVBi +p1830 +(dp1831 +Vt +I1 +ssV(T +p1832 +(dp1833 +Vh +I1 +ssVBu +p1834 +(dp1835 +Vt +I14 +ssV(I +p1836 +(dp1837 +V +I1 +sVn +I1 +ssVR* +p1838 +(dp1839 +V: +I1 +ssV-m +p1840 +(dp1841 +Vi +I2 +sVo +I2 +ssVr- +p1842 +(dp1843 +Vi +I7 +sVh +I1 +sVs +I1 +ssV(C +p1844 +(dp1845 +VM +I1 +ssVBy +p1846 +(dp1847 +V +I6 +ssVgs +p1848 +(dp1849 +V! +I2 +sV +I26 +sV, +I12 +sV. +I5 +sVt +I1 +sV; +I1 +ssVOn +p1850 +(dp1851 +V +I1 +sVe +I3 +sVl +I1 +ssVOh +p1852 +(dp1853 +V! +I1 +sV, +I1 +ssV"G +p1854 +(dp1855 +Vo +I1 +ssVHo +p1856 +(dp1857 +Vt +I2 +sVw +I7 +sVn +I1 +ssV(* +p1858 +(dp1859 +V) +I1 +ssV-l +p1860 +(dp1861 +Va +I7 +ssV30 +p1862 +(dp1863 +V +I1 +ssV); +p1864 +(dp1865 +V +I2 +ssVOu +p1866 +(dp1867 +Vr +I3 +sVg +I1 +ssVgr +p1868 +(dp1869 +Va +I29 +sVi +I6 +sVe +I87 +sVy +I6 +sVo +I10 +ssVOO +p1870 +(dp1871 +V +I1 +ssVON +p1872 +(dp1873 +V +I50 +sVC +I1 +sVE +I1 +sV' +I1 +sV, +I1 +sV. +I19 +sVS +I3 +sVT +I1 +ssVOM +p1874 +(dp1875 +VA +I2 +sV +I2 +ssVns +p1876 +(dp1877 +Va +I4 +sV +I80 +sVc +I8 +sVe +I41 +sVf +I1 +sVi +I50 +sV* +I1 +sV- +I1 +sV, +I11 +sVo +I24 +sV. +I14 +sVp +I5 +sV! +I2 +sVu +I10 +sVt +I44 +sVw +I6 +sV; +I3 +sV: +I2 +sVl +I1 +ssVOJ +p1878 +(dp1879 +VE +I7 +ssVOH +p1880 +(dp1881 +VN +I17 +ssVOG +p1882 +(dp1883 +V. +I1 +ssVOF +p1884 +(dp1885 +V +I6 +ssVOC +p1886 +(dp1887 +VR +I1 +ssVnu +p1888 +(dp1889 +Va +I5 +sVe +I12 +sVi +I2 +sVm +I2 +sVs +I1 +sVr +I3 +sVt +I5 +ssV-o +p1890 +(dp1891 +Vp +I1 +sVf +I1 +ssV5% +p1892 +(dp1893 +V +I1 +ssVOV +p1894 +(dp1895 +VI +I1 +ssVOU +p1896 +(dp1897 +V +I8 +sVR +I29 +sVT +I1 +ssVOT +p1898 +(dp1899 +VI +I1 +sV +I4 +sVH +I3 +ssVOS +p1900 +(dp1901 +VS +I1 +sVE +I1 +ssVOR +p1902 +(dp1903 +V +I15 +sVE +I1 +ssVS. +p1904 +(dp1905 +V +I51 +ssVb +p1906 +(dp1907 +Vh +I1 +ssV-n +p1908 +(dp1909 +Va +I2 +sVo +I1 +ssVHI +p1910 +(dp1911 +VS +I8 +sVM +I1 +ssVHN +p1912 +(dp1913 +VS +I17 +ssV) +p1914 +(dp1915 +Va +I1 +sVc +I2 +sVd +I1 +sVt +I2 +sVw +I1 +sVy +I2 +ssVT: +p1916 +(dp1917 +V +I1 +ssVHA +p1918 +(dp1919 +VN +I1 +sVT +I3 +sVD +I1 +sVV +I2 +ssVU +p1920 +(dp1921 +VA +I1 +sVc +I1 +sV" +I1 +sVD +I1 +sVG +I1 +sVH +I1 +sV* +I1 +sVU +I1 +sV= +I1 +ssVU" +p1922 +(dp1923 +V: +I1 +ssVo. +p1924 +(dp1925 +V +I8 +sV" +I3 +sVu +I1 +ssVo- +p1926 +(dp1927 +V +I1 +sVm +I2 +sV- +I1 +sVd +I3 +ssVo, +p1928 +(dp1929 +V +I31 +ssVa' +p1930 +(dp1931 +Vs +I13 +ssVo! 
+p1932 +(dp1933 +V +I2 +ssVo +p1934 +(dp1935 +V" +I3 +sV* +I1 +sVC +I10 +sVB +I2 +sVE +I2 +sVD +I1 +sVG +I1 +sVI +I5 +sVH +I1 +sVM +I16 +sVL +I14 +sVN +I1 +sVP +I4 +sVS +I6 +sVR +I5 +sVT +I1 +sVY +I1 +sVa +I69 +sVc +I42 +sVb +I75 +sVe +I34 +sVd +I47 +sVg +I30 +sVf +I36 +sVi +I35 +sVh +I102 +sVk +I10 +sVj +I5 +sVm +I127 +sVl +I39 +sVo +I31 +sVn +I33 +sVq +I2 +sVp +I42 +sVs +I90 +sVr +I41 +sVu +I16 +sVt +I103 +sVw +I49 +sVv +I12 +sVy +I40 +ssVg; +p1936 +(dp1937 +V +I10 +ssVHi +p1938 +(dp1939 +Vs +I8 +ssVo; +p1940 +(dp1941 +V +I4 +ssVo: +p1942 +(dp1943 +V +I2 +ssVHa +p1944 +(dp1945 +Vr +I2 +sVm +I1 +sVv +I4 +ssVHe +p1946 +(dp1947 +Va +I5 +sV +I38 +sVr +I23 +ssVbd +p1948 +(dp1949 +Vu +I5 +ssVbe +p1950 +(dp1951 +Va +I14 +sV +I262 +sVc +I9 +sVe +I76 +sVd +I3 +sVg +I11 +sVf +I29 +sVi +I36 +sVh +I20 +sV- +I2 +sVl +I40 +sVn +I6 +sVs +I19 +sVr +I43 +sV! +I1 +sVt +I35 +sVw +I2 +sVy +I11 +sV; +I1 +sV. +I2 +sV, +I1 +ssV_) +p1952 +(dp1953 +V +I1 +ssVba +p1954 +(dp1955 +Vc +I2 +sVb +I8 +sVd +I3 +sVn +I19 +sVs +I1 +sVr +I4 +sVt +I5 +ssVbb +p1956 +(dp1957 +Ve +I4 +sVo +I1 +ssVbl +p1958 +(dp1959 +Va +I10 +sVe +I156 +sVi +I21 +sVo +I1 +sVu +I2 +sVy +I25 +ssVbm +p1960 +(dp1961 +Vi +I9 +ssVbo +p1962 +(dp1963 +Vd +I3 +sVm +I1 +sVl +I1 +sVo +I6 +sVr +I3 +sVu +I32 +sVt +I10 +sVv +I2 +sVy +I1 +sVx +I1 +ssVc) +p1964 +(dp1965 +V +I1 +ssVbi +p1966 +(dp1967 +Va +I1 +sVd +I8 +sVl +I21 +sVn +I3 +sVr +I1 +sVt +I5 +ssVg: +p1968 +(dp1969 +V +I4 +ssVa, +p1970 +(dp1971 +V +I23 +sV- +I3 +ssVbt +p1972 +(dp1973 +Va +I3 +sV +I12 +sVe +I5 +sVf +I6 +sVl +I1 +sV, +I1 +ssVbu +p1974 +(dp1975 +Vg +I1 +sVn +I1 +sVs +I9 +sVr +I1 +sVt +I194 +sVy +I1 +ssVUn +p1976 +(dp1977 +Vi +I6 +sVd +I1 +sVl +I1 +sVf +I1 +ssVLi +p1978 +(dp1979 +Vt +I1 +ssVbr +p1980 +(dp1981 +Va +I2 +sVi +I10 +sVe +I9 +sVo +I38 +ssVbs +p1982 +(dp1983 +Vi +I2 +sVe +I12 +sVt +I2 +sVo +I11 +ssVyf +p1984 +(dp1985 +Vu +I1 +ssVAN +p1986 +(dp1987 +VY +I2 +sV +I23 +sVT +I6 +ssVby +p1988 +(dp1989 +Ve +I1 +sV +I147 +sV- +I1 +sVt +I1 +sV, +I1 +ssVoo +p1990 +(dp1991 +V! +I1 +sV +I32 +sVd +I37 +sVf +I5 +sVk +I45 +sVm +I24 +sVl +I14 +sVn +I38 +sVp +I2 +sVs +I6 +sVr +I23 +sVt +I1 +sV. +I2 +sV, +I7 +ssVon +p1992 +(dp1993 +V! +I5 +sV +I482 +sV' +I9 +sV- +I3 +sV, +I100 +sV/ +I1 +sV. +I31 +sV; +I23 +sV? +I2 +sV] +I2 +sVa +I43 +sVc +I41 +sVe +I120 +sVd +I67 +sVg +I63 +sVf +I23 +sVi +I23 +sVj +I2 +sVl +I39 +sVo +I9 +sVn +I11 +sVq +I3 +sVp +I1 +sVs +I127 +sVt +I59 +sVv +I50 +sVy +I2 +ssVom +p1994 +(dp1995 +Va +I31 +sV +I145 +sVe +I140 +sVf +I11 +sVi +I21 +sVm +I31 +sV, +I5 +sVo +I3 +sV. +I11 +sVp +I49 +sVy +I1 +sV> +I1 +ssVol +p1996 +(dp1997 +Va +I1 +sV +I8 +sVe +I39 +sVd +I24 +sVi +I9 +sVl +I25 +sVo +I11 +sV, +I2 +sVs +I1 +sVn +I1 +sVu +I20 +sVt +I1 +sVv +I11 +sVy +I1 +sV: +I1 +ssVok +p1998 +(dp1999 +Vi +I4 +sV +I25 +sVs +I9 +sVe +I24 +sV. +I1 +ssVoj +p2000 +(dp2001 +Ve +I28 +ssVoi +p2002 +(dp2003 +Vc +I13 +sVd +I4 +sVg +I1 +sVl +I1 +sVn +I44 +sVs +I2 +sVt +I1 +ssVoh +p2004 +(dp2005 +Vi +I1 +sVn +I18 +ssVog +p2006 +(dp2007 +Vi +I4 +sVy +I1 +sVr +I4 +sVe +I12 +sVn +I1 +ssVof +p2008 +(dp2009 +V +I710 +sVe +I4 +sVf +I21 +sVi +I4 +sV, +I1 +sVo +I3 +sV. +I3 +sVr +I2 +sVt +I16 +sV; +I1 +ssVoe +p2010 +(dp2011 +Vs +I21 +sVu +I1 +sVv +I1 +ssVod +p2012 +(dp2013 +V! +I1 +sV +I31 +sVe +I2 +sVd +I2 +sVg +I3 +sVi +I3 +sV- +I2 +sV, +I3 +sVn +I3 +sVu +I8 +sVa +I1 +sVy +I3 +sV. 
+I2 +ssVoc +p2014 +(dp2015 +Va +I3 +sVc +I14 +sVe +I6 +sVi +I9 +sVk +I2 +sVu +I1 +ssVob +p2016 +(dp2017 +Va +I11 +sV +I1 +sVb +I1 +sVe +I2 +sVj +I8 +sVl +I11 +sVo +I2 +sVs +I9 +sVt +I3 +ssVoa +p2018 +(dp2019 +Vc +I5 +sVl +I1 +ssVUR +p2020 +(dp2021 +VP +I1 +sVC +I28 +sV +I1 +ssVUM +p2022 +(dp2023 +V +I1 +sVB +I1 +ssVga +p2024 +(dp2025 +Vg +I11 +sVi +I44 +sVl +I6 +sVn +I5 +sVr +I11 +sVt +I3 +sVv +I8 +sVy +I1 +ssVUN +p2026 +(dp2027 +VI +I1 +sVD +I2 +ssVH. +p2028 +(dp2029 +V +I1 +ssVoy +p2030 +(dp2031 +Va +I4 +sV +I5 +sVe +I2 +sVf +I1 +sVi +I2 +sVm +I3 +ssVox +p2032 +(dp2033 +V +I1 +sV. +I1 +ssVow +p2034 +(dp2035 +Va +I6 +sV +I158 +sVe +I75 +sV' +I1 +sVi +I16 +sV, +I24 +sV. +I3 +sVs +I5 +sVn +I90 +sV! +I1 +sV; +I2 +sV: +I1 +sVl +I6 +ssVov +p2036 +(dp2037 +Vi +I6 +sVa +I6 +sVe +I81 +sVo +I8 +ssVou +p2038 +(dp2039 +V! +I2 +sV +I271 +sVb +I29 +sVd +I2 +sVg +I102 +sVl +I199 +sVn +I86 +sVs +I92 +sVr +I374 +sVt +I80 +sV; +I4 +sV. +I11 +sV, +I40 +sV? +I1 +ssVot +p2040 +(dp2041 +Va +I6 +sV +I297 +sVe +I22 +sVi +I17 +sVh +I183 +sV* +I1 +sV, +I12 +sVo +I1 +sV. +I3 +sV) +I1 +sVt +I2 +sV; +I1 +ssVos +p2042 +(dp2043 +Va +I2 +sVe +I80 +sVi +I21 +sVp +I6 +sVs +I43 +sVu +I2 +sVt +I63 +ssVor +p2044 +(dp2045 +V +I382 +sV) +I1 +sV* +I1 +sV, +I11 +sV. +I2 +sV; +I2 +sV: +I3 +sVa +I4 +sVc +I7 +sVb +I8 +sVe +I142 +sVd +I56 +sVg +I18 +sVi +I10 +sVk +I10 +sVm +I43 +sVl +I21 +sVo +I4 +sVn +I12 +sVs +I13 +sVr +I21 +sVt +I100 +sVw +I8 +sVy +I15 +ssVUC +p2046 +(dp2047 +VH +I1 +ssVop +p2048 +(dp2049 +V +I2 +sVe +I48 +sVi +I19 +sVl +I2 +sVo +I6 +sVp +I14 +sVr +I7 +sVu +I1 +sVt +I2 +sVy +I14 +ssV!! +p2050 +(dp2051 +V +I1 +ssV! +p2052 +(dp2053 +VA +I1 +sVC +I2 +sVB +I1 +sVD +I1 +sVG +I2 +sVF +I3 +sVI +I17 +sVH +I5 +sVK +I1 +sVM +I3 +sVL +I1 +sVO +I1 +sVN +I1 +sVP +I1 +sVT +I4 +sVW +I3 +sVa +I3 +sVb +I4 +sVf +I1 +sVi +I1 +sVh +I2 +sVj +I1 +sVt +I5 +ssVC +p2054 +(dp2055 +VD +I2 +sVo +I1 +ssV** +p2056 +(dp2057 +V +I8 +sVE +I1 +sVF +I1 +sVI +I1 +sV* +I17 +sVS +I2 +sVT +I4 +sVW +I1 +ssVh- +p2058 +(dp2059 +V- +I1 +ssVNo +p2060 +(dp2061 +V +I3 +sV; +I1 +sVt +I5 +sVw +I1 +sV, +I3 +ssV!) +p2062 +(dp2063 +V +I1 +ssVNa +p2064 +(dp2065 +Vy +I1 +ssV!* +p2066 +(dp2067 +V +I1 +sV* +I1 +ssVNe +p2068 +(dp2069 +Vi +I1 +sVv +I1 +ssVh. +p2070 +(dp2071 +V +I13 +sV" +I1 +ssV[E +p2072 +(dp2073 +Vt +I1 +ssV[D +p2074 +(dp2075 +Va +I1 +ssVh +p2076 +(dp2077 +V/ +I1 +sV2 +I1 +sV8 +I1 +sVC +I2 +sVF +I2 +sVI +I35 +sVH +I1 +sVM +I9 +sVL +I6 +sVS +I2 +sVR +I8 +sVa +I100 +sVc +I10 +sVb +I6 +sVe +I8 +sVd +I6 +sVg +I7 +sVf +I12 +sVi +I25 +sVh +I68 +sVk +I2 +sVj +I2 +sVm +I51 +sVl +I7 +sVo +I35 +sVn +I4 +sVp +I15 +sVs +I28 +sVr +I6 +sVu +I4 +sVt +I65 +sVw +I26 +sVv +I1 +sVy +I19 +ssVh! +p2078 +(dp2079 +V +I1 +sV" +I1 +ssVh' +p2080 +(dp2081 +Vs +I4 +ssV[M +p2082 +(dp2083 +Va +I1 +ssVeh +p2084 +(dp2085 +Va +I19 +sVi +I1 +sVe +I7 +ssVNI +p2086 +(dp2087 +VT +I2 +ssV"a +p2088 +(dp2089 +Vs +I1 +ssV., +p2090 +(dp2091 +V +I18 +ssVek +p2092 +(dp2093 +V +I5 +sVs +I6 +sV, +I2 +sV. +I1 +ssVu; +p2094 +(dp2095 +V +I4 +ssV[t +p2096 +(dp2097 +Vr +I1 +sVo +I2 +ssV. +p2098 +(dp2099 +V +I27 +sV" +I13 +sV. 
+I4 +sVA +I30 +sVC +I10 +sVB +I16 +sVE +I4 +sVD +I33 +sVG +I2 +sVF +I16 +sVI +I209 +sVH +I69 +sVK +I2 +sVJ +I34 +sVM +I51 +sVL +I21 +sVO +I9 +sVN +I9 +sVP +I7 +sVS +I61 +sVR +I7 +sVU +I2 +sVT +I45 +sVW +I33 +sVV +I73 +sVY +I23 +sVa +I2 +sVb +I1 +sVi +I1 +sVw +I2 +ssVNA +p2100 +(dp2101 +VL +I3 +ssVNB +p2102 +(dp2103 +VE +I7 +ssV[x +p2104 +(dp2105 +Vx +I1 +ssVND +p2106 +(dp2107 +VI +I1 +sV +I1 +sV* +I2 +sVE +I5 +sV, +I1 +ssVNE +p2108 +(dp2109 +V +I7 +sVD +I1 +sVG +I1 +sVS +I1 +sVW +I1 +sVY +I1 +ssVNG +p2110 +(dp2111 +V +I2 +ssVNY +p2112 +(dp2113 +V +I2 +ssVu. +p2114 +(dp2115 +V +I13 +ssV[g +p2116 +(dp2117 +Ve +I1 +ssVrw +p2118 +(dp2119 +Va +I14 +sVi +I4 +ssVMO +p2120 +(dp2121 +VN +I1 +ssV.2 +p2122 +(dp2123 +V9 +I1 +ssVNS +p2124 +(dp2125 +V +I2 +sVE +I1 +sVO +I17 +ssVNT +p2126 +(dp2127 +V! +I2 +sVA +I2 +sV +I1 +sVI +I3 +sV* +I1 +sVR +I1 +sVY +I2 +ssVNU +p2128 +(dp2129 +VM +I1 +ssVhy +p2130 +(dp2131 +V +I13 +sVs +I2 +sV, +I4 +sVp +I2 +ssVN, +p2132 +(dp2133 +V +I1 +ssV-t +p2134 +(dp2135 +Vi +I2 +sVh +I4 +sVm +I4 +sVo +I1 +sVw +I1 +ssVN. +p2136 +(dp2137 +V +I19 +ssVhr +p2138 +(dp2139 +Vi +I2 +sVu +I3 +sVe +I9 +sVo +I8 +ssVhs +p2140 +(dp2141 +V +I9 +sV, +I1 +ssVg- +p2142 +(dp2143 +Vh +I1 +sVr +I6 +sV- +I4 +ssVka +p2144 +(dp2145 +Vb +I2 +ssVht +p2146 +(dp2147 +V +I90 +sVe +I50 +sVf +I7 +sVi +I1 +sV, +I12 +sV. +I3 +sVs +I5 +sV; +I1 +sVl +I1 +ssVhu +p2148 +(dp2149 +Vs +I21 +sVr +I54 +sVm +I9 +sVt +I2 +sVn +I5 +ssVTP +p2150 +(dp2151 +V +I2 +ssVhh +p2152 +(dp2153 +Vi +I46 +sVe +I1 +ssVhi +p2154 +(dp2155 +Vc +I117 +sVb +I1 +sVe +I3 +sVg +I13 +sVm +I153 +sVl +I85 +sVo +I3 +sVn +I149 +sVp +I20 +sVs +I283 +sVr +I5 +sVt +I13 +sVv +I2 +ssVhn +p2156 +(dp2157 +Vs +I18 +sVe +I1 +ssVho +p2158 +(dp2159 +V +I30 +sVc +I1 +sVe +I1 +sVd +I1 +sVi +I5 +sVm +I30 +sVl +I23 +sVo +I15 +sVn +I10 +sVp +I29 +sVs +I37 +sVr +I30 +sVu +I184 +sVw +I78 +sV, +I7 +ssVhl +p2160 +(dp2161 +Vy +I14 +sVe +I1 +ssVhm +p2162 +(dp2163 +Ve +I19 +ssVhb +p2164 +(dp2165 +Vo +I1 +ssVD* +p2166 +(dp2167 +V +I1 +sVT +I1 +ssVha +p2168 +(dp2169 +Ve +I2 +sVd +I97 +sVk +I1 +sVm +I3 +sVl +I67 +sVn +I85 +sVp +I47 +sVs +I113 +sVr +I47 +sVt +I403 +sVv +I281 +sVz +I1 +ssVhf +p2170 +(dp2171 +Vu +I4 +ssVs? +p2172 +(dp2173 +V +I2 +sV" +I2 +ssVhe +p2174 +(dp2175 +Va +I67 +sV +I1028 +sVc +I2 +sVe +I4 +sVd +I53 +sVi +I20 +sV- +I1 +sVm +I35 +sVl +I13 +sVn +I102 +sVs +I28 +sVr +I827 +sVt +I9 +sVy +I29 +sV; +I1 +sV. +I1 +sV, +I10 +ssV[3 +p2176 +(dp2177 +V] +I2 +ssV[2 +p2178 +(dp2179 +V] +I3 +ssV[1 +p2180 +(dp2181 +V0 +I1 +sV] +I3 +ssV-w +p2182 +(dp2183 +Va +I1 +sVe +I1 +ssVuy +p2184 +(dp2185 +V +I1 +ssV*S +p2186 +(dp2187 +VT +I2 +ssVXV +p2188 +(dp2189 +VI +I8 +sV +I3 +sVl +I1 +ssVut +p2190 +(dp2191 +Va +I3 +sV +I265 +sVe +I51 +sVi +I22 +sVh +I13 +sVm +I3 +sV, +I11 +sV. +I1 +sVu +I4 +sVt +I2 +sVy +I14 +sV; +I1 +ssVgy +p2192 +(dp2193 +V +I1 +sV, +I1 +ssV.c +p2194 +(dp2195 +Vs +I1 +sVo +I1 +ssVup +p2196 +(dp2197 +V +I14 +sVe +I11 +sVd +I1 +sVi +I3 +sV, +I3 +sVo +I5 +sV. +I1 +sVp +I21 +sVs +I2 +sVr +I1 +sVu +I1 +sVt +I4 +ssVus +p2198 +(dp2199 +Va +I84 +sV +I70 +sVb +I17 +sVe +I79 +sVi +I27 +sVh +I3 +sV, +I10 +sVn +I10 +sVp +I9 +sVs +I2 +sVu +I4 +sVt +I136 +sVy +I7 +sV; +I1 +sV. +I5 +sVl +I7 +ssVur +p2200 +(dp2201 +Va +I32 +sV +I238 +sVc +I75 +sVb +I2 +sVe +I122 +sVg +I10 +sV@ +I1 +sVi +I14 +sVh +I1 +sV, +I5 +sVn +I33 +sVp +I13 +sVs +I79 +sVr +I5 +sV! +I1 +sVt +I9 +sVv +I1 +sVy +I1 +sV; +I2 +sV. 
+I6 +ssVum +p2202 +(dp2203 +Va +I2 +sV +I3 +sVb +I7 +sVe +I2 +sVi +I3 +sVm +I9 +sV, +I2 +sVo +I3 +sVp +I3 +sVs +I10 +ssVul +p2204 +(dp2205 +Va +I37 +sV +I33 +sVe +I3 +sVd +I198 +sVg +I5 +sVi +I3 +sVl +I18 +sVo +I5 +sV. +I5 +sVs +I1 +sV! +I1 +sVt +I12 +sVy +I1 +sV; +I1 +sVn +I1 +sV, +I2 +ssV.z +p2206 +(dp2207 +Vi +I1 +ssVun +p2208 +(dp2209 +Va +I16 +sV +I5 +sVc +I24 +sVe +I23 +sVd +I66 +sVg +I29 +sVf +I5 +sVi +I27 +sVh +I10 +sVk +I6 +sVm +I1 +sVl +I6 +sVn +I5 +sVp +I4 +sVs +I3 +sVr +I1 +sVu +I1 +sVt +I47 +sVw +I5 +sV. +I1 +ssVui +p2210 +(dp2211 +Va +I1 +sVc +I1 +sVe +I4 +sVl +I6 +sVn +I6 +sVp +I1 +sVs +I9 +sVr +I15 +sVu +I1 +sVt +I28 +sVv +I4 +ssVue +p2212 +(dp2213 +V +I21 +sVd +I12 +sVl +I2 +sV. +I1 +sVs +I24 +sVt +I5 +sVn +I22 +sV, +I4 +ssVud +p2214 +(dp2215 +V +I1 +sVe +I29 +sVd +I5 +sVg +I14 +sVi +I9 +sV. +I1 +ssVug +p2216 +(dp2217 +V +I1 +sVl +I1 +sVg +I3 +sVh +I150 +ssV.s +p2218 +(dp2219 +Ve +I1 +ssV.t +p2220 +(dp2221 +Vy +I1 +sVx +I3 +ssV.u +p2222 +(dp2223 +Vi +I1 +ssVuc +p2224 +(dp2225 +Va +I8 +sVc +I5 +sVe +I10 +sVi +I1 +sVh +I123 +sVk +I9 +sV. +I1 +sVt +I21 +ssVub +p2226 +(dp2227 +Vb +I4 +sVd +I5 +sVj +I13 +sVm +I9 +sVl +I10 +sVs +I3 +sVt +I22 +ssVac +p2228 +(dp2229 +Va +I2 +sV +I1 +sVc +I45 +sVe +I37 +sVi +I4 +sVh +I55 +sVk +I8 +sVq +I16 +sVr +I5 +sVu +I2 +sVt +I59 +sVy +I12 +ssVab +p2230 +(dp2231 +Vi +I16 +sVh +I3 +sVj +I1 +sVl +I128 +sVo +I33 +sVs +I15 +sVu +I1 +ssVae +p2232 +(dp2233 +Vl +I2 +ssVad +p2234 +(dp2235 +Va +I4 +sV +I117 +sVe +I50 +sVd +I12 +sVf +I5 +sVi +I12 +sVm +I14 +sV, +I3 +sVo +I2 +sVn +I1 +sVs +I2 +sVu +I2 +sVv +I23 +sVy +I109 +sV. +I1 +sVl +I1 +ssVag +p2236 +(dp2237 +Va +I38 +sVe +I79 +sVg +I3 +sVi +I16 +sVo +I8 +sVr +I18 +sVu +I3 +ssVaf +p2238 +(dp2239 +Vr +I9 +sVe +I3 +sVt +I18 +sVf +I59 +ssVai +p2240 +(dp2241 +Vd +I39 +sVg +I1 +sVm +I10 +sVl +I11 +sVn +I178 +sVs +I6 +sVr +I22 +sVt +I10 +ssVak +p2242 +(dp2243 +Va +I1 +sV +I15 +sVe +I97 +sVf +I5 +sVi +I19 +sVn +I1 +sVs +I3 +ssVTu +p2244 +(dp2245 +Ve +I2 +ssVam +p2246 +(dp2247 +Va +I5 +sV +I121 +sVb +I2 +sVe +I110 +sVi +I33 +sVm +I6 +sV, +I9 +sVo +I3 +sVp +I7 +sVu +I5 +ssVal +p2248 +(dp2249 +Va +I5 +sV +I91 +sVc +I3 +sVe +I15 +sVd +I78 +sVf +I8 +sVi +I12 +sVk +I12 +sVm +I13 +sVl +I277 +sVo +I24 +sV. +I5 +sVs +I9 +sVr +I8 +sVu +I4 +sVt +I25 +sVw +I27 +sVy +I1 +sV, +I12 +ssVao +p2250 +(dp2251 +Vr +I4 +ssVan +p2252 +(dp2253 +V! +I2 +sV +I307 +sV' +I17 +sV, +I35 +sV. +I9 +sV; +I7 +sV: +I1 +sVa +I12 +sVc +I83 +sVe +I8 +sVd +I717 +sVg +I50 +sVi +I17 +sVk +I7 +sVl +I2 +sVo +I13 +sVn +I67 +sVq +I5 +sVs +I25 +sVt +I68 +sVw +I4 +sVv +I1 +sVy +I103 +sVx +I12 +ssVTo +p2254 +(dp2255 +V +I11 +sV- +I1 +ssVas +p2256 +(dp2257 +Va +I3 +sV +I521 +sVc +I4 +sVe +I28 +sVi +I32 +sVh +I7 +sVk +I9 +sV, +I3 +sVo +I28 +sV. +I1 +sVp +I1 +sVs +I45 +sV! +I1 +sVu +I21 +sVt +I76 +sVy +I13 +sV; +I1 +ssVar +p2258 +(dp2259 +Va +I35 +sV +I114 +sVc +I15 +sVb +I1 +sVe +I138 +sVd +I69 +sVg +I6 +sVf +I2 +sVi +I67 +sVk +I16 +sVm +I21 +sVl +I28 +sVo +I2 +sVn +I16 +sVs +I31 +sVr +I80 +sVt +I121 +sVy +I31 +sV; +I1 +sV. +I2 +sV, +I4 +ssVau +p2260 +(dp2261 +Vc +I1 +sVd +I2 +sVg +I47 +sVl +I6 +sVn +I10 +sVs +I20 +sVt +I12 +ssVat +p2262 +(dp2263 +Va +I1 +sV +I560 +sVc +I15 +sVe +I176 +sV' +I1 +sVi +I215 +sVh +I45 +sV, +I12 +sVo +I3 +sVl +I4 +sVr +I2 +sVu +I27 +sVt +I87 +sVy +I1 +sV; +I1 +sV. +I1 +ssVTh +p2264 +(dp2265 +Va +I4 +sVe +I47 +sVi +I16 +sVo +I3 +sVr +I2 +sVu +I4 +ssVTi +p2266 +(dp2267 +Vm +I1 +ssVay +p2268 +(dp2269 +Va +I1 +sV +I140 +sVe +I5 +sV' +I1 +sVi +I7 +sV, +I16 +sV. 
+I11 +sVs +I43 +sV; +I8 +ssVax +p2270 +(dp2271 +V +I2 +sVe +I2 +ssVTe +p2272 +(dp2273 +Vx +I1 +sVl +I1 +ssV80 +p2274 +(dp2275 +V0 +I1 +sV +I1 +ssVAf +p2276 +(dp2277 +Vt +I3 +ssV(a +p2278 +(dp2279 +V +I1 +sVs +I3 +sVt +I1 +sVn +I2 +ssVni +p2280 +(dp2281 +Va +I2 +sVc +I11 +sVe +I18 +sVg +I9 +sVf +I2 +sVl +I1 +sVo +I26 +sVn +I50 +sVs +I24 +sVu +I1 +sVt +I24 +sVv +I5 +ssVnj +p2282 +(dp2283 +Vu +I8 +sVe +I2 +sVo +I5 +ssVnk +p2284 +(dp2285 +V +I27 +sVe +I1 +sVf +I1 +sVi +I9 +sV, +I7 +sVs +I4 +ssVnl +p2286 +(dp2287 +Vy +I56 +sVa +I1 +sVi +I1 +sVu +I5 +sVe +I2 +ssVnw +p2288 +(dp2289 +Va +I46 +sVh +I4 +sVe +I2 +sVi +I2 +sVo +I1 +ssVnn +p2290 +(dp2291 +Vi +I6 +sVe +I42 +sVu +I1 +sVo +I42 +ssVno +p2292 +(dp2293 +V +I72 +sVc +I2 +sVb +I1 +sVe +I1 +sVf +I1 +sVi +I1 +sVm +I3 +sVn +I70 +sVs +I1 +sVr +I20 +sVu +I29 +sVt +I345 +sVw +I134 +ssV46 +p2294 +(dp2295 +V] +I1 +ssVna +p2296 +(dp2297 +Vc +I2 +sVb +I19 +sVd +I1 +sVm +I7 +sVl +I99 +sVn +I12 +sVr +I7 +sVt +I67 +sVv +I1 +sVy +I1 +ssVnb +p2298 +(dp2299 +Ve +I15 +ssVnc +p2300 +(dp2301 +Ve +I223 +sVi +I15 +sVh +I3 +sVl +I34 +sVo +I18 +sVr +I7 +sVu +I4 +sVt +I2 +sVy +I12 +ssVnd +p2302 +(dp2303 +Va +I7 +sV +I775 +sV" +I1 +sVe +I127 +sV' +I5 +sVi +I35 +sV- +I3 +sV, +I34 +sVo +I26 +sVn +I8 +sVs +I43 +sVr +I3 +sVu +I26 +sV; +I6 +sV. +I7 +sV? +I1 +sVl +I3 +ssVne +p2304 +(dp2305 +V +I94 +sV' +I2 +sV, +I25 +sV. +I10 +sV; +I5 +sV? +I3 +sVa +I11 +sVc +I31 +sVe +I9 +sVd +I71 +sVg +I9 +sVf +I3 +sVi +I6 +sVm +I3 +sVo +I2 +sVn +I4 +sVq +I1 +sVs +I91 +sVr +I47 +sVt +I5 +sVw +I14 +sVv +I44 +sVy +I13 +sVx +I18 +ssVnf +p2306 +(dp2307 +Va +I2 +sVe +I9 +sVi +I16 +sVl +I11 +sVo +I19 +sVr +I1 +sVu +I7 +ssVng +p2308 +(dp2309 +Va +I12 +sV +I609 +sV! +I1 +sVe +I40 +sV' +I7 +sVf +I19 +sVi +I4 +sV- +I11 +sVl +I11 +sVo +I1 +sV. +I22 +sVs +I47 +sVr +I14 +sVu +I9 +sVt +I11 +sV; +I10 +sV: +I4 +sV, +I39 +sV? +I3 +ssVTN +p2310 +(dp2311 +VE +I1 +ssVny +p2312 +(dp2313 +V +I80 +sVb +I1 +sV) +I2 +sVm +I1 +sV, +I1 +sVo +I7 +sV. +I2 +sVt +I12 +sVw +I1 +ssVnz +p2314 +(dp2315 +Va +I2 +ssVTM +p2316 +(dp2317 +VA +I1 +sV +I1 +ssVTH +p2318 +(dp2319 +VI +I2 +sVA +I2 +sVE +I20 +sV. +I1 +ssVTI +p2320 +(dp2321 +VA +I1 +sVC +I2 +sVE +I2 +sVO +I2 +sVV +I1 +ssVnp +p2322 +(dp2323 +Vr +I2 +sVl +I3 +ssVnq +p2324 +(dp2325 +Vu +I11 +ssVnr +p2326 +(dp2327 +Va +I3 +sVi +I1 +sVe +I1 +ssVTE +p2328 +(dp2329 +VX +I6 +sVR +I1 +sVD +I3 +sVN +I8 +ssVnt +p2330 +(dp2331 +Va +I29 +sV +I220 +sVe +I113 +sVf +I1 +sVi +I95 +sVh +I18 +sVm +I8 +sV, +I45 +sVo +I22 +sV. +I29 +sVs +I33 +sVr +I34 +sV! +I9 +sVy +I3 +sVu +I1 +sV; +I12 +sV? +I1 +sVl +I30 +ssVHu +p2332 +(dp2333 +Vr +I1 +sVm +I2 +sVn +I1 +ssVnv +p2334 +(dp2335 +Vi +I39 +sVa +I2 +sVe +I26 +sVy +I1 +ssVTA +p2336 +(dp2337 +VB +I1 +sVR +I2 +sVL +I1 +ssVa +p2338 +(dp2339 +V" +I1 +sVE +I1 +sVD +I1 +sVM +I1 +sVP +I1 +sVU +I1 +sVV +I2 +sVa +I4 +sVc +I25 +sVb +I6 +sVe +I1 +sVd +I28 +sVg +I16 +sVf +I29 +sVi +I9 +sVh +I18 +sVk +I3 +sVj +I1 +sVm +I41 +sVl +I29 +sVo +I7 +sVn +I4 +sVq +I1 +sVp +I18 +sVs +I41 +sVr +I18 +sVu +I1 +sVt +I14 +sVw +I30 +sVv +I13 +sVy +I9 +ssVAC +p2340 +(dp2341 +VH +I1 +sVT +I1 +ssVAB +p2342 +(dp2343 +VI +I2 +sVO +I1 +ssV's +p2344 +(dp2345 +V +I115 +sV, +I3 +sV. +I2 +ssVAD +p2346 +(dp2347 +VY +I35 +sV +I1 +sVE +I1 +sV, +I1 +ssVAG +p2348 +(dp2349 +VE +I3 +ssVE, +p2350 +(dp2351 +V +I2 +ssVAI +p2352 +(dp2353 +VM +I1 +sVL +I1 +sVN +I2 +ssVxu +p2354 +(dp2355 +Vl +I1 +ssVAM +p2356 +(dp2357 +VA +I3 +sVE +I4 +ssVAL +p2358 +(dp2359 +VI +I5 +sV +I1 +sVL +I2 +sVD +I3 +sV, +I1 +ssVa. 
+p2360 +(dp2361 +V +I2 +sVt +I1 +ssVy: +p2362 +(dp2363 +V +I1 +ssVT, +p2364 +(dp2365 +V +I2 +ssVAR +p2366 +(dp2367 +V +I2 +sVR +I4 +sVE +I2 +sVT +I3 +ssVT* +p2368 +(dp2369 +V +I2 +sV* +I2 +ssVAT +p2370 +(dp2371 +V +I2 +sV: +I1 +sVT +I1 +sVH +I8 +ssVAV +p2372 +(dp2373 +VE +I2 +sVO +I1 +ssVAY +p2374 +(dp2375 +V +I1 +ssVa; +p2376 +(dp2377 +V +I3 +ssV-s +p2378 +(dp2379 +Vi +I1 +sVa +I1 +sVu +I2 +sVh +I1 +ssVg! +p2380 +(dp2381 +V +I1 +ssVT +p2382 +(dp2383 +VG +I8 +sVP +I1 +sVf +I1 +sVI +I3 +sVH +I1 +sVM +I1 +sVL +I2 +sVO +I1 +sVN +I3 +sVp +I2 +sVm +I1 +sVi +I1 +sVd +I1 +ssVT! +p2384 +(dp2385 +V +I1 +sV* +I1 +ssV"o +p2386 +(dp2387 +Vr +I1 +sVn +I1 +ssVn* +p2388 +(dp2389 +V* +I2 +ssV'T +p2390 +(dp2391 +V +I1 +ssVn, +p2392 +(dp2393 +V +I182 +sV" +I1 +ssVAd +p2394 +(dp2395 +Vi +I8 +ssVn. +p2396 +(dp2397 +V +I68 +sV" +I3 +ssVn/ +p2398 +(dp2399 +VC +I1 +ssVn +p2400 +(dp2401 +V" +I1 +sV/ +I1 +sV1 +I1 +sV3 +I1 +sV2 +I1 +sV5 +I1 +sV9 +I1 +sVA +I2 +sVE +I7 +sVF +I4 +sVI +I30 +sVK +I1 +sVM +I8 +sVL +I13 +sVS +I5 +sVR +I3 +sVU +I4 +sVT +I3 +sVW +I3 +sVV +I2 +sV[ +I1 +sVa +I154 +sVc +I31 +sVb +I51 +sVe +I50 +sVd +I26 +sVg +I19 +sVf +I52 +sVi +I87 +sVh +I109 +sVk +I5 +sVj +I3 +sVm +I91 +sVl +I19 +sVo +I127 +sVn +I14 +sVp +I31 +sVs +I77 +sVr +I24 +sVu +I21 +sVt +I254 +sVw +I88 +sVv +I10 +sVy +I41 +ssVn! +p2402 +(dp2403 +V +I8 +sV" +I1 +ssVn" +p2404 +(dp2405 +V +I1 +ssVTR +p2406 +(dp2407 +VI +I2 +sVA +I1 +ssVAm +p2408 +(dp2409 +V +I1 +sVo +I3 +ssVAl +p2410 +(dp2411 +Vi +I9 +sVl +I5 +ssVn' +p2412 +(dp2413 +Vs +I26 +sVt +I1 +ssVn: +p2414 +(dp2415 +V +I4 +ssVn; +p2416 +(dp2417 +V +I39 +ssVAu +p2418 +(dp2419 +Vs +I6 +ssVAt +p2420 +(dp2421 +V +I11 +ssVAw +p2422 +(dp2423 +Va +I1 +ssVn? +p2424 +(dp2425 +V +I3 +ssVn1 +p2426 +(dp2427 +V1 +I1 +sV0 +I3 +ssVG" +p2428 +(dp2429 +V +I1 +sssb. 
\ No newline at end of file diff --git a/lib/venus/examples/filters/guess-language/fr.data b/lib/venus/examples/filters/guess-language/fr.data new file mode 100644 index 0000000..597f9c5 --- /dev/null +++ b/lib/venus/examples/filters/guess-language/fr.data @@ -0,0 +1,22710 @@ +(itrigram +Trigram +p1 +(dp2 +S'length' +p3 +F8394.978022603751 +sS'lut' +p4 +(dp5 +V-D +p6 +(dp7 +Va +I3 +ssVG" +p8 +(dp9 +V +I2 +ssVG +p10 +(dp11 +Va +I1 +sVB +I2 +ssVG- +p12 +(dp13 +Vt +I5 +sVT +I1 +ssVGU +p14 +(dp15 +VT +I8 +ssVGR +p16 +(dp17 +V +I1 +ssVGE +p18 +(dp19 +VS +I3 +sVN +I1 +ssVGL +p20 +(dp21 +VI +I1 +ssVGI +p22 +(dp23 +VV +I1 +ssVGu +p24 +(dp25 +Vi +I1 +sVt +I26 +ssVGr +p26 +(dp27 +Va +I5 +sVe +I1 +ssVGe +p28 +(dp29 +Vr +I1 +sVo +I1 +sVn +I3 +ssVGa +p30 +(dp31 +Vr +I1 +sVu +I2 +sVl +I1 +sVï +I1 +ssVGo +p32 +(dp33 +Va +I1 +ssVGi +p34 +(dp35 +Vr +I9 +sVv +I1 +ssVZ, +p36 +(dp37 +V +I1 +ssVZ +p38 +(dp39 +VL +I1 +ssVèr +p40 +(dp41 +Ve +I319 +ssVès +p42 +(dp43 +V +I148 +sVe +I2 +sV- +I2 +sV, +I3 +sV; +I1 +sV_ +I1 +ssVèq +p44 +(dp45 +Vu +I4 +ssVèv +p46 +(dp47 +Vr +I2 +sVe +I4 +ssVèt +p48 +(dp49 +Vr +I1 +sVe +I9 +ssVèn +p50 +(dp51 +Ve +I24 +ssVèl +p52 +(dp53 +Ve +I7 +ssVèm +p54 +(dp55 +Ve +I30 +ssVèc +p56 +(dp57 +Vh +I2 +sVe +I23 +sVl +I14 +ssVèg +p58 +(dp59 +Vr +I8 +sVe +I7 +sVl +I1 +ssVèd +p60 +(dp61 +Ve +I4 +ssV-c +p62 +(dp63 +Vi +I14 +sVh +I6 +sVe +I16 +sVo +I2 +ssVty +p64 +(dp65 +Va +I1 +sV +I9 +sVr +I2 +sVl +I1 +sV, +I1 +ssVZa +p66 +(dp67 +Vm +I1 +ssVtw +p68 +(dp69 +Va +I3 +sVo +I1 +ssV-o +p70 +(dp71 +Vf +I1 +sVn +I7 +ssV-n +p72 +(dp73 +Ve +I1 +sVo +I15 +ssVàt +p74 +(dp75 +Ve +I1 +ssV-i +p76 +(dp77 +V8 +I1 +sVl +I83 +sVn +I5 +ssV«M +p78 +(dp79 +Vo +I1 +ssV-h +p80 +(dp81 +Ve +I1 +sVu +I1 +sVo +I3 +ssVtq +p82 +(dp83 +Vu +I1 +ssVm' +p84 +(dp85 +Va +I46 +sVe +I12 +sVé +I7 +sVh +I1 +sVo +I6 +sVi +I2 +sVy +I1 +ssVm +p86 +(dp87 +Va +I7 +sVà +I1 +sVc +I2 +sVb +I1 +sVe +I6 +sVd +I10 +sVi +I2 +sV( +I2 +sVl +I1 +sVo +I3 +sVq +I1 +sVp +I2 +sVs +I1 +sVr +I1 +sVt +I3 +ssV-t +p88 +(dp89 +Ve +I23 +sVh +I1 +sV- +I20 +sVm +I6 +sVr +I12 +sVu +I4 +ssVm, +p90 +(dp91 +V +I9 +ssVm. +p92 +(dp93 +V +I9 +ssVm) +p94 +(dp95 +V, +I1 +sV. +I1 +ssV-w +p96 +(dp97 +Vi +I1 +ssV"l +p98 +(dp99 +Ve +I1 +ssVtm +p100 +(dp101 +V +I5 +sV" +I1 +sVl +I2 +ssV9] +p102 +(dp103 +V +I2 +ssVm> +p104 +(dp105 +V +I1 +ssVm: +p106 +(dp107 +V +I2 +ssV9, +p108 +(dp109 +V +I8 +ssV9. +p110 +(dp111 +V +I5 +ssV9 +p112 +(dp113 +VU +I1 +sVD +I1 +ssV-r +p114 +(dp115 +Va +I2 +sVo +I1 +ssV99 +p116 +(dp117 +V +I1 +sV, +I1 +sV1 +I1 +sV4 +I1 +sV7 +I1 +sV9 +I1 +sV8 +I1 +ssV98 +p118 +(dp119 +V +I1 +sV, +I1 +ssV9: +p120 +(dp121 +V +I1 +ssV91 +p122 +(dp123 +V +I2 +ssV90 +p124 +(dp125 +V +I2 +sV0 +I1 +ssVm_ +p126 +(dp127 +V, +I2 +sV. +I1 +ssV92 +p128 +(dp129 +V, +I2 +ssV95 +p130 +(dp131 +V, +I1 +ssV94 +p132 +(dp133 +V +I2 +sV, +I2 +ssV97 +p134 +(dp135 +V1 +I2 +sV +I1 +sV; +I1 +sV, +I1 +ssV96 +p136 +(dp137 +V, +I3 +ssVme +p138 +(dp139 +V! +I1 +sV +I462 +sV- +I4 +sV, +I44 +sV. +I16 +sV; +I11 +sV: +I5 +sV? +I1 +sV[ +I3 +sV_ +I4 +sVa +I2 +sVd +I11 +sVi +I20 +sVm +I4 +sVl +I1 +sVo +I1 +sVn +I379 +sVs +I187 +sVr +I54 +sVu +I27 +sVt +I48 +sVz +I6 +ssV« +p140 +(dp141 +VQ +I1 +sV. +I1 +ssVma +p142 +(dp143 +Vî +I46 +sV +I59 +sV- +I1 +sV, +I3 +sVc +I7 +sVb +I4 +sVe +I2 +sVd +I67 +sVg +I25 +sVi +I262 +sVk +I3 +sVj +I14 +sVm +I1 +sVl +I72 +sVï +I1 +sVn +I185 +sVs +I17 +sVr +I70 +sVu +I22 +sVt +I39 +sVy +I23 +sVx +I3 +ssVmb +p144 +(dp145 +Va +I34 +sV +I2 +sVe +I25 +sVi +I11 +sVè +I1 +sVl +I39 +sVo +I88 +sV. 
+I1 +sVé +I1 +sVr +I40 +ssVmm +p146 +(dp147 +Va +I26 +sVe +I313 +sVé +I9 +sVè +I4 +sVo +I9 +sVi +I5 +sVu +I13 +ssVml +p148 +(dp149 +V +I2 +sVe +I1 +ssVmo +p150 +(dp151 +Ve +I7 +sVd +I17 +sVi +I197 +sVm +I16 +sVl +I4 +sVn +I286 +sVq +I3 +sVs +I8 +sVr +I57 +sVu +I77 +sVt +I20 +sVv +I3 +sVy +I4 +sV. +I1 +ssVmn +p152 +(dp153 +Vé +I2 +sVi +I2 +sVa +I1 +sVo +I1 +ssVmi +p154 +(dp155 +V +I15 +sVe +I53 +sVd +I4 +sVg +I1 +sVè +I20 +sV- +I1 +sVl +I85 +sVn +I36 +sVq +I1 +sVs +I75 +sVr +I27 +sVt +I39 +sV. +I1 +sV, +I8 +ssVmu +p156 +(dp157 +V +I2 +sVe +I2 +sVm +I1 +sVl +I5 +sVn +I13 +sVp +I3 +sVs +I16 +sVr +I2 +sVt +I2 +ssVmt +p158 +(dp159 +Ve +I2 +ssV 9 +p160 +(dp161 +V1 +I1 +sV0 +I3 +sV3 +I1 +sV2 +I2 +sV5 +I1 +sV4 +I1 +sV7 +I1 +sV6 +I1 +sV9 +I1 +sV8 +I1 +ssVmp +p162 +(dp163 +Va +I30 +sV +I5 +sVe +I17 +sVi +I11 +sVh +I1 +sVê +I9 +sVl +I25 +sVo +I32 +sV, +I2 +sVé +I3 +sVs +I48 +sVr +I16 +sVu +I8 +sVt +I15 +sV. +I1 +ssVms +p164 +(dp165 +V +I3 +sV, +I1 +sVt +I1 +sV. +I1 +ssV"p +p166 +(dp167 +Vu +I1 +ssVmy +p168 +(dp169 +Vs +I1 +sVr +I2 +ssVLe +p170 +(dp171 +V +I87 +sVc +I4 +sVb +I1 +sVg +I1 +sVi +I3 +sVm +I3 +sVs +I48 +sVu +I4 +sVt +I6 +sVy +I1 +ssVLa +p172 +(dp173 +V +I42 +sVv +I1 +sVm +I3 +sVt +I2 +sVn +I1 +ssVLo +p174 +(dp175 +Vi +I1 +sVm +I1 +sVs +I1 +sVu +I13 +sVn +I1 +ssV"s +p176 +(dp177 +Vm +I1 +ssVLi +p178 +(dp179 +Vs +I11 +sVm +I2 +sVt +I6 +sVn +I2 +ssVLu +p180 +(dp181 +Vc +I2 +ssVLE +p182 +(dp183 +V +I1 +sVF +I1 +sVM +I1 +sVQ +I1 +sVS +I1 +sVT +I2 +ssVLA +p184 +(dp185 +VI +I1 +sV +I1 +sVR +I1 +ssVLO +p186 +(dp187 +VR +I1 +ssVLL +p188 +(dp189 +V +I2 +sVE +I1 +ssVLI +p190 +(dp191 +VA +I1 +sVC +I2 +sVB +I1 +sVE +I1 +sVG +I1 +sVI +I1 +sVM +I3 +sVT +I3 +ssVLT +p192 +(dp193 +VA +I2 +ssVLU +p194 +(dp195 +VD +I2 +ssVLS +p196 +(dp197 +V, +I1 +ssVLP +p198 +(dp199 +VH +I1 +ssVL' +p200 +(dp201 +Va +I16 +sV +I1 +sVE +I1 +sVi +I1 +sVÉ +I1 +sVh +I1 +sVo +I2 +sVé +I1 +sVA +I2 +sVO +I2 +sVu +I4 +sVe +I2 +ssVL +p202 +(dp203 +VP +I2 +sVM +I1 +sVd +I1 +sVD +I1 +ssVL, +p204 +(dp205 +V +I1 +ssVmâ +p206 +(dp207 +Vt +I2 +ssVmé +p208 +(dp209 +V +I15 +sVc +I4 +sVe +I11 +sVd +I14 +sVm +I4 +sVl +I5 +sVn +I1 +sVp +I5 +sVs +I8 +sVr +I17 +sVt +I22 +sV. +I2 +sV? +I1 +ssVmè +p210 +(dp211 +Vr +I28 +sVt +I1 +sVn +I17 +ssVmê +p212 +(dp213 +Vm +I66 +sVl +I5 +ssVmô +p214 +(dp215 +Vn +I4 +ssV_[ +p216 +(dp217 +V2 +I1 +ssV_V +p218 +(dp219 +Va +I1 +sVo +I2 +ssV_T +p220 +(dp221 +Ve +I3 +ssV_S +p222 +(dp223 +Vi +I4 +sVe +I2 +ssV_R +p224 +(dp225 +Ve +I1 +ssV_P +p226 +(dp227 +Va +I1 +sVr +I8 +ssV_O +p228 +(dp229 +V +I1 +sVe +I1 +ssV_N +p230 +(dp231 +Vo +I1 +ssV_M +p232 +(dp233 +Vé +I10 +sVa +I2 +sVo +I2 +ssV_L +p234 +(dp235 +Va +I1 +sVe +I7 +sV' +I1 +ssV_J +p236 +(dp237 +Vo +I2 +ssV_I +p238 +(dp239 +Vt +I1 +ssV_H +p240 +(dp241 +Vi +I3 +sVe +I1 +sVo +I1 +ssV_E +p242 +(dp243 +Vx +I1 +sVl +I1 +ssV_D +p244 +(dp245 +Vi +I3 +ssV_C +p246 +(dp247 +Va +I9 +sVr +I1 +sVo +I2 +ssV_A +p248 +(dp249 +Vr +I1 +sVn +I1 +ssVrô +p250 +(dp251 +Vl +I3 +sVt +I3 +sVn +I12 +ssV_u +p252 +(dp253 +Vt +I1 +sVn +I1 +ssVrû +p254 +(dp255 +Vl +I13 +ssV_m +p256 +(dp257 +Vê +I1 +ssVrç +p258 +(dp259 +Va +I4 +sVu +I11 +sVo +I6 +ssV_i +p260 +(dp261 +Vn +I1 +ssV_h +p262 +(dp263 +Vo +I1 +ssV_g +p264 +(dp265 +Vl +I1 +ssVrî +p266 +(dp267 +Vt +I1 +ssVrè +p268 +(dp269 +Vs +I139 +sVr +I38 +sVt +I3 +sVg +I1 +ssVré +p270 +(dp271 +Vé +I2 +sV +I22 +sV, +I5 +sV; +I2 +sV? 
+I1 +sVa +I17 +sVc +I33 +sVb +I1 +sVe +I25 +sVd +I10 +sVg +I6 +sVf +I11 +sVi +I2 +sVm +I15 +sVl +I1 +sVn +I1 +sVp +I77 +sVs +I61 +sVr +I1 +sVu +I1 +sVt +I23 +sVv +I26 +ssVrê +p272 +(dp273 +Vc +I3 +sVm +I13 +sVt +I27 +sVv +I4 +ssVxf +p274 +(dp275 +Vo +I1 +ssV_; +p276 +(dp277 +V +I10 +ssV_: +p278 +(dp279 +V +I1 +ssV_. +p280 +(dp281 +V +I27 +sV, +I1 +ssV_, +p282 +(dp283 +V +I43 +ssV_) +p284 +(dp285 +V +I1 +ssV_ +p286 +(dp287 +Va +I2 +sVE +I1 +sVd +I4 +sV( +I1 +sVj +I1 +sVq +I1 +sVp +I1 +sVe +I1 +ssVr] +p288 +(dp289 +V +I2 +ssVr_ +p290 +(dp291 +V +I1 +sV; +I1 +sV. +I1 +ssV_É +p292 +(dp293 +Vp +I1 +sVl +I1 +ssVrt +p294 +(dp295 +Va +I43 +sV +I103 +sVe +I78 +sV@ +I2 +sVi +I188 +sVh +I4 +sV, +I10 +sVo +I23 +sV. +I8 +sVé +I17 +sVs +I21 +sV! +I1 +sVu +I32 +sVî +I1 +sVy +I2 +sV; +I4 +sVr +I2 +sV_ +I1 +ssVru +p296 +(dp297 +Va +I3 +sV +I7 +sVc +I5 +sVb +I5 +sVe +I10 +sVd +I9 +sVi +I28 +sVm +I1 +sV, +I2 +sVn +I5 +sVp +I2 +sVs +I9 +sVr +I9 +sVt +I17 +sVv +I1 +sV. +I1 +sVl +I1 +ssVrv +p298 +(dp299 +Va +I11 +sVe +I10 +sVi +I30 +sVo +I2 +sVé +I2 +sVu +I2 +ssVrw +p300 +(dp301 +Va +I1 +sVo +I1 +ssVrp +p302 +(dp303 +Vé +I1 +sVa +I1 +sVs +I11 +sVr +I15 +sVe +I5 +ssVrq +p304 +(dp305 +Vu +I52 +ssVrr +p306 +(dp307 +Va +I42 +sVe +I106 +sVi +I76 +sVh +I1 +sVê +I10 +sVo +I24 +sVé +I4 +sVu +I1 +ssVrs +p308 +(dp309 +V! +I4 +sV +I324 +sV) +I1 +sVe +I18 +sVp +I1 +sVé +I6 +sVè +I1 +sV, +I60 +sVo +I37 +sV. +I24 +sVq +I8 +sVi +I6 +sVu +I5 +sVt +I5 +sVa +I19 +sV[ +I1 +sV: +I2 +sV; +I14 +sV? +I3 +ssVry +p310 +(dp311 +Vi +I2 +sV +I14 +sV* +I1 +sVm +I1 +sV, +I2 +ssVrz +p312 +(dp313 +Ve +I2 +ssVrd +p314 +(dp315 +Va +I30 +sV +I40 +sVe +I47 +sV' +I8 +sVi +I44 +sVè +I3 +sV- +I1 +sV, +I8 +sVo +I19 +sV. +I3 +sVé +I7 +sVs +I8 +sVr +I11 +sVu +I19 +sVw +I1 +sV; +I2 +sV: +I1 +sV[ +I1 +ssVre +p316 +(dp317 +V! +I8 +sV +I911 +sV) +I2 +sV- +I8 +sV, +I162 +sV. +I75 +sV; +I64 +sV: +I22 +sV? +I19 +sVg +I28 +sV[ +I2 +sV_ +I9 +sVa +I22 +sVc +I52 +sVb +I3 +sVe +I10 +sVd +I17 +sVç +I19 +sVf +I15 +sVi +I24 +sVh +I1 +sVj +I1 +sVm +I115 +sVl +I30 +sVn +I335 +sVq +I9 +sVp +I44 +sVs +I370 +sVr +I50 +sVu +I59 +sVt +I62 +sVv +I38 +sVz +I32 +ssVrf +p318 +(dp319 +Va +I6 +sV +I3 +sVf +I1 +sVi +I1 +sVo +I1 +sVs +I2 +sVu +I2 +ssVrg +p320 +(dp321 +Va +I3 +sV +I24 +sVe +I36 +sVi +I14 +sVh +I1 +sV- +I1 +sV, +I4 +sVo +I2 +sVn +I2 +sVé +I14 +sVu +I3 +sV/ +I4 +sVè +I1 +sV; +I1 +sV. +I3 +ssVra +p322 +(dp323 +Vî +I17 +sV +I74 +sVp +I21 +sV- +I2 +sV, +I15 +sV. +I2 +sV: +I1 +sVa +I3 +sVc +I32 +sVb +I20 +sVe +I1 +sVd +I39 +sVg +I41 +sVf +I3 +sVi +I267 +sVh +I2 +sVë +I1 +sVm +I21 +sVl +I27 +sVo +I6 +sVn +I224 +sVé +I2 +sVs +I42 +sVr +I12 +sVu +I3 +sVt +I36 +sVv +I45 +sVy +I2 +ssVrb +p324 +(dp325 +Vi +I4 +sVa +I9 +sVk +I1 +sVr +I6 +sVe +I4 +ssVrc +p326 +(dp327 +V +I4 +sVc +I1 +sVe +I45 +sVi +I18 +sVh +I45 +sVk +I1 +sV, +I4 +sVo +I7 +sVî +I1 +sVé +I1 +sVs +I3 +sVr +I1 +sVè +I1 +sV; +I1 +sV. +I3 +sVl +I1 +ssVrl +p328 +(dp329 +Va +I22 +sVe +I37 +sVd +I3 +sVé +I6 +sVo +I3 +sVi +I3 +sVy +I1 +ssVrm +p330 +(dp331 +Va +I33 +sV +I4 +sV) +I1 +sVe +I58 +sVi +I27 +sVl +I1 +sVo +I11 +sV, +I1 +sVé +I14 +sV. +I1 +ssVrn +p332 +(dp333 +Va +I44 +sV +I2 +sVe +I39 +sVé +I12 +sVo +I3 +sV. +I1 +sVi +I10 +sVu +I1 +ssVro +p334 +(dp335 +Vï +I1 +sVa +I1 +sVc +I57 +sVb +I17 +sVd +I28 +sVg +I7 +sVf +I30 +sVi +I164 +sVh +I1 +sVj +I32 +sVm +I35 +sVl +I25 +sVo +I2 +sVn +I157 +sVq +I3 +sVp +I49 +sVs +I30 +sVr +I2 +sVu +I138 +sVt +I6 +sVv +I28 +sVy +I34 +ssVrh +p336 +(dp337 +Vu +I2 +ssVri +p338 +(dp339 +V +I8 +sV, +I6 +sV; +I2 +sV? 
+I1 +sVâ +I2 +sV_ +I1 +sVa +I35 +sVc +I16 +sVb +I36 +sVe +I146 +sVd +I16 +sVg +I34 +sVf +I5 +sVé +I1 +sVè +I24 +sVm +I15 +sVl +I15 +sVo +I19 +sVn +I53 +sVq +I12 +sVp +I12 +sVs +I102 +sVr +I38 +sVu +I2 +sVt +I110 +sVv +I63 +sVx +I9 +sVz +I3 +ssV_à +p340 +(dp341 +V +I1 +ssVr? +p342 +(dp343 +V +I10 +sV- +I1 +ssVr: +p344 +(dp345 +V +I11 +ssVr; +p346 +(dp347 +V +I48 +ssVr' +p348 +(dp349 +Va +I1 +sVs +I1 +sVo +I2 +ssVr +p350 +(dp351 +Vj +I13 +sVR +I2 +sVi +I28 +sV( +I1 +sV/ +I1 +sV1 +I2 +sV2 +I1 +sV9 +I1 +sVA +I4 +sVC +I24 +sVB +I2 +sVD +I2 +sVG +I1 +sVI +I2 +sVH +I1 +sVJ +I2 +sVM +I12 +sVL +I3 +sVN +I3 +sVP +I22 +sVâ +I1 +sVW +I1 +sV[ +I1 +sVZ +I2 +sVa +I84 +sVà +I45 +sVc +I66 +sVb +I11 +sVe +I85 +sVd +I242 +sVg +I4 +sVf +I25 +sVé +I10 +sVh +I7 +sVê +I5 +sVm +I64 +sVl +I302 +sVo +I27 +sVn +I25 +sVq +I53 +sVp +I71 +sVs +I88 +sVr +I17 +sVu +I63 +sVt +I56 +sVw +I3 +sVv +I26 +sVy +I2 +ssVr! +p352 +(dp353 +V +I6 +ssVr, +p354 +(dp355 +V +I204 +ssVr- +p356 +(dp357 +Vd +I3 +sVt +I11 +sVl +I5 +ssVr. +p358 +(dp359 +VA +I1 +sV +I92 +sV» +I1 +sV- +I1 +sV0 +I1 +ssVr/ +p360 +(dp361 +VN +I1 +ssVr* +p362 +(dp363 +V +I2 +ssVù, +p364 +(dp365 +V +I1 +ssVQu +p366 +(dp367 +Va +I12 +sVi +I5 +sVe +I26 +sVo +I8 +sV' +I6 +ssVQU +p368 +(dp369 +VI +I1 +sVE +I1 +sV' +I2 +ssVd. +p370 +(dp371 +V +I11 +ssVd, +p372 +(dp373 +V +I31 +ssVd- +p374 +(dp375 +Va +I1 +sVi +I6 +sVo +I1 +sVp +I1 +sVs +I2 +sVr +I2 +sVT +I1 +sVw +I1 +ssVd) +p376 +(dp377 +V: +I1 +ssVéq +p378 +(dp379 +Vu +I14 +ssVd' +p380 +(dp381 +Va +I91 +sVA +I18 +sVâ +I1 +sVE +I20 +sVi +I16 +sVh +I27 +sVê +I30 +sVo +I21 +sVÉ +I1 +sVé +I26 +sVs +I1 +sVu +I109 +sVO +I3 +sVy +I2 +sVe +I53 +sVU +I1 +sVI +I3 +ssVés +p382 +(dp383 +Va +I8 +sV +I81 +sVe +I47 +sVi +I13 +sVh +I2 +sV, +I37 +sVo +I11 +sV. +I11 +sVu +I36 +sV; +I5 +sV? +I3 +ssVér +p384 +(dp385 +Va +I48 +sVe +I31 +sVé +I15 +sVê +I2 +sVo +I15 +sVi +I75 +sVu +I1 +ssVéu +p386 +(dp387 +Vs +I1 +sVo +I1 +ssVét +p388 +(dp389 +Va +I276 +sVe +I34 +sVé +I80 +sVh +I1 +sVo +I21 +sVi +I34 +sVr +I51 +sVu +I5 +ssVd +p390 +(dp391 +VC +I1 +sVB +I4 +sVE +I2 +sVI +I3 +sVJ +I1 +sVM +I1 +sVP +I3 +sVS +I2 +sVW +I1 +sVV +I1 +sV[ +I1 +sV_ +I1 +sVa +I22 +sVà +I4 +sVc +I9 +sVb +I20 +sVe +I13 +sVd +I24 +sVg +I2 +sVf +I9 +sVi +I26 +sVh +I7 +sVj +I10 +sVm +I15 +sVl +I16 +sVo +I29 +sVn +I5 +sVq +I5 +sVp +I26 +sVs +I14 +sVr +I7 +sVu +I6 +sVt +I16 +sVw +I4 +sVv +I6 +sVy +I8 +ssVév +p392 +(dp393 +Va +I4 +sVe +I6 +sVé +I20 +sVè +I1 +sVo +I15 +sVi +I2 +sVu +I1 +ssVéi +p394 +(dp395 +Vs +I1 +sVm +I1 +sVt +I3 +sVd +I1 +ssVd? +p396 +(dp397 +V +I2 +ssVéj +p398 +(dp399 +Và +I17 +sVe +I4 +sVo +I3 +ssVd: +p400 +(dp401 +V +I2 +ssVél +p402 +(dp403 +Va +I36 +sVe +I13 +sVi +I21 +sVè +I3 +sVo +I4 +sVé +I3 +sVu +I1 +ssVéo +p404 +(dp405 +Vd +I3 +sVl +I6 +sVt +I1 +ssVén +p406 +(dp407 +Va +I7 +sVe +I5 +sVi +I16 +sVè +I1 +sVo +I15 +sVé +I16 +sVu +I1 +ssVéa +p408 +(dp409 +Vt +I16 +sVb +I9 +sVu +I2 +sVl +I3 +sVn +I3 +ssVéc +p410 +(dp411 +Va +I2 +sVe +I20 +sVi +I28 +sVh +I19 +sVl +I9 +sVo +I27 +sVr +I37 +sVu +I22 +ssVéb +p412 +(dp413 +Va +I8 +sVi +I1 +sVr +I4 +ssVée +p414 +(dp415 +V +I106 +sV, +I28 +sV. +I16 +sVs +I57 +sVn +I6 +sV; +I9 +sV: +I3 +sV[ +I1 +sV? 
+I2 +ssVéd +p416 +(dp417 +Va +I2 +sVe +I12 +sVi +I47 +sVé +I6 +sVr +I2 +sVu +I8 +ssVég +p418 +(dp419 +Va +I8 +sVe +I1 +sVi +I4 +sVè +I4 +sVl +I3 +sVo +I140 +sVn +I2 +sVé +I1 +sVr +I2 +sVu +I2 +sVy +I1 +ssVéf +p420 +(dp421 +Va +I5 +sVe +I5 +sVi +I4 +sVl +I4 +sVé +I3 +sVr +I1 +sVu +I3 +ssVé[ +p422 +(dp423 +V1 +I1 +ssVé_ +p424 +(dp425 +V, +I3 +ssVdn +p426 +(dp427 +Vi +I2 +ssVdo +p428 +(dp429 +V +I13 +sVc +I16 +sVe +I1 +sVg +I2 +sVi +I14 +sVm +I13 +sV, +I11 +sVn +I203 +sVs +I5 +sVr +I37 +sVu +I64 +sVw +I4 +sV[ +I2 +sV. +I5 +sV_ +I1 +sVl +I9 +ssVé; +p430 +(dp431 +V +I17 +ssVé: +p432 +(dp433 +V +I8 +ssVdj +p434 +(dp435 +Vu +I1 +ssVé? +p436 +(dp437 +V +I8 +ssVdi +p438 +(dp439 +V +I3 +sV, +I1 +sV1 +I8 +sVa +I43 +sVc +I13 +sVe +I34 +sVd +I393 +sVg +I13 +sVf +I13 +sVé +I1 +sVè +I2 +sVk +I1 +sVm +I3 +sVl +I3 +sVo +I2 +sVn +I41 +sVq +I6 +sVp +I1 +sVs +I151 +sVr +I50 +sVu +I5 +sVt +I434 +sVv +I5 +sVx +I25 +ssVdd +p440 +(dp441 +Vi +I8 +sVe +I1 +ssVde +p442 +(dp443 +V! +I12 +sV +I1745 +sV- +I1 +sV, +I177 +sV. +I85 +sV; +I49 +sV: +I18 +sV? +I7 +sVM +I1 +sV_ +I8 +sVa +I11 +sVc +I9 +sVb +I1 +sVd +I11 +sVg +I1 +sVf +I1 +sVm +I130 +sVl +I8 +sVn +I34 +sVp +I9 +sVs +I430 +sVr +I84 +sVu +I155 +sVt +I1 +sVv +I40 +sVy +I3 +sVx +I1 +sVz +I9 +ssVdb +p444 +(dp445 +Ve +I1 +ssVda +p446 +(dp447 +V +I25 +sVb +I6 +sVd +I1 +sVg +I1 +sVi +I54 +sV- +I9 +sVj +I2 +sVm +I46 +sVl +I10 +sVn +I401 +sVy +I5 +sVs +I2 +sVr +I1 +sVt +I33 +sVv +I9 +sV[ +I1 +sV; +I3 +sV: +I1 +sV, +I3 +sV_ +I1 +ssVé, +p448 +(dp449 +V +I78 +ssVé. +p450 +(dp451 +V +I31 +sV- +I2 +ssVdv +p452 +(dp453 +Vi +I4 +sVa +I1 +sVe +I1 +ssVé +p454 +(dp455 +Vp +I47 +sVC +I4 +sVM +I1 +sVR +I2 +sVU +I1 +sV_ +I2 +sVa +I27 +sVà +I27 +sVc +I17 +sVb +I5 +sVe +I29 +sVd +I124 +sVg +I2 +sVf +I7 +sVi +I6 +sVh +I2 +sVj +I4 +sVm +I14 +sVl +I32 +sVo +I2 +sVn +I8 +sVq +I23 +sVé +I5 +sVs +I15 +sVr +I5 +sVu +I9 +sVt +I11 +sVv +I6 +ssVdu +p456 +(dp457 +V! +I1 +sV +I315 +sVc +I14 +sVe +I11 +sVi +I30 +sVm +I1 +sVl +I1 +sV, +I8 +sVq +I1 +sVs +I2 +sVr +I12 +sVt +I1 +sVv +I1 +sV_ +I1 +sV? +I1 +ssVdr +p458 +(dp459 +Va +I31 +sVi +I9 +sVe +I108 +sVô +I2 +sVo +I24 +ssVds +p460 +(dp461 +V +I42 +sV; +I1 +sV- +I1 +sV, +I11 +sV. +I4 +ssVd_ +p462 +(dp463 +V, +I2 +sV. +I1 +ssVd[ +p464 +(dp465 +V3 +I1 +ssVw +p466 +(dp467 +VA +I1 +sVb +I1 +sVd +I1 +sVi +I1 +sVJ +I1 +sVM +I1 +sVL +I2 +sVo +I1 +sVN +I2 +sVy +I1 +sVt +I9 +sVY +I1 +ssVw. +p468 +(dp469 +Vi +I1 +sV +I2 +sVg +I2 +ssVw, +p470 +(dp471 +V +I3 +ssVéé +p472 +(dp473 +Vt +I2 +ssVéâ +p474 +(dp475 +Vt +I4 +ssV93 +p476 +(dp477 +V, +I1 +ssVw: +p478 +(dp479 +V +I1 +sV/ +I2 +ssV02 +p480 +(dp481 +V +I3 +sV* +I1 +sV] +I1 +sV, +I3 +sV/ +I1 +ssV03 +p482 +(dp483 +V +I4 +sV, +I1 +ssV00 +p484 +(dp485 +V +I13 +sV+ +I1 +sV, +I1 +sV1 +I4 +sV0 +I11 +sV3 +I2 +sV2 +I5 +sV4 +I1 +ssV01 +p486 +(dp487 +V( +I2 +sV, +I1 +sV +I4 +ssV06 +p488 +(dp489 +V. +I1 +ssV04 +p490 +(dp491 +V +I1 +ssV05 +p492 +(dp493 +V, +I1 +ssV0; +p494 +(dp495 +V +I3 +ssV09 +p496 +(dp497 +V +I1 +sV, +I1 +ssV0 +p498 +(dp499 +Va +I1 +sV +I11 +sVe +I1 +sVD +I1 +sVj +I1 +sVm +I2 +sVn +I1 +sVs +I2 +sVd +I3 +ssV0% +p500 +(dp501 +V +I1 +ssV0+ +p502 +(dp503 +V +I1 +ssV0. +p504 +(dp505 +V +I2 +sVz +I2 +sVt +I2 +ssV0, +p506 +(dp507 +V +I4 +ssVdî +p508 +(dp509 +Vt +I2 +sVn +I11 +ssVwe +p510 +(dp511 +V +I12 +sVr +I3 +sVv +I1 +ssVdè +p512 +(dp513 +Vs +I7 +sVr +I3 +sVl +I6 +ssVdé +p514 +(dp515 +V +I15 +sVc +I23 +sVb +I9 +sVe +I14 +sVd +I4 +sVg +I10 +sVf +I12 +sVj +I21 +sVm +I18 +sVl +I17 +sVn +I1 +sVp +I15 +sVs +I31 +sVr +I6 +sVt +I29 +sVv +I10 +sV. 
+I1 +ssVwo +p516 +(dp517 +V +I1 +sVr +I14 +sVu +I2 +ssVwn +p518 +(dp519 +Vs +I1 +sVl +I3 +ssVwi +p520 +(dp521 +Vc +I1 +sVl +I7 +sVn +I3 +sVs +I3 +sVr +I1 +sVt +I20 +ssVwh +p522 +(dp523 +Va +I2 +sVi +I3 +sVe +I4 +sVo +I3 +ssVww +p524 +(dp525 +V: +I2 +sVw +I5 +sV. +I3 +ssVwt +p526 +(dp527 +Vo +I1 +ssVws +p528 +(dp529 +V +I2 +sVl +I3 +sV, +I1 +ssVwr +p530 +(dp531 +Vi +I2 +sVo +I2 +ssVwy +p532 +(dp533 +Ve +I1 +ssV0a +p534 +(dp535 +V. +I2 +ssVié +p536 +(dp537 +V +I16 +sVe +I2 +sVg +I2 +sV, +I2 +sV. +I2 +sVs +I1 +sVt +I7 +sV; +I2 +sV: +I1 +ssViè +p538 +(dp539 +Vc +I27 +sVr +I87 +sVm +I25 +sVg +I3 +ssViâ +p540 +(dp541 +Vt +I2 +ssVC) +p542 +(dp543 +V +I1 +ssVC. +p544 +(dp545 +V +I1 +ssVC +p546 +(dp547 +Vo +I1 +sVL +I1 +sVD +I2 +sVd +I1 +ssVC' +p548 +(dp549 +Vé +I3 +sVe +I33 +ssVCI +p550 +(dp551 +VI +I2 +sVD +I1 +ssVCH +p552 +(dp553 +VA +I31 +sV +I2 +sVE +I2 +sVO +I2 +ssVCO +p554 +(dp555 +VB +I1 +sVN +I2 +ssVCL +p556 +(dp557 +VA +I1 +sVU +I2 +ssVCC +p558 +(dp559 +VX +I1 +sVC +I1 +ssVVÉ +p560 +(dp561 +VE +I1 +ssVCA +p562 +(dp563 +VN +I4 +ssVCE +p564 +(dp565 +V +I3 +sVS +I1 +ssVCD +p566 +(dp567 +VI +I2 +ssVCX +p568 +(dp569 +VX +I1 +ssVCR +p570 +(dp571 +V. +I1 +ssVCU +p572 +(dp573 +VL +I1 +ssVCT +p574 +(dp575 +V +I9 +sVE +I2 +sV, +I2 +ssVCi +p576 +(dp577 +Vc +I1 +sVn +I2 +ssVCh +p578 +(dp579 +Va +I13 +sVà +I1 +sVe +I2 +sVi +I4 +sVo +I1 +sVr +I1 +ssVCo +p580 +(dp581 +Vm +I28 +sVl +I4 +sVï +I1 +sVn +I20 +sVp +I1 +sVr +I6 +sVu +I1 +ssVCl +p582 +(dp583 +Va +I2 +sVu +I1 +sVo +I1 +ssVVé +p584 +(dp585 +Vn +I3 +ssVCa +p586 +(dp587 +Vc +I85 +sVe +I1 +sVd +I7 +sVm +I2 +sVl +I1 +sVn +I396 +sVs +I1 +sVr +I8 +sVt +I2 +sVy +I3 +ssVCe +p588 +(dp589 +V +I44 +sVc +I1 +sVl +I13 +sVn +I1 +sVp +I8 +sVs +I3 +sVr +I2 +sVu +I1 +sVt +I11 +ssVCr +p590 +(dp591 +Vé +I1 +sVo +I6 +ssVCu +p592 +(dp593 +Vn +I125 +ssVix +p594 +(dp595 +Va +I7 +sV +I52 +sVe +I1 +sVi +I4 +sV- +I2 +sV, +I6 +sV. +I7 +sV; +I2 +sV: +I1 +ssViz +p596 +(dp597 +Va +I7 +sVi +I4 +sVe +I2 +sVo +I1 +ssViq +p598 +(dp599 +Vu +I115 +ssVip +p600 +(dp601 +Va +I3 +sVe +I8 +sVp +I1 +sVi +I8 +sV* +I2 +sVm +I2 +sVl +I2 +sVo +I9 +sVé +I1 +sVs +I1 +sVt +I3 +ssVis +p602 +(dp603 +V! +I1 +sV +I672 +sVp +I22 +sV- +I8 +sV, +I101 +sV. +I26 +sV; +I15 +sV: +I4 +sV? +I6 +sV_ +I2 +sVa +I80 +sVc +I32 +sVb +I11 +sVe +I169 +sVf +I3 +sVi +I110 +sVè +I7 +sVk +I4 +sVm +I7 +sVl +I1 +sVo +I73 +sVq +I10 +sVé +I23 +sVs +I170 +sVt +I89 +sVh +I4 +ssVir +p604 +(dp605 +Va +I53 +sV +I183 +sVc +I3 +sVâ +I1 +sVe +I329 +sVg +I3 +sVé +I9 +sVè +I1 +sVm +I3 +sV, +I25 +sVo +I21 +sV. +I13 +sVi +I14 +sVs +I18 +sVu +I7 +sVt +I1 +sV; +I3 +sV? +I4 +ssViu +p606 +(dp607 +Vs +I6 +sVm +I6 +ssVit +p608 +(dp609 +V! +I1 +sV +I1361 +sV- +I61 +sV, +I64 +sV. +I28 +sV; +I18 +sV: +I34 +sVô +I15 +sV[ +I1 +sV_ +I1 +sVa +I59 +sVâ +I1 +sVe +I203 +sVi +I70 +sVh +I23 +sVl +I2 +sVo +I8 +sVé +I75 +sVs +I35 +sVr +I31 +sVu +I22 +sVt +I11 +sVy +I5 +sVz +I2 +ssViv +p610 +(dp611 +Va +I41 +sVâ +I1 +sVe +I72 +sVi +I25 +sVè +I3 +sVo +I3 +sVé +I16 +sVr +I38 +ssVii +p612 +(dp613 +V, +I1 +ssVik +p614 +(dp615 +Ve +I2 +sVd +I1 +ssVij +p616 +(dp617 +Vo +I1 +ssVim +p618 +(dp619 +Va +I36 +sV +I7 +sVb +I1 +sVe +I61 +sVi +I15 +sVé +I7 +sVm +I5 +sV, +I1 +sVo +I1 +sV. +I1 +sVp +I34 +sVs +I2 +sVu +I1 +ssVil +p620 +(dp621 +Va +I7 +sV +I530 +sVe +I53 +sVd +I1 +sVà +I24 +sVi +I26 +sVh +I1 +sVl +I382 +sVo +I26 +sV. +I7 +sVé +I1 +sVs +I134 +sVu +I2 +sVt +I3 +sVè +I3 +sVy +I1 +sV; +I3 +sV: +I1 +sV, +I42 +ssVio +p622 +(dp623 +Vc +I1 +sVd +I1 +sVm +I1 +sVl +I14 +sVn +I278 +sVs +I8 +sVr +I2 +sVu +I3 +sVt +I4 +sVv +I1 +sV. +I2 +sV, +I1 +ssVin +p624 +(dp625 +V! 
+I5 +sV +I206 +sV" +I1 +sV- +I6 +sV, +I72 +sV. +I48 +sV; +I21 +sV: +I8 +sV? +I1 +sVç +I1 +sV[ +I1 +sVa +I49 +sVc +I66 +sVe +I146 +sVd +I27 +sVg +I108 +sVf +I34 +sVé +I13 +sVh +I1 +sVk +I1 +sVj +I4 +sVo +I20 +sVn +I11 +sVq +I57 +sVi +I18 +sVs +I117 +sVr +I4 +sVu +I23 +sVt +I140 +sVv +I6 +sVz +I9 +ssVia +p626 +(dp627 +V +I17 +sVc +I4 +sVb +I16 +sVd +I1 +sVg +I4 +sVi +I13 +sV- +I3 +sVm +I29 +sVl +I18 +sV, +I7 +sVs +I18 +sV: +I1 +sVt +I3 +sVh +I1 +sV; +I1 +sVn +I12 +ssV_p +p628 +(dp629 +Va +I1 +ssVic +p630 +(dp631 +Va +I28 +sV +I8 +sV) +I1 +sVe +I41 +sVi +I68 +sVh +I33 +sVk +I2 +sVl +I2 +sVo +I6 +sV. +I1 +sVé +I2 +sVs +I2 +sVu +I15 +sVt +I7 +sV, +I1 +ssVib +p632 +(dp633 +Va +I1 +sVe +I11 +sVi +I6 +sVl +I64 +sVn +I2 +sVé +I1 +sVr +I12 +sVu +I16 +ssVie +p634 +(dp635 +V! +I2 +sV +I121 +sV, +I50 +sV. +I27 +sV; +I10 +sV: +I2 +sV? +I5 +sV[ +I5 +sV_ +I1 +sVd +I28 +sVf +I1 +sVi +I71 +sVm +I4 +sVl +I14 +sVn +I505 +sVp +I4 +sVs +I46 +sVr +I133 +sVu +I198 +sVt +I3 +sVw +I1 +sVz +I29 +ssVid +p636 +(dp637 +Va +I6 +sV +I9 +sVe +I449 +sVé +I21 +sVè +I6 +sV, +I1 +sVo +I1 +sVn +I2 +sVi +I10 +sVr +I1 +sVy +I1 +ssVig +p638 +(dp639 +Va +I6 +sVe +I10 +sVé +I9 +sVh +I17 +sVo +I17 +sVn +I79 +sVi +I16 +sVr +I4 +sVu +I17 +sVt +I5 +ssVif +p640 +(dp641 +V +I21 +sVe +I1 +sVf +I14 +sVi +I20 +sV, +I6 +sV. +I3 +sVé +I1 +sVs +I6 +sVr +I1 +sVt +I1 +sVy +I3 +ssVV +p642 +(dp643 +Vp +I1 +sV: +I1 +sVd +I2 +ssVn: +p644 +(dp645 +V +I18 +ssVi[ +p646 +(dp647 +V1 +I2 +sV4 +I1 +ssV"A +p648 +(dp649 +VS +I1 +ssVi_ +p650 +(dp651 +V, +I1 +ssVV, +p652 +(dp653 +V +I1 +ssVV. +p654 +(dp655 +V +I7 +ssV"P +p656 +(dp657 +VR +I3 +sVr +I3 +ssV"R +p658 +(dp659 +Vi +I1 +ssV"S +p660 +(dp661 +Vm +I5 +ssVV: +p662 +(dp663 +V +I1 +ssVbî +p664 +(dp665 +Vm +I4 +ssVV? +p666 +(dp667 +V +I1 +ssVi8 +p668 +(dp669 +V. +I1 +ssVi; +p670 +(dp671 +V +I14 +ssVi: +p672 +(dp673 +V +I2 +ssV" +p674 +(dp675 +Va +I2 +sVs +I5 +sVt +I2 +sVw +I1 +sVd +I1 +ssVVE +p676 +(dp677 +V +I4 +sVC +I1 +sVR +I3 +sVN +I2 +ssVi? +p678 +(dp679 +V +I11 +ssVi1 +p680 +(dp681 +V1 +I2 +sV0 +I6 +ssVVI +p682 +(dp683 +V +I1 +sVD +I1 +sVI +I20 +sV, +I3 +sV. +I3 +sV; +I1 +ssV". +p684 +(dp685 +V +I2 +ssV") +p686 +(dp687 +V. +I1 +ssVVO +p688 +(dp689 +VL +I2 +ssVVR +p690 +(dp691 +VE +I3 +ssVi- +p692 +(dp693 +Vc +I4 +sVe +I1 +sVh +I1 +sVj +I4 +sVm +I13 +sVl +I2 +ssVi, +p694 +(dp695 +V +I132 +ssVi. +p696 +(dp697 +V +I26 +sV- +I1 +ssVi! +p698 +(dp699 +V +I17 +sV_ +I2 +ssVi +p700 +(dp701 +Vê +I6 +sVi +I10 +sV. +I1 +sV1 +I1 +sVÉ +I1 +sVA +I1 +sVC +I8 +sVD +I2 +sVI +I2 +sVM +I2 +sVL +I1 +sVN +I1 +sVP +I1 +sVT +I1 +sVV +I1 +sVa +I86 +sVà +I19 +sVc +I43 +sVb +I23 +sVe +I51 +sVd +I166 +sVg +I11 +sVf +I50 +sVé +I43 +sVh +I4 +sVj +I30 +sVm +I43 +sVl +I82 +sVo +I13 +sVn +I53 +sVq +I29 +sVp +I81 +sVs +I46 +sVr +I42 +sVu +I16 +sVt +I34 +sVv +I52 +sVy +I1 +ssVV_ +p702 +(dp703 +V; +I5 +sV, +I1 +sV. +I5 +ssVVa +p704 +(dp705 +Vl +I2 +sVn +I5 +ssVCé +p706 +(dp707 +Vs +I3 +ssVVe +p708 +(dp709 +Vs +I11 +sVr +I2 +sVn +I32 +ssVVi +p710 +(dp711 +Vs +I3 +sVr +I3 +sVe +I1 +sVt +I1 +sVn +I1 +ssVVo +p712 +(dp713 +Vy +I17 +sVi +I10 +sVu +I39 +sVl +I30 +sVt +I6 +ssV5] +p714 +(dp715 +V +I2 +sV; +I1 +sV, +I1 +ssVt- +p716 +(dp717 +Và +I5 +sVc +I16 +sVe +I19 +sVG +I1 +sVi +I82 +sVH +I1 +sVê +I9 +sVM +I2 +sVS +I1 +sVo +I7 +sVn +I1 +sVq +I3 +sVP +I1 +sVs +I1 +sVl +I1 +ssVt* +p718 +(dp719 +V +I1 +sV* +I1 +ssV° +p720 +(dp721 +V2 +I2 +sV6 +I1 +ssVHé +p722 +(dp723 +Vb +I1 +sVl +I19 +ssV°. +p724 +(dp725 +V +I1 +ssV59 +p726 +(dp727 +V +I2 +sV4 +I2 +sV: +I1 +sV, +I3 +sV. +I1 +ssV55 +p728 +(dp729 +V- +I1 +sV. +I1 +ssV54 +p730 +(dp731 +V1 +I1 +sV. 
+I1 +ssV57 +p732 +(dp733 +V: +I2 +ssV56 +p734 +(dp735 +V +I1 +sV; +I1 +sV, +I2 +ssV50 +p736 +(dp737 +V1 +I2 +sV0 +I2 +sV] +I1 +sV +I2 +ssV52 +p738 +(dp739 +V8 +I1 +sV. +I1 +ssV5- +p740 +(dp741 +V4 +I1 +ssV5, +p742 +(dp743 +V +I3 +ssV5. +p744 +(dp745 +V +I4 +ssV5) +p746 +(dp747 +V: +I1 +sV, +I1 +ssV5 +p748 +(dp749 +Vj +I1 +ssVHI +p750 +(dp751 +VS +I2 +sVN +I1 +ssVHO +p752 +(dp753 +VT +I2 +ssVHA +p754 +(dp755 +VP +I30 +sVN +I1 +sVM +I1 +sVT +I1 +sVV +I2 +ssVHE +p756 +(dp757 +V +I5 +sVR +I3 +sVZ +I1 +ssVe[ +p758 +(dp759 +Va +I1 +sV1 +I3 +sV3 +I2 +sV2 +I2 +sV5 +I1 +sV7 +I1 +sV6 +I2 +sV9 +I1 +sV8 +I1 +ssVHi +p760 +(dp761 +Vs +I5 +sVd +I2 +ssVHo +p762 +(dp763 +Vr +I4 +sVm +I3 +sVl +I5 +sVn +I2 +ssVHa +p764 +(dp765 +Vy +I1 +sVr +I7 +sVm +I1 +sVw +I1 +ssVHe +p766 +(dp767 +Vr +I1 +sVn +I6 +ssV-4 +p768 +(dp769 +V1 +I1 +ssVm" +p770 +(dp771 +V +I1 +ssVH, +p772 +(dp773 +V +I1 +ssVH +p774 +(dp775 +VD +I1 +sVO +I1 +ssV[P +p776 +(dp777 +Vr +I1 +sVo +I1 +ssV[T +p778 +(dp779 +Vh +I1 +ssV[Y +p780 +(dp781 +Ve +I1 +ssV[E +p782 +(dp783 +Vm +I2 +sVt +I1 +ssV_c +p784 +(dp785 +Vo +I1 +ssVnè +p786 +(dp787 +Vq +I1 +sVs +I3 +sVr +I5 +sVg +I8 +ssVné +p788 +(dp789 +Va +I3 +sV +I52 +sVc +I9 +sVe +I48 +sVd +I1 +sVg +I126 +sVi +I1 +sVm +I3 +sV, +I4 +sV. +I2 +sVs +I24 +sVr +I12 +sV; +I1 +ssVnê +p790 +(dp791 +Vt +I17 +ssVnâ +p792 +(dp793 +Vt +I1 +ssVnç +p794 +(dp795 +Va +I21 +sVu +I2 +sVo +I7 +ssV[a +p796 +(dp797 +V] +I2 +ssVnô +p798 +(dp799 +Vt +I3 +ssV'ç +p800 +(dp801 +Vn +I1 +ssV'â +p802 +(dp803 +Vm +I5 +sVg +I4 +ssV'à +p804 +(dp805 +V +I20 +ssV'ê +p806 +(dp807 +Vt +I38 +ssV'é +p808 +(dp809 +Vc +I33 +sVd +I3 +sVg +I4 +sVm +I3 +sVl +I7 +sVn +I1 +sVq +I1 +sVp +I20 +sVt +I99 +sVv +I5 +ssV[3 +p810 +(dp811 +V] +I10 +ssV[2 +p812 +(dp813 +V] +I21 +ssV[1 +p814 +(dp815 +V1 +I2 +sV0 +I2 +sV2 +I2 +sV] +I49 +ssV[7 +p816 +(dp817 +V] +I2 +ssV[6 +p818 +(dp819 +V] +I4 +ssV[5 +p820 +(dp821 +V] +I4 +ssV[4 +p822 +(dp823 +V] +I6 +ssV[9 +p824 +(dp825 +V] +I2 +ssV[8 +p826 +(dp827 +V] +I2 +ssV[* +p828 +(dp829 +V] +I3 +ssV'É +p830 +(dp831 +VP +I1 +sVd +I2 +sVg +I1 +ssVn[ +p832 +(dp833 +V1 +I2 +sV2 +I3 +ssVn_ +p834 +(dp835 +V; +I1 +sV, +I1 +ssVnh +p836 +(dp837 +Va +I1 +sVu +I1 +sVe +I8 +ssVni +p838 +(dp839 +V! +I3 +sV +I46 +sV, +I3 +sVa +I6 +sVc +I10 +sVb +I1 +sVe +I44 +sVg +I3 +sVf +I8 +sVé +I2 +sVè +I13 +sVm +I4 +sVl +I2 +sVo +I1 +sVn +I5 +sVq +I1 +sVs +I46 +sVr +I26 +sVu +I2 +sVt +I20 +sVv +I11 +sVz +I3 +ssVnj +p840 +(dp841 +Vu +I4 +sVo +I1 +ssVnk +p842 +(dp843 +V +I1 +sVs +I1 +ssVnl +p844 +(dp845 +Va +I1 +sVy +I5 +sVe +I6 +sVi +I1 +sVo +I3 +ssVnn +p846 +(dp847 +Va +I78 +sVâ +I1 +sVe +I204 +sVi +I9 +sVè +I4 +sVê +I13 +sVo +I17 +sVé +I48 +sVs +I1 +sVu +I19 +ssVno +p848 +(dp849 +V +I5 +sVc +I11 +sVb +I8 +sVe +I1 +sVi +I17 +sVm +I33 +sVl +I17 +sVn +I40 +sVp +I14 +sVs +I33 +sVr +I14 +sVu +I206 +sVt +I59 +sVw +I8 +sVv +I4 +sVy +I5 +sVx +I2 +sV, +I1 +ssVna +p850 +(dp851 +Vn +I32 +sV +I48 +sV, +I10 +sV; +I1 +sV: +I1 +sVç +I1 +sVc +I5 +sVb +I22 +sVd +I6 +sVg +I14 +sVi +I101 +sVm +I11 +sVl +I21 +sVï +I2 +sVî +I10 +sVp +I5 +sVs +I3 +sVr +I9 +sVu +I5 +sVt +I61 +sVv +I11 +sVy +I1 +ssVnb +p852 +(dp853 +Ve +I31 +ssVnc +p854 +(dp855 +Va +I3 +sV +I28 +sVe +I151 +sV! +I1 +sVé +I8 +sVh +I21 +sVk +I11 +sVl +I17 +sVo +I82 +sV, +I6 +sVi +I25 +sVs +I5 +sVr +I3 +sVu +I3 +sVt +I3 +sVè +I6 +sVy +I3 +ssVnd +p856 +(dp857 +Va +I124 +sV +I209 +sV" +I1 +sVe +I278 +sVi +I493 +sV- +I14 +sV, +I10 +sVo +I22 +sV. +I4 +sVé +I8 +sVs +I35 +sVr +I101 +sVu +I54 +sV_ +I1 +sV; +I2 +sVî +I1 +sV? +I1 +sVl +I1 +ssVne +p858 +(dp859 +V +I743 +sV- +I4 +sV, +I42 +sV. 
+I19 +sV; +I9 +sV: +I4 +sV? +I3 +sV[ +I1 +sVc +I1 +sVb +I1 +sVe +I4 +sVi +I5 +sVm +I25 +sVl +I14 +sVn +I22 +sVs +I80 +sVr +I105 +sVu +I60 +sVt +I22 +sVw +I7 +sVv +I3 +sVy +I6 +sVx +I3 +sVz +I25 +ssVnf +p860 +(dp861 +Va +I20 +sVe +I12 +sVi +I52 +sVl +I2 +sVo +I30 +sV. +I2 +sVr +I2 +sVu +I8 +ssVng +p862 +(dp863 +Va +I8 +sV +I64 +sVe +I109 +sVi +I1 +sVè +I1 +sV- +I12 +sVl +I122 +sVo +I1 +sV. +I4 +sVé +I18 +sVs +I4 +sVr +I3 +sVu +I30 +sVt +I28 +sV[ +I1 +sV: +I1 +sV, +I4 +ssVnx +p864 +(dp865 +V +I1 +ssVny +p866 +(dp867 +V) +I2 +sV +I17 +sVs +I1 +sVo +I1 +ssVnz +p868 +(dp869 +Va +I2 +sVi +I1 +sVe +I10 +ssVnq +p870 +(dp871 +V +I13 +sVu +I64 +ssVnr +p872 +(dp873 +Vé +I1 +sVa +I2 +sVe +I5 +sVi +I6 +ssVns +p874 +(dp875 +V +I690 +sVi +I91 +sV) +I1 +sV- +I17 +sV, +I68 +sV. +I27 +sV; +I8 +sV: +I1 +sV? +I1 +sV[ +I2 +sV_ +I1 +sVa +I5 +sVc +I3 +sVe +I74 +sVf +I1 +sVé +I8 +sVk +I1 +sVo +I13 +sVp +I11 +sVs +I1 +sVu +I22 +sVt +I31 +sVw +I2 +sVy +I4 +ssVnt +p876 +(dp877 +V! +I11 +sV +I1213 +sV" +I1 +sV* +I1 +sV- +I12 +sV, +I115 +sV. +I53 +sV; +I32 +sV: +I8 +sV? +I2 +sV[ +I1 +sVa +I70 +sVe +I207 +sVi +I108 +sVè +I4 +sVl +I2 +sVo +I14 +sVé +I26 +sVs +I127 +sVr +I116 +sVu +I25 +sVô +I21 +sVh +I6 +ssVnu +p878 +(dp879 +Va +I11 +sV +I18 +sVe +I21 +sVf +I1 +sVi +I12 +sVè +I1 +sVm +I4 +sVl +I5 +sVo +I1 +sV. +I2 +sVq +I6 +sVs +I14 +sVt +I3 +sVy +I4 +sV; +I1 +sV, +I2 +ssVnv +p880 +(dp881 +Vi +I27 +sVa +I3 +sVe +I22 +sVu +I4 +sVo +I16 +ssV'u +p882 +(dp883 +Và +I1 +sVs +I6 +sVt +I2 +sVn +I156 +ssV't +p884 +(dp885 +V +I4 +ssV's +p886 +(dp887 +V +I4 +ssV'y +p888 +(dp889 +V +I53 +ssV'e +p890 +(dp891 +Va +I5 +sVf +I7 +sVm +I30 +sVl +I32 +sVn +I120 +sVs +I162 +sVu +I8 +sVx +I23 +sVû +I1 +ssV'd +p892 +(dp893 +Vé +I1 +ssV'a +p894 +(dp895 +V +I33 +sVc +I14 +sVb +I37 +sVd +I6 +sVg +I6 +sVi +I140 +sVm +I20 +sVl +I13 +sVn +I18 +sVp +I41 +sVs +I11 +sVr +I24 +sVu +I79 +sVt +I16 +sVv +I112 +sVy +I9 +ssV'o +p896 +(dp897 +Vb +I3 +sVe +I7 +sVf +I2 +sVn +I93 +sVp +I8 +sVs +I6 +sVr +I27 +sVu +I10 +ssV'i +p898 +(dp899 +Vc +I3 +sVd +I3 +sVg +I1 +sVm +I12 +sVl +I187 +sVn +I45 +sVr +I1 +sVv +I1 +ssV'h +p900 +(dp901 +Va +I6 +sVe +I2 +sVé +I2 +sVo +I31 +sVi +I3 +sVu +I10 +sVô +I10 +sVy +I1 +ssVn) +p902 +(dp903 +V, +I1 +ssV'U +p904 +(dp905 +Vr +I1 +ssV'T +p906 +(dp907 +V +I1 +ssVn, +p908 +(dp909 +V +I196 +ssVn- +p910 +(dp911 +Va +I1 +sVb +I3 +sVi +I1 +sVh +I3 +sV- +I1 +sV1 +I4 +sVt +I11 +sV8 +I1 +ssVn. +p912 +(dp913 +V +I101 +sV" +I1 +sVh +I2 +sV- +I1 +sV. +I1 +sV] +I1 +ssVn +p914 +(dp915 +Vê +I7 +sV" +I1 +sVp +I203 +sV( +I2 +sV1 +I24 +sVÉ +I3 +sV3 +I1 +sV2 +I3 +sV9 +I1 +sVA +I11 +sVC +I7 +sVB +I2 +sVE +I7 +sVD +I3 +sVF +I10 +sVI +I12 +sVH +I5 +sVK +I1 +sVJ +I8 +sVM +I4 +sVO +I1 +sVN +I3 +sVP +I7 +sVS +I1 +sVâ +I3 +sVT +I4 +sVV +I5 +sVX +I3 +sV_ +I4 +sVa +I184 +sVà +I23 +sVc +I147 +sVb +I51 +sVe +I101 +sVd +I203 +sVg +I34 +sVf +I70 +sVé +I24 +sVh +I41 +sVj +I44 +sVm +I107 +sVl +I81 +sVo +I31 +sVn +I76 +sVq +I35 +sVi +I36 +sVs +I99 +sVr +I48 +sVu +I20 +sVt +I79 +sVw +I4 +sVv +I95 +sVy +I13 +ssVn! +p916 +(dp917 +V +I16 +ssVn" +p918 +(dp919 +V +I2 +ssVfâ +p920 +(dp921 +Vc +I2 +ssVn' +p922 +(dp923 +Va +I106 +sVe +I67 +sVé +I32 +sVh +I2 +sVê +I2 +sVo +I11 +sVi +I1 +sVu +I1 +sVt +I4 +sVy +I36 +ssV'E +p924 +(dp925 +Vs +I12 +sVt +I1 +sVu +I7 +sVl +I8 +sVv +I1 +ssVn; +p926 +(dp927 +V +I48 +ssV'A +p928 +(dp929 +Vr +I3 +sVd +I1 +sVf +I3 +sVc +I1 +sVm +I3 +sVL +I1 +sVN +I1 +sVs +I3 +sVn +I7 +sVt +I3 +sVv +I1 +sVz +I1 +sVl +I8 +ssVn? 
+p930 +(dp931 +V +I4 +sV- +I1 +ssV'O +p932 +(dp933 +VP +I2 +sVr +I1 +sVN +I1 +sVv +I1 +sVp +I2 +ssV'I +p934 +(dp935 +Vs +I1 +sVn +I2 +sVb +I3 +sVL +I1 +sVt +I2 +ssV:/ +p936 +(dp937 +V/ +I8 +ssV: +p938 +(dp939 +V +I2 +sV1 +I1 +sVA +I4 +sVC +I13 +sVE +I4 +sVF +I1 +sVI +I6 +sVH +I2 +sVJ +I7 +sVM +I12 +sVO +I3 +sVN +I4 +sVQ +I2 +sVS +I6 +sVU +I1 +sVT +I1 +sVV +I6 +sV_ +I6 +sVa +I3 +sVc +I9 +sVe +I15 +sVi +I11 +sVj +I12 +sVm +I10 +sVl +I15 +sVo +I10 +sVn +I8 +sVq +I4 +sVp +I4 +sVs +I5 +sVu +I3 +sVt +I2 +sVv +I13 +ssVHu +p940 +(dp941 +Vm +I1 +ssVfê +p942 +(dp943 +Vt +I1 +ssVép +p944 +(dp945 +Va +I24 +sVe +I10 +sVé +I9 +sVl +I13 +sVo +I81 +sVi +I3 +sVr +I18 +sVu +I5 +ssVfî +p946 +(dp947 +Vt +I1 +ssVMé +p948 +(dp949 +Vd +I3 +sVm +I2 +sVl +I10 +sVo +I1 +sVq +I1 +sVt +I1 +ssVMè +p950 +(dp951 +Vn +I1 +ssVSp +p952 +(dp953 +Ve +I1 +ssVAS +p954 +(dp955 +V +I1 +sVC +I2 +sV- +I1 +ssV:M +p956 +(dp957 +Vo +I1 +ssVM +p958 +(dp959 +VI +I1 +sVE +I1 +ssVM, +p960 +(dp961 +V +I2 +ssVM. +p962 +(dp963 +VA +I1 +sV +I22 +ssVMe +p964 +(dp965 +Va +I2 +sVx +I1 +sVs +I8 +sVc +I1 +ssV.g +p966 +(dp967 +Vu +I2 +ssVMa +p968 +(dp969 +V +I9 +sVd +I7 +sVi +I32 +sVh +I2 +sVj +I1 +sVl +I3 +sVn +I1 +sVs +I6 +sVr +I123 +sVu +I1 +sVt +I1 +sVî +I4 +ssVMo +p970 +(dp971 +Va +I1 +sVg +I1 +sVi +I2 +sVk +I1 +sVï +I1 +sVn +I32 +sVs +I2 +sVr +I4 +ssVMi +p972 +(dp973 +Vs +I2 +sVc +I7 +sVl +I4 +sVd +I1 +ssVMu +p974 +(dp975 +Vl +I2 +ssVMy +p976 +(dp977 +Vt +I1 +ssVME +p978 +(dp979 +V +I3 +sVD +I2 +sV, +I1 +sV. +I1 +sVR +I3 +sVN +I1 +ssVMD +p980 +(dp981 +VC +I1 +ssVMA +p982 +(dp983 +VD +I1 +sVG +I3 +sVI +I2 +sVL +I2 +sVN +I2 +sVY +I1 +ssVMB +p984 +(dp985 +V +I1 +sVE +I2 +ssVMM +p986 +(dp987 +V. +I1 +ssVMO +p988 +(dp989 +VU +I1 +sVN +I1 +ssVMN +p990 +(dp991 +VI +I1 +ssVMI +p992 +(dp993 +VS +I2 +sVT +I3 +sVN +I2 +ssVMP +p994 +(dp995 +VR +I1 +sVL +I1 +ssVMS +p996 +(dp997 +V +I1 +ssVbr +p998 +(dp999 +Va +I34 +sVe +I37 +sVi +I30 +sVo +I11 +sVu +I19 +sVû +I13 +ssVîl +p1000 +(dp1001 +Ve +I2 +ssVîm +p1002 +(dp1003 +Vé +I3 +sVe +I3 +ssVîn +p1004 +(dp1005 +Va +I2 +sVé +I7 +sVe +I15 +ssVîc +p1006 +(dp1007 +Vh +I5 +ssVît +p1008 +(dp1009 +V +I14 +sVr +I58 +ssVsû +p1010 +(dp1011 +Vr +I1 +ssVsé +p1012 +(dp1013 +Va +I1 +sV +I30 +sVc +I2 +sVe +I14 +sVd +I2 +sVj +I3 +sVm +I10 +sV, +I6 +sV. +I1 +sVq +I8 +sVp +I9 +sVs +I11 +sVr +I16 +sVv +I1 +sVn +I6 +sV: +I1 +ssVsè +p1014 +(dp1015 +Vc +I1 +sVr +I16 +sVd +I3 +ssVs; +p1016 +(dp1017 +V +I155 +ssVs: +p1018 +(dp1019 +V +I35 +ssVs? +p1020 +(dp1021 +V +I39 +sV- +I4 +ssV, +p1022 +(dp1023 +VR +I2 +sVé +I19 +sV0 +I2 +sV1 +I6 +sVÉ +I1 +sV3 +I1 +sV2 +I3 +sV9 +I10 +sVô +I1 +sVA +I7 +sVC +I24 +sVE +I4 +sVD +I4 +sVG +I1 +sVF +I1 +sVI +I5 +sVH +I3 +sVK +I3 +sVJ +I3 +sVM +I12 +sVL +I5 +sVO +I4 +sVN +I11 +sVP +I7 +sVS +I4 +sVb +I17 +sVU +I1 +sVT +I4 +sVW +I3 +sVV +I4 +sV_ +I3 +sVa +I120 +sVà +I47 +sVc +I133 +sVâ +I3 +sVe +I601 +sVd +I397 +sVg +I6 +sVf +I26 +sVi +I120 +sVh +I8 +sVj +I93 +sVm +I131 +sVl +I228 +sVo +I59 +sVn +I73 +sVq +I235 +sVp +I116 +sVs +I132 +sVr +I51 +sVu +I38 +sVt +I68 +sVw +I6 +sVv +I62 +sVy +I16 +ssVs* +p1024 +(dp1025 +V* +I1 +ssVs) +p1026 +(dp1027 +V +I1 +sV; +I1 +sV, +I1 +ssVs/ +p1028 +(dp1029 +Vb +I1 +sVg +I1 +ssVs. +p1030 +(dp1031 +V +I302 +sV- +I2 +sV] +I1 +sV. +I1 +ssVs- +p1032 +(dp1033 +VA +I13 +sVà +I1 +sVc +I7 +sVe +I1 +sVd +I3 +sVÉ +I4 +sVm +I3 +sVj +I9 +sVM +I1 +sVl +I4 +sVn +I13 +sVt +I3 +sVv +I13 +sVE +I1 +ssVs, +p1034 +(dp1035 +Vy +I1 +sV +I765 +ssVs" +p1036 +(dp1037 +V. +I1 +ssVs! 
+p1038 +(dp1039 +V +I45 +sV* +I1 +ssVs +p1040 +(dp1041 +Vê +I11 +sVî +I1 +sVé +I115 +sV" +I4 +sV( +I2 +sV* +I1 +sV- +I1 +sV1 +I5 +sV3 +I1 +sV2 +I1 +sV: +I1 +sVâ +I1 +sVA +I7 +sVC +I12 +sVB +I20 +sVE +I9 +sVD +I1 +sVF +I1 +sVI +I4 +sVH +I1 +sVJ +I5 +sVM +I4 +sVL +I2 +sVO +I7 +sVP +I14 +sVS +I3 +sVR +I7 +sVT +I4 +sVW +I1 +sVY +I1 +sVX +I11 +sV_ +I12 +sVa +I375 +sVà +I97 +sVc +I294 +sVb +I120 +sVe +I236 +sVd +I770 +sVg +I70 +sVf +I147 +sVi +I93 +sVh +I69 +sVk +I1 +sVj +I75 +sVm +I224 +sVl +I448 +sVo +I76 +sVn +I153 +sVq +I224 +sVp +I375 +sVs +I294 +sVr +I145 +sVu +I116 +sVt +I153 +sVw +I12 +sVv +I122 +sVy +I35 +ssVs' +p1042 +(dp1043 +Va +I30 +sVe +I39 +sVé +I35 +sVê +I2 +sVo +I5 +sVi +I25 +sVy +I4 +ssVs[ +p1044 +(dp1045 +V1 +I6 +sV2 +I1 +sV5 +I1 +sV4 +I2 +ssVs_ +p1046 +(dp1047 +V +I1 +sV, +I11 +sV. +I5 +ssV?. +p1048 +(dp1049 +V. +I1 +ssV?- +p1050 +(dp1051 +V- +I9 +ssV? +p1052 +(dp1053 +V +I36 +sVC +I8 +sVB +I1 +sVE +I6 +sVD +I1 +sVI +I6 +sVJ +I8 +sVM +I5 +sVL +I2 +sVO +I3 +sVN +I1 +sVQ +I2 +sVP +I2 +sVS +I1 +sVT +I2 +sVV +I2 +sVY +I1 +sVc +I3 +sVe +I6 +sVd +I26 +sVi +I1 +sVj +I1 +sVl +I3 +sVo +I3 +sVn +I1 +sVq +I3 +sVp +I2 +sVs +I2 +sVv +I2 +sVy +I1 +ssVsy +p1054 +(dp1055 +Vs +I3 +sVl +I4 +ssV,c +p1056 +(dp1057 +Ve +I1 +ssVss +p1058 +(dp1059 +Va +I127 +sV +I45 +sVe +I286 +sVi +I149 +sVè +I10 +sV, +I30 +sVo +I17 +sV. +I11 +sVé +I41 +sV! +I8 +sVu +I24 +sV[ +I1 +sV: +I1 +sV; +I7 +sV? +I1 +ssVsr +p1060 +(dp1061 +Va +I3 +ssVsq +p1062 +(dp1063 +Vu +I55 +sV' +I1 +ssVsp +p1064 +(dp1065 +Va +I27 +sVe +I30 +sVé +I14 +sVè +I12 +sVl +I2 +sVo +I15 +sVi +I8 +sVr +I15 +sVu +I11 +sVh +I1 +ssVsw +p1066 +(dp1067 +Ve +I2 +ssVsu +p1068 +(dp1069 +Va +I4 +sV +I7 +sVc +I10 +sVb +I12 +sVe +I2 +sVg +I1 +sVf +I6 +sVi +I122 +sVj +I5 +sVm +I3 +sVl +I14 +sVp +I9 +sVs +I9 +sVr +I179 +sVy +I5 +ssVst +p1070 +(dp1071 +Va +I74 +sV +I312 +sVe +I82 +sVé +I21 +sVi +I47 +sVh +I1 +sV- +I26 +sV, +I7 +sVo +I23 +sV. +I2 +sVp +I12 +sVs +I1 +sVr +I50 +sVu +I6 +sV_ +I1 +sVè +I6 +sVy +I2 +sV[ +I1 +sV? +I1 +ssVsk +p1072 +(dp1073 +Va +I2 +sV +I3 +sVi +I4 +sV, +I1 +sV. +I1 +sV) +I1 +ssVsi +p1074 +(dp1075 +V +I163 +sV, +I10 +sV. +I3 +sV; +I1 +sV? +I2 +sVa +I3 +sVc +I12 +sVb +I26 +sVe +I102 +sVd +I4 +sVg +I15 +sVf +I4 +sVé +I3 +sVè +I17 +sVm +I7 +sVl +I6 +sVo +I53 +sVn +I50 +sVq +I15 +sVp +I4 +sVs +I10 +sVr +I32 +sVt +I75 +sVv +I1 +sVx +I27 +ssVsh +p1076 +(dp1077 +Va +I2 +sV +I2 +sVi +I3 +sVo +I4 +sV, +I1 +ssVso +p1078 +(dp1079 +V +I8 +sVc +I8 +sVe +I21 +sVf +I4 +sVi +I56 +sVm +I18 +sVl +I47 +sVn +I327 +sVp +I23 +sVr +I31 +sVu +I130 +sVt +I9 +sVy +I1 +sV. +I1 +ssVsm +p1080 +(dp1081 +Va +I5 +sVe +I6 +sVo +I4 +ssV,q +p1082 +(dp1083 +Vu +I1 +ssVsc +p1084 +(dp1085 +Va +I16 +sVe +I4 +sVi +I13 +sVh +I1 +sVl +I15 +sVo +I23 +sVé +I1 +sVr +I11 +sVu +I4 +sVè +I4 +ssVsb +p1086 +(dp1087 +Vi +I1 +sVo +I11 +ssVsa +p1088 +(dp1089 +V +I122 +sVc +I19 +sVb +I8 +sVd +I1 +sVg +I33 +sVi +I173 +sV- +I2 +sVm +I3 +sVl +I13 +sVn +I129 +sVs +I8 +sVr +I6 +sVu +I16 +sVt +I11 +sVv +I42 +sV; +I2 +sV. +I1 +sV, +I2 +sV_ +I1 +ssVsf +p1090 +(dp1091 +Va +I2 +sVi +I1 +sVe +I1 +ssVse +p1092 +(dp1093 +V! +I1 +sV +I326 +sV- +I3 +sV, +I51 +sV. +I34 +sV; +I15 +sV: +I4 +sV? 
+I3 +sV_ +I3 +sVa +I55 +sVc +I21 +sVe +I2 +sVd +I8 +sVi +I40 +sVm +I40 +sVl +I73 +sVo +I1 +sVn +I96 +sVq +I5 +sVp +I8 +sVs +I139 +sVr +I166 +sVu +I26 +sVt +I6 +sVv +I2 +sVy +I1 +sVx +I3 +sVz +I46 +ssV,y +p1094 +(dp1095 +V +I1 +ssVRu +p1096 +(dp1097 +Vp +I2 +sVs +I8 +ssVeç +p1098 +(dp1099 +Vû +I1 +sVu +I14 +sVo +I8 +ssVRe +p1100 +(dp1101 +Va +I1 +sVf +I1 +sVm +I2 +sVl +I1 +sVp +I1 +sVv +I1 +ssVRa +p1102 +(dp1103 +Vp +I1 +sVb +I1 +sVl +I1 +sVg +I3 +sVv +I1 +ssVeû +p1104 +(dp1105 +Vm +I1 +sVt +I10 +ssVRo +p1106 +(dp1107 +Vb +I3 +sVm +I5 +sVs +I2 +sVu +I4 +sVt +I1 +sVy +I1 +ssVRh +p1108 +(dp1109 +Vo +I1 +ssVRi +p1110 +(dp1111 +Vc +I2 +sVe +I3 +sVg +I2 +ssVRT +p1112 +(dp1113 +VI +I2 +sV* +I2 +ssVRU +p1114 +(dp1115 +VE +I3 +sVT +I1 +ssVRW +p1116 +(dp1117 +VI +I1 +ssVRP +p1118 +(dp1119 +VO +I1 +ssVRR +p1120 +(dp1121 +VA +I4 +ssVRS +p1122 +(dp1123 +VI +I2 +sVQ +I1 +ssVÉE +p1124 +(dp1125 +VS +I1 +ssVRD +p1126 +(dp1127 +VE +I1 +ssVRE +p1128 +(dp1129 +V! +I1 +sV +I32 +sVC +I1 +sVM +I1 +sV, +I2 +sV. +I1 +sVS +I3 +sVA +I2 +ssVRG +p1130 +(dp1131 +V" +I2 +sV- +I6 +ssVRA +p1132 +(dp1133 +VC +I1 +sVD +I1 +sVI +I1 +sVH +I1 +sVL +I1 +sVN +I4 +ssVRC +p1134 +(dp1135 +VH +I1 +ssVRM +p1136 +(dp1137 +VI +I1 +ssVRO +p1138 +(dp1139 +VU +I1 +sVV +I1 +sVJ +I8 +sVM +I1 +sVN +I1 +ssVRI +p1140 +(dp1141 +VC +I1 +sVB +I1 +sVE +I1 +sVM +I1 +sVN +I2 +sVS +I1 +ssVR +p1142 +(dp1143 +VA +I2 +sV +I1 +sVC +I1 +sVB +I1 +sVF +I2 +sVI +I2 +sVO +I1 +sVN +I1 +sVP +I3 +sVS +I1 +sVR +I2 +sV" +I1 +ssVR, +p1144 +(dp1145 +V +I6 +ssVR. +p1146 +(dp1147 +V +I1 +ssVR* +p1148 +(dp1149 +V: +I1 +ssVem +p1150 +(dp1151 +Va +I83 +sV +I4 +sVb +I79 +sVe +I206 +sVi +I48 +sVm +I44 +sVo +I67 +sVn +I2 +sVp +I104 +sVs +I1 +sVu +I2 +ssVel +p1152 +(dp1153 +Va +I38 +sV +I77 +sVc +I1 +sVe +I16 +sVà +I1 +sVi +I15 +sVl +I357 +sVo +I16 +sV, +I8 +sVq +I88 +sVé +I6 +sVs +I9 +sV! +I5 +sVu +I19 +sV. +I1 +sVy +I3 +sV; +I3 +sV: +I1 +sVp +I1 +sV[ +I2 +ssVà +p1154 +(dp1155 +VA +I4 +sVC +I45 +sVB +I9 +sVE +I1 +sVD +I1 +sVH +I1 +sVM +I20 +sVL +I7 +sVN +I3 +sVP +I16 +sVS +I5 +sVR +I6 +sVU +I1 +sVT +I3 +sVV +I27 +sV_ +I3 +sVa +I2 +sVc +I49 +sVb +I6 +sVe +I11 +sVd +I31 +sVg +I9 +sVf +I10 +sVh +I1 +sVj +I2 +sVm +I38 +sVl +I133 +sVo +I1 +sVn +I4 +sVq +I23 +sVp +I38 +sVs +I37 +sVr +I6 +sVu +I38 +sVt +I41 +sVv +I17 +ssVen +p1156 +(dp1157 +V! +I9 +sV +I590 +sV- +I12 +sV, +I25 +sV. +I14 +sVh +I1 +sV; +I6 +sV: +I5 +sV? +I1 +sVô +I1 +sVç +I4 +sV[ +I2 +sVa +I69 +sVc +I143 +sVb +I31 +sVe +I32 +sVd +I266 +sVg +I10 +sVf +I64 +sVi +I61 +sVè +I4 +sVê +I4 +sVl +I5 +sVo +I13 +sVn +I42 +sVq +I1 +sVé +I5 +sVs +I130 +sVr +I10 +sVu +I30 +sVt +I1178 +sVv +I36 +sVy +I1 +sVz +I2 +ssVei +p1158 +(dp1159 +Vb +I2 +sVg +I33 +sVl +I144 +sVn +I41 +sVp +I1 +sVt +I2 +sVv +I11 +sVz +I1 +ssVeh +p1160 +(dp1161 +V! +I1 +sVa +I1 +sVl +I4 +ssVej +p1162 +(dp1163 +Ve +I1 +ssVee +p1164 +(dp1165 +V! +I1 +sV +I12 +sVd +I4 +sV, +I1 +sVn +I3 +sVp +I1 +sVs +I4 +sVr +I2 +sVl +I2 +ssVed +p1166 +(dp1167 +V +I70 +sVe +I1 +sVi +I17 +sV, +I9 +sVo +I8 +sV. +I3 +sV) +I1 +sVs +I15 +sVu +I4 +sV_ +I2 +sV? 
+I1 +ssVeg +p1168 +(dp1169 +Vi +I5 +sVa +I30 +sVr +I2 +sVo +I2 +ssVef +p1170 +(dp1171 +Va +I1 +sV +I3 +sVe +I10 +sVf +I34 +sV- +I2 +sVo +I18 +sVs +I1 +sVr +I1 +sVu +I9 +ssVea +p1172 +(dp1173 +V +I8 +sVc +I6 +sVd +I21 +sVi +I7 +sVm +I1 +sVn +I8 +sVs +I14 +sVr +I9 +sVu +I201 +sVt +I3 +sVv +I1 +ssVec +p1174 +(dp1175 +V +I128 +sVe +I22 +sVi +I14 +sVh +I3 +sVk +I7 +sVo +I37 +sVs +I1 +sVr +I9 +sVu +I7 +sVt +I82 +sVz +I1 +ssVeb +p1176 +(dp1177 +Va +I1 +sV +I2 +sVr +I5 +sVu +I2 +sVo +I1 +ssVey +p1178 +(dp1179 +Vd +I1 +sV +I15 +sV, +I4 +sV_ +I1 +sV. +I1 +ssVex +p1180 +(dp1181 +Va +I7 +sVc +I22 +sVe +I20 +sVi +I9 +sVé +I8 +sVo +I1 +sVq +I1 +sVp +I28 +sVt +I44 +sV_ +I1 +ssVez +p1182 +(dp1183 +V! +I3 +sV +I236 +sV- +I63 +sV, +I34 +sV. +I8 +sV; +I3 +ssVeu +p1184 +(dp1185 +V! +I2 +sV +I117 +sVe +I13 +sVf +I7 +sVi +I9 +sVm +I2 +sVl +I22 +sVn +I31 +sVp +I8 +sVs +I34 +sVr +I636 +sVt +I70 +sVv +I17 +sVx +I290 +sV; +I2 +sV. +I4 +sV, +I6 +sV? +I1 +ssVet +p1186 +(dp1187 +Va +I21 +sV +I1033 +sVc +I3 +sVe +I26 +sVi +I46 +sVh +I5 +sV- +I1 +sV, +I24 +sVo +I18 +sV. +I11 +sVé +I10 +sVs +I22 +sVr +I12 +sVu +I5 +sVt +I187 +sV; +I4 +sV_ +I1 +sV/ +I3 +ssVew +p1188 +(dp1189 +V +I11 +sVs +I3 +sVt +I1 +ssVev +p1190 +(dp1191 +Va +I52 +sVâ +I1 +sVe +I54 +sVé +I11 +sVè +I1 +sVê +I1 +sVo +I12 +sVi +I21 +sVr +I3 +sVu +I3 +ssVeq +p1192 +(dp1193 +Vu +I46 +ssVep +p1194 +(dp1195 +Va +I19 +sV +I1 +sVe +I12 +sVl +I4 +sVo +I2 +sVp +I4 +sVr +I22 +sVu +I10 +sVt +I21 +ssVes +p1196 +(dp1197 +Va +I33 +sV +I1914 +sVc +I22 +sVe +I10 +sV) +I1 +sV- +I23 +sV, +I268 +sVo +I8 +sV. +I133 +sVq +I11 +sVp +I67 +sVs +I151 +sV! +I11 +sVu +I3 +sVt +I425 +sV; +I67 +sV: +I23 +sV[ +I3 +sV_ +I10 +sV? +I9 +ssVer +p1198 +(dp1199 +V! +I4 +sV +I531 +sV* +I2 +sV- +I11 +sV, +I79 +sV/ +I1 +sV. +I62 +sV; +I26 +sV: +I7 +sV? +I3 +sVç +I13 +sV] +I2 +sVa +I85 +sVc +I36 +sVb +I1 +sVe +I40 +sVd +I43 +sVg +I43 +sVf +I7 +sVi +I46 +sVm +I50 +sVl +I5 +sVo +I20 +sVn +I44 +sVp +I5 +sVs +I197 +sVr +I120 +sVt +I60 +sVw +I1 +sVv +I53 +ssVeM +p1200 +(dp1201 +Va +I1 +ssVeB +p1202 +(dp1203 +Vo +I46 +ssVe] +p1204 +(dp1205 +V +I1 +ssVe_ +p1206 +(dp1207 +V +I6 +sV, +I13 +sV. +I13 +sV[ +I1 +sV: +I1 +sV; +I2 +ssVR +p1208 +(dp1209 +VC +I1 +ssVfy +p1210 +(dp1211 +V +I2 +sV, +I1 +ssVRÉ +p1212 +(dp1213 +VF +I1 +ssVe- +p1214 +(dp1215 +Vc +I9 +sVE +I1 +sVD +I2 +sVG +I2 +sVf +I1 +sVj +I1 +sVm +I6 +sVl +I9 +sVN +I1 +sVs +I2 +sVt +I5 +sVv +I2 +ssVe, +p1216 +(dp1217 +Vq +I1 +sV +I911 +ssVe. +p1218 +(dp1219 +V +I443 +sV- +I2 +sV. +I2 +ssVe) +p1220 +(dp1221 +V +I3 +ssVe! +p1222 +(dp1223 +V) +I1 +sV +I47 +sV- +I1 +ssVe +p1224 +(dp1225 +Vê +I4 +sVî +I1 +sVâ +I4 +sVé +I69 +sV" +I5 +sV$ +I1 +sV( +I2 +sV1 +I22 +sV3 +I2 +sV2 +I3 +sV5 +I1 +sV4 +I7 +sV7 +I1 +sV6 +I1 +sV: +I1 +sV? +I1 +sVô +I1 +sVA +I6 +sVC +I141 +sVB +I16 +sVE +I2 +sVD +I11 +sVG +I14 +sVF +I16 +sVI +I11 +sVH +I6 +sVK +I4 +sVJ +I9 +sVM +I32 +sVL +I30 +sVN +I6 +sVP +I65 +sVS +I11 +sVR +I7 +sVU +I4 +sVT +I19 +sVW +I3 +sVV +I23 +sVX +I42 +sV_ +I18 +sVa +I240 +sVà +I123 +sVc +I463 +sVb +I169 +sVe +I291 +sVd +I824 +sVg +I88 +sVf +I283 +sVi +I68 +sVh +I53 +sVk +I1 +sVj +I156 +sVm +I479 +sVl +I694 +sVo +I77 +sVn +I264 +sVq +I304 +sVp +I558 +sVs +I511 +sVr +I229 +sVu +I64 +sVt +I320 +sVw +I12 +sVv +I327 +sVy +I8 +sVz +I1 +ssVe? +p1226 +(dp1227 +V +I68 +sV- +I1 +ssVe; +p1228 +(dp1229 +V +I260 +ssVe: +p1230 +(dp1231 +V +I87 +ssV.. +p1232 +(dp1233 +V +I6 +sV- +I1 +sV. 
+I18 +ssVxi +p1234 +(dp1235 +Vc +I1 +sVb +I1 +sVè +I4 +sVm +I3 +sVl +I1 +sVo +I4 +sVs +I2 +ssVxo +p1236 +(dp1237 +Vr +I1 +ssVxc +p1238 +(dp1239 +Ve +I15 +sVé +I1 +sVè +I1 +sVl +I2 +sVo +I2 +sVu +I2 +ssVxa +p1240 +(dp1241 +Vs +I1 +sVc +I2 +sVm +I2 +sV, +I1 +sVn +I8 +ssV1] +p1242 +(dp1243 +V +I31 +sV, +I8 +sV. +I5 +sV; +I4 +sV: +I1 +sV? +I2 +ssVxe +p1244 +(dp1245 +V +I2 +sVs +I4 +sVr +I9 +sVm +I9 +sV, +I1 +ssVxp +p1246 +(dp1247 +Ve +I5 +sVé +I4 +sVl +I7 +sVo +I3 +sVi +I1 +sVr +I6 +sVu +I2 +ssVxq +p1248 +(dp1249 +Vu +I1 +ssVxt +p1250 +(dp1251 +Va +I2 +sV +I17 +sVe +I6 +sVo +I1 +sVs +I4 +sVr +I21 +ssVDè +p1252 +(dp1253 +Vs +I3 +ssVDé +p1254 +(dp1255 +Vc +I1 +ssVx[ +p1256 +(dp1257 +V1 +I1 +ssV1e +p1258 +(dp1259 +Vr +I1 +ssVx_ +p1260 +(dp1261 +V, +I2 +ssVx. +p1262 +(dp1263 +V) +I1 +sV +I27 +sVc +I2 +ssVx, +p1264 +(dp1265 +V +I40 +ssVx- +p1266 +(dp1267 +Va +I1 +sVb +I2 +sVd +I2 +sVh +I1 +sVm +I3 +sVs +I1 +sVt +I1 +ssVx +p1268 +(dp1269 +Vi +I3 +sVB +I1 +sVD +I1 +sVH +I1 +sVJ +I2 +sVP +I1 +sVa +I26 +sVà +I5 +sVc +I20 +sVb +I3 +sVe +I18 +sVd +I42 +sVg +I7 +sVf +I28 +sVé +I15 +sVh +I8 +sVj +I3 +sVm +I42 +sVl +I13 +sVo +I3 +sVn +I8 +sVq +I31 +sVp +I27 +sVs +I14 +sVr +I10 +sVu +I3 +sVt +I7 +sVv +I15 +sVy +I1 +ssVx! +p1270 +(dp1271 +V +I1 +ssVx: +p1272 +(dp1273 +V +I2 +ssVx; +p1274 +(dp1275 +V +I15 +ssVx? +p1276 +(dp1277 +V +I1 +ssV11 +p1278 +(dp1279 +V +I1 +sV/ +I1 +sV. +I2 +sV3 +I1 +sV; +I1 +sV] +I2 +ssV10 +p1280 +(dp1281 +Va +I2 +sV +I4 +sV. +I4 +sV0 +I4 +sV9 +I1 +sV] +I2 +ssV13 +p1282 +(dp1283 +V +I1 +ssV12 +p1284 +(dp1285 +V. +I1 +sV4 +I1 +sV6 +I1 +sV; +I1 +sV] +I2 +sV_ +I1 +ssV15 +p1286 +(dp1287 +V +I1 +sV) +I1 +sV, +I1 +sV. +I1 +sV0 +I1 +sV2 +I1 +sV4 +I2 +sV9 +I2 +ssV14 +p1288 +(dp1289 +V +I2 +sV- +I1 +sV7 +I1 +ssV17 +p1290 +(dp1291 +V1 +I2 +sV0 +I2 +sV3 +I8 +sV5 +I14 +sV7 +I2 +sV6 +I20 +sV9 +I1 +sV8 +I3 +ssV16 +p1292 +(dp1293 +V9 +I1 +sV +I1 +sV1 +I2 +sV4 +I1 +sV7 +I1 +ssV19 +p1294 +(dp1295 +V9 +I5 +sV7 +I3 +ssV18 +p1296 +(dp1297 +V1 +I1 +sV0 +I2 +sV2 +I3 +sV, +I1 +sV +I1 +ssV1; +p1298 +(dp1299 +V +I2 +ssV1 +p1300 +(dp1301 +VO +I1 +sV +I1 +sVb +I1 +sVf +I1 +sVJ +I2 +sVo +I1 +sVN +I1 +sVs +I1 +sVT +I1 +ssV1( +p1302 +(dp1303 +Vc +I2 +ssV1* +p1304 +(dp1305 +V* +I1 +ssV1- +p1306 +(dp1307 +V2 +I1 +ssV1, +p1308 +(dp1309 +V +I5 +ssV1/ +p1310 +(dp1311 +V0 +I1 +ssV1. +p1312 +(dp1313 +V +I2 +sVt +I2 +ssVDO +p1314 +(dp1315 +VC +I2 +sVM +I2 +sVT +I1 +sVN +I1 +ssVxé +p1316 +(dp1317 +Vc +I8 +ssVDI +p1318 +(dp1319 +VC +I2 +sVE +I1 +sVD +I4 +sVN +I2 +sVS +I2 +sVR +I1 +sVU +I1 +sVT +I3 +ssVDD +p1320 +(dp1321 +VI +I1 +ssVDE +p1322 +(dp1323 +V +I10 +sVD +I1 +sVM +I1 +sV, +I2 +sVN +I2 +sVR +I2 +sVT +I1 +sV. 
+I1 +ssVDC +p1324 +(dp1325 +VC +I1 +ssVDA +p1326 +(dp1327 +VM +I3 +sVN +I1 +ssVDU +p1328 +(dp1329 +VI +I1 +sV +I2 +ssVDo +p1330 +(dp1331 +V +I1 +sVr +I3 +sVm +I2 +sVn +I4 +ssVDi +p1332 +(dp1333 +Va +I1 +sVs +I2 +sVe +I26 +sVd +I1 +sVo +I1 +ssVDe +p1334 +(dp1335 +V +I11 +sVc +I6 +sVf +I4 +sVm +I1 +sVl +I1 +sVn +I1 +sVs +I5 +sVu +I2 +ssVDa +p1336 +(dp1337 +Vk +I1 +sVr +I2 +sVm +I4 +sVt +I1 +sVn +I10 +ssVD, +p1338 +(dp1339 +V +I2 +ssVD* +p1340 +(dp1341 +V +I1 +ssVD' +p1342 +(dp1343 +VA +I1 +sVa +I1 +sVu +I1 +ssVD +p1344 +(dp1345 +VM +I1 +sVT +I5 +sVW +I1 +ssVWE +p1346 +(dp1347 +VR +I1 +ssVWA +p1348 +(dp1349 +VR +I4 +sVN +I1 +ssVWI +p1350 +(dp1351 +VS +I1 +ssVWH +p1352 +(dp1353 +VA +I1 +ssVWe +p1354 +(dp1355 +V +I10 +sVs +I2 +sVr +I1 +sVb +I2 +sVl +I1 +ssVWa +p1356 +(dp1357 +Vs +I1 +ssVWo +p1358 +(dp1359 +Vr +I1 +ssVWi +p1360 +(dp1361 +Vs +I1 +sVr +I1 +ssVWh +p1362 +(dp1363 +Vi +I1 +sVy +I1 +ssVWy +p1364 +(dp1365 +Vo +I1 +ssVÉP +p1366 +(dp1367 +VE +I1 +ssVjé +p1368 +(dp1369 +Vs +I33 +ssVjà +p1370 +(dp1371 +V +I17 +ssVÉF +p1372 +(dp1373 +VA +I1 +ssV$2 +p1374 +(dp1375 +V +I1 +ssVÉp +p1376 +(dp1377 +Vi +I1 +ssV.o +p1378 +(dp1379 +Vr +I2 +ssVÉt +p1380 +(dp1381 +Vo +I1 +ssVÉl +p1382 +(dp1383 +Vé +I1 +sVi +I1 +sVa +I1 +ssVÉn +p1384 +(dp1385 +Vé +I2 +ssVPM +p1386 +(dp1387 +VB +I1 +ssVÉd +p1388 +(dp1389 +Vi +I1 +sVe +I1 +sVo +I5 +ssVÉg +p1390 +(dp1391 +Vy +I1 +sVl +I2 +ssV.c +p1392 +(dp1393 +Vo +I2 +ssVus +p1394 +(dp1395 +V! +I1 +sV +I842 +sVp +I3 +sV- +I9 +sV, +I51 +sV. +I14 +sV; +I6 +sV: +I2 +sV? +I14 +sV[ +I1 +sV_ +I1 +sVa +I17 +sVc +I2 +sVe +I78 +sVé +I6 +sVè +I1 +sVl +I1 +sVo +I1 +sVq +I26 +sVi +I34 +sVs +I90 +sVu +I4 +sVt +I36 +ssVj' +p1396 +(dp1397 +Va +I81 +sVe +I9 +sVi +I2 +sVo +I1 +sVé +I14 +sVy +I6 +ssV#6 +p1398 +(dp1399 +V +I1 +ssV#4 +p1400 +(dp1401 +V6 +I1 +ssV.z +p1402 +(dp1403 +Vi +I2 +ssVju +p1404 +(dp1405 +Vi +I2 +sVp +I1 +sVs +I42 +sVr +I8 +sVg +I16 +ssVjo +p1406 +(dp1407 +Va +I1 +sVi +I9 +sVs +I2 +sVu +I137 +sVl +I10 +ssVje +p1408 +(dp1409 +V +I287 +sVc +I34 +sV, +I1 +sV. +I1 +sVs +I15 +sVu +I30 +sVt +I32 +sV? +I3 +ssVja +p1410 +(dp1411 +Vc +I1 +sVm +I56 +sVl +I4 +sVn +I6 +sVr +I11 +sVu +I2 +ssVua +p1412 +(dp1413 +V +I26 +sV: +I2 +sVd +I4 +sVg +I1 +sVi +I38 +sV- +I1 +sVl +I4 +sVn +I77 +sVq +I1 +sV. +I1 +sVr +I28 +sVu +I1 +sVt +I32 +sVz +I1 +sVb +I1 +sV, +I2 +ssV6 +p1414 +(dp1415 +Vi +I1 +sVa +I1 +sV; +I1 +sVm +I1 +sVn +I1 +ssV6, +p1416 +(dp1417 +V +I8 +ssV6. +p1418 +(dp1419 +V) +I1 +sV +I4 +ssV60 +p1420 +(dp1421 +V0 +I1 +sV +I1 +sV. +I1 +ssV61 +p1422 +(dp1423 +V0 +I2 +sV; +I1 +sV, +I3 +sV. +I2 +ssV62 +p1424 +(dp1425 +V2 +I2 +sV. +I1 +ssV63 +p1426 +(dp1427 +V. +I2 +ssV64 +p1428 +(dp1429 +V4 +I1 +sV- +I2 +sV, +I1 +sV. 
+I1 +ssV65 +p1430 +(dp1431 +V0 +I1 +sV5 +I1 +ssV66 +p1432 +(dp1433 +V; +I1 +sV, +I3 +ssV67 +p1434 +(dp1435 +V; +I1 +sV2 +I1 +ssV68 +p1436 +(dp1437 +V, +I2 +ssV69 +p1438 +(dp1439 +V, +I1 +sV6 +I1 +ssV6; +p1440 +(dp1441 +V +I2 +ssVï +p1442 +(dp1443 +Vc +I1 +ssV_l +p1444 +(dp1445 +Vi +I1 +sVa +I1 +sVe +I2 +sV' +I2 +ssVrâ +p1446 +(dp1447 +Vc +I13 +sVt +I1 +sVn +I1 +ssV» +p1448 +(dp1449 +V( +I1 +ssVïo +p1450 +(dp1451 +Vn +I4 +ssVïm +p1452 +(dp1453 +Va +I1 +sVb +I1 +ssVïf +p1454 +(dp1455 +V +I1 +ssVïe +p1456 +(dp1457 +V; +I1 +ssVïa +p1458 +(dp1459 +Vr +I2 +sVd +I1 +ssV_a +p1460 +(dp1461 +Vu +I1 +sVl +I1 +ssVïv +p1462 +(dp1463 +Ve +I1 +ssVïs +p1464 +(dp1465 +V +I1 +sVs +I1 +sVe +I2 +ssV6] +p1466 +(dp1467 +V +I3 +sV; +I1 +ssVïq +p1468 +(dp1469 +Vu +I1 +ssV' +p1470 +(dp1471 +VI +I1 +sVL +I1 +ssVI; +p1472 +(dp1473 +V +I1 +ssVI: +p1474 +(dp1475 +V +I1 +ssVI- +p1476 +(dp1477 +V1 +I1 +ssVI, +p1478 +(dp1479 +V +I32 +ssVI. +p1480 +(dp1481 +V +I18 +ssVI +p1482 +(dp1483 +Va +I1 +sVf +I2 +sVd +I12 +sVn +I1 +ssVà? +p1484 +(dp1485 +V +I2 +ssVIX +p1486 +(dp1487 +V +I1 +sV. +I4 +ssVI[ +p1488 +(dp1489 +V1 +I2 +ssVI_ +p1490 +(dp1491 +V +I1 +ssVIS +p1492 +(dp1493 +V +I3 +sVC +I1 +sV" +I1 +sVE +I1 +sVM +I2 +sV, +I1 +sVO +I1 +sVS +I1 +sVT +I1 +ssVIR +p1494 +(dp1495 +VM +I1 +sVE +I4 +sV, +I1 +ssVIU +p1496 +(dp1497 +VM +I1 +ssVIT +p1498 +(dp1499 +V +I2 +sVE +I3 +sVI +I4 +sVH +I1 +sVN +I1 +sVR +I30 +sVY +I4 +ssVIV +p1500 +(dp1501 +V +I2 +sVE +I2 +sV. +I3 +sV_ +I2 +sV: +I1 +sV? +I1 +ssVII +p1502 +(dp1503 +V +I16 +sVI +I22 +sV, +I18 +sV. +I10 +sV[ +I2 +sV_ +I1 +ssVIM +p1504 +(dp1505 +VI +I5 +sVP +I2 +sVE +I2 +ssVIL +p1506 +(dp1507 +VI +I3 +sV +I1 +sVS +I1 +ssVIO +p1508 +(dp1509 +VN +I6 +ssVIN +p1510 +(dp1511 +V +I6 +sVC +I3 +sVD +I4 +sVG +I3 +sVT +I2 +ssVIA +p1512 +(dp1513 +VB +I1 +sVL +I1 +ssVIC +p1514 +(dp1515 +V +I4 +sVU +I1 +sVE +I1 +sVT +I1 +ssVIB +p1516 +(dp1517 +VI +I1 +sVR +I1 +sVU +I1 +ssVIE +p1518 +(dp1519 +V +I1 +sVS +I3 +sVD +I1 +sVN +I1 +ssVID +p1520 +(dp1521 +VE +I5 +sVO +I1 +ssVIG +p1522 +(dp1523 +VE +I1 +ssVIF +p1524 +(dp1525 +V +I3 +ssVïè +p1526 +(dp1527 +Vt +I1 +ssVIs +p1528 +(dp1529 +Vs +I9 +sVr +I3 +sVm +I2 +sVl +I1 +ssVIr +p1530 +(dp1531 +Va +I1 +ssVIt +p1532 +(dp1533 +Va +I11 +sV +I1 +ssVIv +p1534 +(dp1535 +Va +I5 +ssVIm +p1536 +(dp1537 +Va +I1 +ssVIl +p1538 +(dp1539 +V +I129 +sVs +I15 +sVl +I1 +ssVIo +p1540 +(dp1541 +Vw +I1 +ssVIn +p1542 +(dp1543 +Vc +I1 +sVt +I3 +sVd +I3 +sV +I1 +sVf +I2 +ssVIc +p1544 +(dp1545 +Vi +I1 +ssVIb +p1546 +(dp1547 +Va +I3 +ssVIe +p1548 +(dp1549 +Vr +I4 +ssVId +p1550 +(dp1551 +Ve +I2 +ssVIg +p1552 +(dp1553 +Vn +I1 +ssVIf +p1554 +(dp1555 +V +I11 +ssV(~ +p1556 +(dp1557 +V) +I1 +ssV(s +p1558 +(dp1559 +Vu +I1 +ssV(p +p1560 +(dp1561 +Va +I1 +ssVvâ +p1562 +(dp1563 +Vm +I2 +sVt +I1 +ssV(t +p1564 +(dp1565 +Vh +I1 +ssVA, +p1566 +(dp1567 +V +I1 +ssV(i +p1568 +(dp1569 +Vf +I2 +sVn +I2 +ssV(o +p1570 +(dp1571 +Vr +I3 +sVu +I1 +sVn +I1 +ssV(c +p1572 +(dp1573 +V) +I2 +ssV(a +p1574 +(dp1575 +Vs +I1 +sVn +I2 +ssV(f +p1576 +(dp1577 +Vr +I1 +ssV(d +p1578 +(dp1579 +Vû +I1 +ssV2O +p1580 +(dp1581 +V. +I1 +ssV(_ +p1582 +(dp1583 +V) +I1 +sVM +I1 +ssV(V +p1584 +(dp1585 +Vo +I1 +ssV(T +p1586 +(dp1587 +Vh +I1 +ssV(J +p1588 +(dp1589 +Ve +I1 +ssVvè +p1590 +(dp1591 +Vr +I10 +ssVTM +p1592 +(dp1593 +V +I1 +ssV(N +p1594 +(dp1595 +Vo +I1 +ssV(C +p1596 +(dp1597 +V) +I1 +ssVca +p1598 +(dp1599 +Vc +I7 +sVb +I23 +sVd +I12 +sVf +I3 +sVi +I11 +sVm +I91 +sVl +I16 +sV. 
+I2 +sVp +I29 +sVs +I15 +sVr +I82 +sVu +I16 +sVt +I17 +sVv +I1 +sVy +I3 +sVn +I49 +ssVêt +p1600 +(dp1601 +Va +I2 +sV +I6 +sVe +I67 +sVi +I1 +sVè +I1 +sV, +I1 +sV. +I2 +sVr +I105 +sVu +I3 +ssV(3 +p1602 +(dp1603 +V) +I2 +ssVêv +p1604 +(dp1605 +Ve +I4 +ssVêl +p1606 +(dp1607 +Va +I1 +sVé +I1 +sVe +I3 +ssVêm +p1608 +(dp1609 +Ve +I79 +ssV(# +p1610 +(dp1611 +V6 +I1 +ssVêc +p1612 +(dp1613 +Vh +I9 +ssVoï +p1614 +(dp1615 +Vq +I1 +sVa +I3 +sVs +I2 +sVm +I1 +sVo +I1 +ssVoë +p1616 +(dp1617 +Vm +I1 +sVt +I2 +ssVoé +p1618 +(dp1619 +Vs +I1 +ssVoè +p1620 +(dp1621 +Vt +I3 +ssVoû +p1622 +(dp1623 +Vt +I31 +ssVoù +p1624 +(dp1625 +V +I48 +sV, +I1 +ssVd" +p1626 +(dp1627 +V +I1 +ssVo. +p1628 +(dp1629 +V +I19 +sVo +I2 +sVn +I1 +ssVo- +p1630 +(dp1631 +Vc +I1 +sVt +I1 +sVd +I7 +ssVo, +p1632 +(dp1633 +V +I43 +ssVo! +p1634 +(dp1635 +V +I1 +ssVo +p1636 +(dp1637 +V" +I1 +sVi +I2 +sVG +I1 +sVP +I1 +sVT +I3 +sVa +I14 +sVà +I2 +sVc +I8 +sVe +I6 +sVd +I12 +sVg +I3 +sVf +I3 +sVé +I2 +sVh +I2 +sVk +I1 +sVm +I5 +sVl +I5 +sVo +I5 +sVn +I9 +sVq +I1 +sVp +I9 +sVs +I7 +sVr +I2 +sVu +I1 +sVt +I17 +sVw +I4 +sVv +I1 +sVy +I3 +ssVo; +p1638 +(dp1639 +V +I9 +ssVo: +p1640 +(dp1641 +V +I4 +ssVo_ +p1642 +(dp1643 +V; +I1 +sV, +I1 +ssV; +p1644 +(dp1645 +Vê +I1 +sVA +I1 +sVC +I9 +sVD +I3 +sVJ +I1 +sVO +I2 +sVP +I2 +sV_ +I1 +sVa +I10 +sVà +I1 +sVc +I66 +sVe +I114 +sVd +I16 +sVf +I1 +sVi +I103 +sVh +I1 +sVj +I81 +sVm +I78 +sVl +I61 +sVo +I22 +sVn +I14 +sVq +I14 +sVp +I6 +sVs +I16 +sVr +I2 +sVu +I7 +sVt +I14 +sVv +I16 +sVy +I1 +ssVo[ +p1646 +(dp1647 +V1 +I2 +ssVoo +p1648 +(dp1649 +Vs +I2 +sVk +I50 +sVd +I1 +sVf +I2 +ssVon +p1650 +(dp1651 +V! +I2 +sV +I749 +sV" +I1 +sV' +I3 +sV- +I3 +sV, +I90 +sV. +I40 +sV; +I19 +sV: +I5 +sV? +I3 +sVg +I47 +sV[ +I1 +sV_ +I1 +sVa +I36 +sVc +I68 +sVe +I20 +sVd +I288 +sVç +I8 +sVf +I12 +sVi +I22 +sVh +I8 +sVj +I1 +sVl +I6 +sVo +I11 +sVn +I307 +sVq +I2 +sVs +I381 +sVu +I1 +sVt +I326 +sVv +I29 +sVz +I2 +ssVom +p1652 +(dp1653 +Va +I10 +sV +I18 +sVb +I47 +sVe +I72 +sVi +I16 +sVé +I2 +sVè +I3 +sVm +I323 +sVo +I2 +sV. +I3 +sVp +I62 +sVs +I2 +sVt +I2 +sVn +I1 +sV> +I1 +ssVol +p1654 +(dp1655 +Va +I19 +sV +I6 +sVc +I1 +sVâ +I1 +sVe +I45 +sVd +I15 +sVi +I55 +sVè +I3 +sVl +I28 +sVo +I26 +sV, +I5 +sVé +I18 +sVs +I4 +sVu +I22 +sVt +I31 +sV. +I2 +sV; +I1 +sV: +I1 +ssVok +p1656 +(dp1657 +Va +I1 +sV +I19 +sV) +I1 +sV, +I6 +sV. +I2 +sV0 +I3 +sVs +I19 +ssVoj +p1658 +(dp1659 +Ve +I32 +ssVoi +p1660 +(dp1661 +V! +I14 +sV +I100 +sVc +I6 +sV: +I2 +sVe +I21 +sVd +I7 +sVg +I11 +sV- +I6 +sVl +I34 +sVn +I123 +sVq +I15 +sVs +I250 +sVr +I165 +sVt +I75 +sVv +I2 +sVx +I16 +sV; +I5 +sV. +I6 +sV, +I37 +sV? +I2 +ssVoh +p1662 +(dp1663 +V! +I2 +sVi +I1 +ssVog +p1664 +(dp1665 +Ve +I5 +sVi +I13 +sVl +I4 +sVo +I2 +sVn +I4 +sVé +I2 +sVr +I4 +sVu +I4 +ssVof +p1666 +(dp1667 +Va +I1 +sV +I66 +sVe +I3 +sVf +I23 +sVé +I3 +sV, +I1 +sVo +I7 +sVl +I10 +sVi +I6 +sVr +I2 +sVt +I5 +sV. +I1 +ssVoe +p1668 +(dp1669 +Vi +I6 +sVs +I2 +sVu +I57 +ssVod +p1670 +(dp1671 +Va +I4 +sV +I3 +sVe +I13 +sVi +I14 +sVo +I4 +sVé +I4 +sVs +I1 +sVu +I18 +sV; +I1 +ssVoc +p1672 +(dp1673 +Va +I3 +sV +I3 +sVc +I10 +sVe +I14 +sVi +I12 +sVh +I42 +sVk +I1 +sVo +I22 +sVé +I5 +sVs +I1 +sV! +I1 +sVu +I19 +sVt +I15 +sVè +I3 +sVr +I2 +ssVob +p1674 +(dp1675 +Va +I5 +sVe +I15 +sVé +I3 +sVj +I3 +sVl +I16 +sVo +I4 +sVi +I1 +sVs +I7 +sVr +I2 +sVu +I1 +sVt +I3 +sVv +I1 +ssVoa +p1676 +(dp1677 +Vc +I2 +sVb +I1 +sVd +I3 +sVi +I1 +sVm +I1 +sVl +I1 +sVn +I1 +ssVoy +p1678 +(dp1679 +Vé +I8 +sVa +I83 +sVâ +I1 +sVe +I63 +sVo +I1 +ssVox +p1680 +(dp1681 +Ve +I2 +sV. 
+I2 +ssVow +p1682 +(dp1683 +Va +I1 +sV +I14 +sV: +I1 +sVe +I1 +sVi +I3 +sV, +I3 +sV. +I1 +sVs +I1 +sVn +I4 +ssVov +p1684 +(dp1685 +Vi +I29 +sVe +I16 +ssVou +p1686 +(dp1687 +V! +I1 +sV +I105 +sVp +I113 +sV, +I4 +sV. +I2 +sVh +I3 +sV; +I1 +sV: +I1 +sVa +I23 +sVc +I32 +sVb +I14 +sVe +I37 +sVd +I22 +sVg +I18 +sVf +I11 +sVé +I9 +sVè +I1 +sVj +I49 +sVm +I1 +sVl +I72 +sVï +I2 +sVn +I16 +sVq +I3 +sVi +I36 +sVs +I709 +sVr +I573 +sVt +I372 +sVv +I234 +sVx +I18 +sVz +I13 +ssVot +p1688 +(dp1689 +Va +I6 +sV +I28 +sVe +I21 +sVi +I5 +sVh +I28 +sV* +I1 +sV, +I7 +sV. +I4 +sVs +I17 +sVr +I77 +sVt +I16 +sV: +I1 +ssVos +p1690 +(dp1691 +Va +I14 +sV +I62 +sVc +I2 +sVe +I52 +sVp +I1 +sVi +I12 +sV- +I13 +sVm +I1 +sV, +I12 +sVo +I23 +sV. +I4 +sVq +I1 +sVé +I5 +sVs +I134 +sVt +I22 +sV; +I1 +ssVor +p1692 +(dp1693 +Vé +I6 +sV +I81 +sV, +I8 +sV; +I1 +sV: +I2 +sVç +I4 +sVa +I52 +sVc +I16 +sVb +I2 +sVe +I75 +sVd +I83 +sVg +I22 +sVf +I2 +sVi +I17 +sVk +I10 +sVm +I45 +sVl +I3 +sVo +I1 +sVn +I13 +sVq +I1 +sVp +I11 +sVs +I39 +sVr +I32 +sVt +I214 +sVw +I1 +sVy +I1 +sVz +I1 +ssVoq +p1694 +(dp1695 +Vu +I16 +ssVop +p1696 +(dp1697 +V +I8 +sVe +I16 +sVi +I3 +sVé +I9 +sVh +I26 +sVl +I17 +sVo +I18 +sVp +I5 +sVr +I7 +sVu +I1 +sVt +I4 +sVy +I15 +ssVNi +p1698 +(dp1699 +V +I1 +sVc +I1 +ssVaç +p1700 +(dp1701 +Va +I3 +sVo +I1 +ssVNo +p1702 +(dp1703 +Vi +I1 +sVn +I4 +sVs +I2 +sVr +I6 +sVu +I20 +sVt +I3 +sVv +I4 +ssVaé +p1704 +(dp1705 +Vl +I2 +ssVNa +p1706 +(dp1707 +Vp +I3 +sVt +I2 +sVd +I1 +ssVaë +p1708 +(dp1709 +Vl +I2 +ssVNe +p1710 +(dp1711 +V +I4 +sVb +I1 +sVw +I7 +sVv +I1 +ssVaï +p1712 +(dp1713 +Vf +I1 +sVè +I1 +sVm +I1 +sVo +I3 +sVs +I2 +sVv +I1 +ssVaî +p1714 +(dp1715 +Vc +I5 +sVt +I68 +sVn +I13 +ssVNu +p1716 +(dp1717 +Vm +I2 +ssVNI +p1718 +(dp1719 +VT +I2 +ssVNO +p1720 +(dp1721 +V +I2 +sVT +I4 +ssVNB +p1722 +(dp1723 +VE +I8 +ssVNC +p1724 +(dp1725 +VI +I1 +sVE +I1 +sVL +I2 +ssVND +p1726 +(dp1727 +VI +I4 +sV +I3 +sV* +I1 +sVE +I4 +sV, +I1 +ssVNE +p1728 +(dp1729 +VY +I1 +sVS +I1 +sVG +I1 +ssVNG +p1730 +(dp1731 +V +I3 +ssVNY +p1732 +(dp1733 +V +I2 +sVT +I1 +ssVNS +p1734 +(dp1735 +V +I6 +sVE +I1 +sV. +I1 +ssVNT +p1736 +(dp1737 +V! +I2 +sVA +I2 +sVI +I3 +sV* +I1 +sVS +I1 +sVR +I1 +sVY +I2 +ssVNU +p1738 +(dp1739 +VM +I2 +ssVN, +p1740 +(dp1741 +V +I3 +ssVN +p1742 +(dp1743 +VA +I1 +sVb +I1 +sVE +I2 +sVD +I3 +sVF +I1 +sVI +I2 +sVU +I1 +sV[ +I1 +ssVN' +p1744 +(dp1745 +Va +I2 +sVê +I1 +sVe +I1 +sVT +I1 +ssV'_ +p1746 +(dp1747 +V +I1 +sVH +I2 +ssVaa +p1748 +(dp1749 +Vs +I1 +sV, +I3 +ssVNé +p1750 +(dp1751 +Vr +I1 +ssVac +p1752 +(dp1753 +Va +I94 +sV +I3 +sVc +I40 +sV: +I1 +sVe +I29 +sVé +I4 +sVh +I64 +sVl +I10 +sVo +I7 +sV, +I2 +sVq +I11 +sVi +I6 +sVs +I1 +sVr +I10 +sVu +I21 +sVt +I30 +sV; +I1 +sV. +I1 +ssVab +p1754 +(dp1755 +Va +I33 +sVb +I28 +sVe +I2 +sVi +I35 +sVh +I1 +sVj +I1 +sVl +I135 +sVo +I39 +sVî +I4 +sVs +I9 +sVr +I2 +sVu +I1 +sVy +I2 +sV, +I1 +ssVae +p1756 +(dp1757 +Va +I1 +sV +I1 +sVr +I1 +sVl +I8 +ssVad +p1758 +(dp1759 +Va +I29 +sV +I8 +sVe +I89 +sVd +I9 +sVi +I26 +sVj +I1 +sVm +I12 +sV, +I2 +sVo +I27 +sVé +I5 +sVr +I12 +sVu +I2 +sVv +I6 +sVy +I2 +ssVag +p1760 +(dp1761 +Va +I9 +sVe +I180 +sVi +I15 +sVè +I1 +sVo +I5 +sVn +I70 +sVé +I15 +sVr +I15 +sVu +I16 +ssVaf +p1762 +(dp1763 +Vi +I2 +sVé +I3 +sVr +I2 +sVt +I2 +sVf +I20 +ssVai +p1764 +(dp1765 +V! +I1 +sV +I175 +sVb +I10 +sVe +I198 +sVd +I21 +sVg +I14 +sVi +I1 +sV- +I4 +sVm +I44 +sVl +I71 +sVn +I184 +sVs +I600 +sVr +I187 +sVt +I870 +sVx +I3 +sV[ +I1 +sV. +I3 +sV, +I14 +sV? +I1 +ssVah +p1766 +(dp1767 +Va +I1 +sV +I1 +sVe +I1 +sVi +I1 +sVm +I1 +sV, +I1 +sVo +I2 +sV! 
+I1 +ssVak +p1768 +(dp1769 +V +I1 +sVe +I5 +sVo +I1 +ssVaj +p1770 +(dp1771 +Ve +I15 +sVo +I4 +ssVam +p1772 +(dp1773 +Va +I99 +sV +I5 +sVb +I106 +sVe +I62 +sVé +I1 +sV) +I1 +sVi +I47 +sVè +I6 +sVm +I7 +sV, +I5 +sVo +I17 +sVn +I3 +sVp +I21 +sVu +I3 +sV. +I2 +sV: +I1 +sV_ +I1 +ssVal +p1774 +(dp1775 +V! +I1 +sV +I95 +sVp +I4 +sV- +I1 +sV, +I16 +sV. +I6 +sV; +I5 +sV? +I3 +sV_ +I1 +sVa +I28 +sVc +I4 +sVe +I66 +sVd +I1 +sVg +I2 +sVf +I2 +sVé +I5 +sVh +I23 +sVm +I3 +sVl +I120 +sVo +I33 +sVi +I39 +sVs +I5 +sVr +I2 +sVu +I8 +sVt +I15 +sVw +I1 +sVè +I18 +sVy +I1 +ssVao +p1776 +(dp1777 +Vû +I2 +sVr +I4 +sVs +I1 +sVn +I2 +ssVan +p1778 +(dp1779 +V +I34 +sV' +I1 +sV) +I1 +sV- +I3 +sV, +I8 +sV. +I2 +sV; +I1 +sVç +I17 +sV[ +I1 +sV_ +I1 +sVa +I31 +sVc +I92 +sVe +I2 +sVd +I737 +sVg +I235 +sVi +I31 +sVk +I1 +sVo +I16 +sVn +I33 +sVq +I17 +sVé +I7 +sVs +I442 +sVu +I6 +sVt +I521 +sVv +I1 +sVy +I20 +ssVaq +p1780 +(dp1781 +Vu +I40 +ssVap +p1782 +(dp1783 +Va +I4 +sVé +I5 +sVe +I31 +sV +I3 +sVi +I51 +sVh +I7 +sVl +I3 +sVo +I7 +sV. +I1 +sVp +I104 +sVr +I45 +sVt +I14 +ssVas +p1784 +(dp1785 +V! +I18 +sV +I244 +sVc +I4 +sVe +I21 +sVi +I6 +sVé +I1 +sVh +I1 +sVk +I4 +sV, +I34 +sV. +I12 +sVp +I3 +sVs +I197 +sVu +I1 +sVa +I7 +sV[ +I1 +sV; +I4 +sV? +I1 +sVt +I25 +ssVar +p1786 +(dp1787 +V +I177 +sV' +I1 +sVp +I3 +sV- +I3 +sV, +I10 +sV; +I1 +sVg +I33 +sVa +I64 +sVc +I56 +sVb +I15 +sVe +I115 +sVd +I99 +sVç +I4 +sVf +I6 +sVi +I40 +sVè +I2 +sVk +I6 +sVm +I49 +sVl +I65 +sVo +I85 +sVn +I14 +sVq +I33 +sVé +I8 +sVs +I10 +sVr +I78 +sVu +I12 +sVt +I240 +sVy +I15 +ssVau +p1788 +(dp1789 +V +I240 +sV) +I1 +sV- +I8 +sV, +I13 +sV. +I5 +sV; +I3 +sV: +I1 +sV[ +I1 +sVc +I49 +sVb +I4 +sVd +I20 +sVg +I4 +sVf +I3 +sVj +I8 +sVm +I16 +sVl +I3 +sVn +I3 +sVq +I1 +sVp +I21 +sVs +I68 +sVr +I41 +sVt +I218 +sVv +I37 +sVx +I117 +ssVat +p1790 +(dp1791 +Va +I13 +sV +I52 +sVe +I89 +sV' +I1 +sVi +I136 +sVh +I6 +sV, +I11 +sVo +I4 +sV. +I2 +sVq +I1 +sVé +I1 +sVs +I28 +sVr +I55 +sVu +I29 +sVt +I66 +sVè +I1 +sVy +I1 +sV; +I1 +sV: +I3 +sV? +I1 +ssVaw +p1792 +(dp1793 +Va +I4 +sVy +I1 +sVs +I2 +sV. +I1 +ssVav +p1794 +(dp1795 +Va +I259 +sVe +I250 +sVi +I32 +sVo +I102 +sVé +I3 +sVr +I3 +ssVay +p1796 +(dp1797 +Va +I25 +sV +I30 +sVe +I22 +sVi +I1 +sV) +I1 +sVm +I1 +sVo +I1 +sVé +I5 +sVs +I54 +ssVax +p1798 +(dp1799 +Vi +I3 +sV- +I2 +sVe +I2 +sV +I1 +ssVaz +p1800 +(dp1801 +Vi +I1 +ssVAs +p1802 +(dp1803 +Vi +I2 +sV +I3 +sVs +I2 +sVc +I1 +sVt +I1 +ssVAu +p1804 +(dp1805 +V +I10 +sVs +I8 +sVt +I1 +sVg +I2 +ssVa[ +p1806 +(dp1807 +V1 +I2 +ssVa] +p1808 +(dp1809 +V +I1 +sV. +I1 +ssVa_ +p1810 +(dp1811 +V, +I2 +ssVuè +p1812 +(dp1813 +Vr +I8 +sVd +I1 +ssVa +p1814 +(dp1815 +V" +I1 +sVi +I6 +sV5 +I2 +sVC +I19 +sVB +I4 +sVE +I1 +sVD +I2 +sVG +I3 +sVF +I2 +sVH +I1 +sVM +I5 +sVP +I11 +sVS +I1 +sVR +I1 +sVU +I1 +sVT +I2 +sVV +I4 +sV_ +I9 +sVa +I22 +sVà +I28 +sVc +I95 +sVb +I54 +sVe +I26 +sVd +I128 +sVg +I33 +sVf +I61 +sVé +I13 +sVh +I9 +sVj +I21 +sVm +I139 +sVl +I61 +sVo +I2 +sVn +I36 +sVq +I30 +sVp +I182 +sVs +I72 +sVr +I63 +sVu +I27 +sVt +I67 +sVw +I2 +sVv +I114 +ssVa- +p1816 +(dp1817 +Vc +I1 +sVb +I1 +sVf +I10 +sVM +I1 +sVC +I2 +sVt +I15 +ssVa, +p1818 +(dp1819 +V +I76 +sVc +I1 +ssVa. +p1820 +(dp1821 +V +I10 +sVb +I2 +sVt +I2 +ssVN° +p1822 +(dp1823 +V +I2 +ssVa; +p1824 +(dp1825 +V +I9 +ssVa: +p1826 +(dp1827 +V +I7 +ssVa? +p1828 +(dp1829 +V +I1 +ssV"D +p1830 +(dp1831 +Ve +I1 +ssV-E +p1832 +(dp1833 +Vs +I1 +sVt +I2 +sVd +I1 +ssVt_ +p1834 +(dp1835 +V +I1 +sV, +I2 +sV. 
+I1 +ssV-G +p1836 +(dp1837 +Vr +I2 +sVe +I1 +ssV-A +p1838 +(dp1839 +Vy +I13 +sVh +I2 +ssVt[ +p1840 +(dp1841 +V1 +I2 +sV2 +I1 +ssV-C +p1842 +(dp1843 +Va +I2 +sVe +I1 +sV' +I2 +sVo +I1 +ssV-M +p1844 +(dp1845 +Va +I3 +sVé +I1 +sVe +I1 +sVo +I3 +ssV-L +p1846 +(dp1847 +Va +I1 +sV' +I2 +ssV-O +p1848 +(dp1849 +Vù +I1 +sVh +I2 +sVu +I1 +ssV-N +p1850 +(dp1851 +Vo +I3 +ssV-I +p1852 +(dp1853 +VI +I1 +sVS +I1 +sVl +I1 +ssV-H +p1854 +(dp1855 +Vo +I1 +ssV-J +p1856 +(dp1857 +Ve +I1 +sV. +I2 +ssV-T +p1858 +(dp1859 +VM +I1 +sVu +I1 +ssV-V +p1860 +(dp1861 +Vo +I2 +ssV-Q +p1862 +(dp1863 +Vu +I1 +ssV-P +p1864 +(dp1865 +Vi +I1 +sVo +I1 +ssV-S +p1866 +(dp1867 +Va +I1 +sVu +I1 +ssVzè +p1868 +(dp1869 +Vl +I1 +ssVt@ +p1870 +(dp1871 +Vp +I2 +ssVgr +p1872 +(dp1873 +Va +I94 +sVâ +I13 +sVe +I17 +sVé +I14 +sVè +I2 +sVo +I15 +sVi +I5 +ssV-e +p1874 +(dp1875 +Vt +I1 +sVl +I18 +sVn +I3 +ssV-d +p1876 +(dp1877 +Vi +I7 +sVa +I7 +sVe +I9 +sV' +I2 +ssV-f +p1878 +(dp1879 +Vé +I8 +sVa +I2 +sVr +I1 +sVo +I3 +ssV-a +p1880 +(dp1881 +Vi +I1 +sVr +I1 +sVu +I1 +ssVtx +p1882 +(dp1883 +Vt +I6 +ssV-b +p1884 +(dp1885 +Vu +I1 +sVe +I3 +sVo +I2 +ssV-m +p1886 +(dp1887 +Vê +I21 +sVo +I15 +ssV-l +p1888 +(dp1889 +Va +I3 +sVà +I9 +sVe +I10 +sVu +I2 +ssVtt +p1890 +(dp1891 +Va +I13 +sVe +I195 +sVp +I5 +sVi +I10 +sVè +I3 +sVo +I2 +sVé +I7 +sVs +I1 +sVr +I43 +sVu +I7 +ssVtu +p1892 +(dp1893 +Va +I11 +sV +I21 +sVc +I1 +sVe +I31 +sVd +I9 +sVg +I12 +sVé +I18 +sVm +I4 +sVl +I9 +sVn +I15 +sVp +I2 +sVs +I5 +sVr +I68 +sV, +I1 +sV? +I2 +ssVtr +p1894 +(dp1895 +Va +I145 +sVe +I578 +sV' +I3 +sVi +I54 +sVè +I64 +sVê +I13 +sVo +I173 +sVé +I21 +sVu +I9 +sVô +I12 +sVy +I3 +ssVts +p1896 +(dp1897 +V! +I2 +sV +I173 +sV" +I1 +sVk +I3 +sV* +I1 +sVm +I3 +sV, +I45 +sV. +I20 +sV_ +I1 +sV; +I14 +sV: +I2 +sV[ +I1 +sV? +I4 +ssVtp +p1898 +(dp1899 +Vh +I12 +sV: +I6 +sV. +I1 +ssV-j +p1900 +(dp1901 +Ve +I14 +ssV-u +p1902 +(dp1903 +Vp +I1 +ssVto +p1904 +(dp1905 +V +I65 +sVc +I3 +sVb +I3 +sVd +I1 +sVf +I2 +sVi +I25 +sV- +I7 +sVm +I50 +sV, +I2 +sVn +I75 +sVp +I2 +sVr +I5 +sVu +I401 +sVt +I3 +sV: +I2 +sVy +I8 +sV. +I3 +sV_ +I1 +sVl +I3 +ssVtl +p1906 +(dp1907 +Va +I1 +sVy +I5 +sVe +I2 +ssV-v +p1908 +(dp1909 +Vi +I3 +sVa +I1 +sVe +I1 +sVo +I61 +ssV-q +p1910 +(dp1911 +Vu +I3 +ssV-p +p1912 +(dp1913 +Vè +I2 +ssVth +p1914 +(dp1915 +Va +I22 +sV +I20 +sVe +I130 +sVi +I44 +sVè +I3 +sVm +I1 +sV, +I2 +sVo +I18 +sV. +I1 +sVé +I23 +sVr +I2 +sVu +I1 +sV; +I1 +sV: +I1 +ssVti +p1916 +(dp1917 +V +I15 +sV, +I2 +sV. +I2 +sV; +I1 +sVa +I2 +sVc +I22 +sVb +I4 +sVe +I72 +sVd +I4 +sVg +I8 +sVf +I8 +sVé +I16 +sVè +I14 +sVm +I24 +sVl +I17 +sVo +I185 +sVn +I194 +sVq +I26 +sVp +I1 +sVs +I39 +sVr +I41 +sVu +I1 +sVt +I64 +sVv +I20 +ssVte +p1918 +(dp1919 +V! +I10 +sV +I403 +sV- +I4 +sV, +I100 +sV. +I37 +sV; +I21 +sV: +I6 +sV? +I7 +sV[ +I1 +sVa +I37 +sVc +I3 +sVe +I3 +sVd +I28 +sVf +I1 +sVi +I1 +sVm +I72 +sVl +I49 +sVn +I216 +sVp +I1 +sVs +I196 +sVr +I190 +sVu +I87 +sVt +I1 +sVy +I1 +sVx +I16 +sVz +I14 +ssVtb +p1920 +(dp1921 +Vo +I1 +ssVtc +p1922 +(dp1923 +V. +I3 +ssVta +p1924 +(dp1925 +V +I53 +sV- +I1 +sV, +I10 +sV. +I1 +sVc +I20 +sVb +I49 +sVd +I1 +sVg +I25 +sVf +I2 +sVi +I374 +sVh +I1 +sVk +I3 +sVm +I3 +sVl +I35 +sVn +I115 +sVq +I3 +sVp +I8 +sVs +I7 +sVr +I8 +sVt +I49 +sVv +I2 +sVx +I4 +ssV_é +p1926 +(dp1927 +Vt +I2 +ssVÈV +p1928 +(dp1929 +VR +I1 +ssVgy +p1930 +(dp1931 +Vp +I2 +ssVt? +p1932 +(dp1933 +V +I3 +sV- +I1 +sV. 
+I1 +ssVt: +p1934 +(dp1935 +V +I46 +sVM +I1 +ssVt; +p1936 +(dp1937 +V +I66 +ssV-- +p1938 +(dp1939 +VA +I2 +sV +I1 +sVC +I4 +sVE +I2 +sVD +I1 +sVI +I2 +sV- +I4 +sVJ +I1 +sVM +I3 +sV, +I1 +sVO +I4 +sVN +I2 +sVQ +I1 +sVP +I1 +sVS +I1 +sVV +I2 +sVL +I2 +ssV-, +p1940 +(dp1941 +V +I1 +ssV p +p1942 +(dp1943 +Va +I805 +sVâ +I5 +sVe +I337 +sVi +I95 +sVè +I36 +sVl +I307 +sVo +I434 +sVé +I24 +sVr +I416 +sVu +I65 +sVh +I41 +sVy +I1 +sVû +I4 +ssVt. +p1944 +(dp1945 +V +I116 +sV- +I4 +ssVt/ +p1946 +(dp1947 +Vp +I1 +sVd +I2 +ssVt, +p1948 +(dp1949 +V +I265 +ssV-6 +p1950 +(dp1951 +V2 +I2 +ssV-1 +p1952 +(dp1953 +V +I1 +sV2 +I3 +sV8 +I1 +ssV-3 +p1954 +(dp1955 +V1 +I1 +ssV-2 +p1956 +(dp1957 +V% +I1 +ssVt' +p1958 +(dp1959 +Va +I4 +sVy +I1 +sVs +I2 +ssVrk +p1960 +(dp1961 +Va +I1 +sV +I6 +sVe +I1 +sVi +I1 +sV- +I1 +sV, +I3 +sV. +I2 +sVs +I2 +ssVt" +p1962 +(dp1963 +V) +I1 +sV +I1 +ssV-8 +p1964 +(dp1965 +V° +I1 +ssVt +p1966 +(dp1967 +Vê +I22 +sVé +I51 +sV# +I1 +sV( +I2 +sV1 +I4 +sV2 +I1 +sV4 +I1 +sV< +I1 +sVâ +I1 +sVA +I4 +sVC +I178 +sVD +I3 +sVG +I25 +sVI +I4 +sVH +I2 +sVJ +I2 +sVM +I56 +sVP +I27 +sVR +I1 +sVV +I1 +sV_ +I6 +sVa +I271 +sVà +I160 +sVc +I196 +sVb +I97 +sVe +I218 +sVd +I603 +sVg +I14 +sVf +I92 +sVi +I83 +sVh +I36 +sVk +I1 +sVj +I77 +sVm +I129 +sVl +I488 +sVo +I74 +sVn +I70 +sVq +I230 +sVp +I336 +sVs +I210 +sVr +I80 +sVu +I182 +sVt +I157 +sVw +I11 +sVv +I91 +sVy +I11 +sVô +I1 +ssVt! +p1968 +(dp1969 +V +I7 +sV" +I6 +ssVEM +p1970 +(dp1971 +VA +I1 +sVE +I2 +sVN +I1 +ssVga +p1972 +(dp1973 +V +I1 +sVc +I1 +sVb +I1 +sVg +I24 +sVi +I7 +sVl +I44 +sVn +I8 +sVr +I74 +sVu +I7 +sVt +I3 +sV, +I2 +ssVù +p1974 +(dp1975 +Va +I2 +sVj +I7 +sVe +I3 +sVd +I1 +sVp +I1 +sVi +I8 +sVê +I1 +sVm +I2 +sVl +I9 +sVn +I5 +sVP +I1 +sVs +I4 +sVt +I3 +ssV-É +p1976 +(dp1977 +Vd +I4 +ssV z +p1978 +(dp1979 +Vè +I1 +ssVù? +p1980 +(dp1981 +V- +I1 +ssVgl +p1982 +(dp1983 +Va +I13 +sVi +I8 +sVe +I10 +sVo +I109 +ssV-à +p1984 +(dp1985 +V- +I6 +ssVtô +p1986 +(dp1987 +Vm +I1 +sVt +I38 +ssV-ê +p1988 +(dp1989 +Vt +I9 +ssVtî +p1990 +(dp1991 +Vm +I1 +ssV@p +p1992 +(dp1993 +Vo +I2 +ssVtê +p1994 +(dp1995 +Vt +I16 +ssVtè +p1996 +(dp1997 +Vr +I19 +sVm +I3 +ssVté +p1998 +(dp1999 +V +I173 +sVe +I10 +sVm +I7 +sV, +I35 +sV. +I14 +sVs +I29 +sVr +I5 +sV? +I3 +sV; +I4 +sV: +I1 +sV_ +I3 +sVt +I1 +ssVtâ +p2000 +(dp2001 +Vc +I1 +sVt +I1 +ssVEF +p2002 +(dp2003 +VÈ +I1 +sVO +I1 +ssV/N +p2004 +(dp2005 +Vo +I1 +ssVg[ +p2006 +(dp2007 +V2 +I1 +ssVS* +p2008 +(dp2009 +V* +I1 +sVV +I1 +ssVS. +p2010 +(dp2011 +V +I5 +ssVS- +p2012 +(dp2013 +VI +I1 +ssVS, +p2014 +(dp2015 +V +I6 +ssVS" +p2016 +(dp2017 +V. +I1 +ssVS +p2018 +(dp2019 +VA +I1 +sVb +I2 +sVE +I2 +sVD +I1 +sVF +I2 +sVI +I1 +sVL +I1 +sVo +I2 +sV3 +I1 +sVO +I4 +ssVS' +p2020 +(dp2021 +Vé +I1 +ssVve +p2022 +(dp2023 +Va +I6 +sV +I65 +sVc +I128 +sVd +I7 +sVi +I8 +sVm +I18 +sVl +I21 +sVn +I151 +sV. +I2 +sVs +I14 +sVr +I174 +sVu +I22 +sVt +I1 +sVy +I1 +sV; +I1 +sVz +I67 +sV, +I7 +sV? +I1 +ssVSy +p2024 +(dp2025 +Vr +I1 +ssVfû +p2026 +(dp2027 +Vm +I1 +sVt +I11 +ssVSu +p2028 +(dp2029 +Vi +I1 +sVè +I1 +sVr +I14 +ssVSt +p2030 +(dp2031 +Va +I3 +sVu +I1 +ssVSi +p2032 +(dp2033 +Va +I1 +sV +I11 +sVe +I1 +sVè +I11 +sVn +I3 +sVr +I3 +ssVSo +p2034 +(dp2035 +Vy +I1 +sVm +I1 +sVu +I5 +sV, +I1 +sVn +I1 +ssVSm +p2036 +(dp2037 +Vy +I2 +sVa +I6 +ssVSc +p2038 +(dp2039 +Vu +I1 +ssVfé +p2040 +(dp2041 +V +I3 +sV, +I3 +sV. 
+I2 +sVs +I1 +sVr +I13 +sVt +I2 +sV; +I1 +sV[ +I1 +sVl +I2 +ssVSa +p2042 +(dp2043 +V +I6 +sVc +I1 +sVi +I7 +sVm +I1 +sVl +I2 +sVn +I1 +sVr +I1 +sVx +I1 +ssVSe +p2044 +(dp2045 +Vc +I2 +sVr +I2 +sVs +I1 +ssVSS +p2046 +(dp2047 +VI +I1 +sV +I2 +sVE +I1 +ssVSQ +p2048 +(dp2049 +VU +I1 +ssVSU +p2050 +(dp2051 +VC +I1 +ssVST +p2052 +(dp2053 +VA +I2 +sVR +I2 +ssVSI +p2054 +(dp2055 +VB +I1 +sVO +I2 +ssVSO +p2056 +(dp2057 +V- +I1 +ssVSM +p2058 +(dp2059 +VA +I2 +sVE +I2 +ssVSC +p2060 +(dp2061 +VI +I2 +sVL +I1 +ssVSE +p2062 +(dp2063 +VQ +I1 +sV +I2 +sVN +I1 +sVM +I1 +sV. +I1 +ssVf +p2064 +(dp2065 +V2 +I1 +sV4 +I1 +sVC +I6 +sVF +I2 +sVP +I3 +sVR +I1 +sVT +I1 +sVV +I1 +sVa +I7 +sVà +I1 +sVc +I3 +sVe +I1 +sVd +I5 +sVi +I4 +sVm +I2 +sVo +I4 +sVn +I2 +sVp +I1 +sVs +I6 +sVr +I2 +sVu +I1 +sVt +I32 +sVw +I3 +sVy +I17 +ssVf, +p2066 +(dp2067 +V +I7 +ssVf- +p2068 +(dp2069 +Vt +I1 +sVd +I2 +ssVf. +p2070 +(dp2071 +V +I4 +sVf +I2 +ssVôt +p2072 +(dp2073 +Va +I1 +sV +I37 +sVe +I22 +sVé +I11 +sVo +I2 +sVi +I3 +sVr +I4 +sV; +I1 +ssVôp +p2074 +(dp2075 +Vi +I2 +ssVôn +p2076 +(dp2077 +Vi +I2 +sVa +I1 +sVe +I5 +sVé +I8 +ssVâl +p2078 +(dp2079 +Vi +I1 +sVe +I1 +ssVôl +p2080 +(dp2081 +Ve +I3 +ssVôm +p2082 +(dp2083 +Ve +I1 +ssVân +p2084 +(dp2085 +Ve +I1 +ssVfr +p2086 +(dp2087 +Va +I30 +sVe +I17 +sVi +I20 +sVè +I26 +sV, +I2 +sVo +I30 +sVé +I3 +sVu +I5 +ssVfs +p2088 +(dp2089 +V +I5 +sV; +I1 +sV, +I3 +sV. +I1 +ssVft +p2090 +(dp2091 +Vy +I1 +sVp +I2 +sVe +I4 +sVw +I3 +ssVfu +p2092 +(dp2093 +Vg +I3 +sVi +I9 +sVm +I3 +sVl +I2 +sVn +I10 +sVs +I41 +sVr +I35 +sVt +I68 +sVy +I2 +ssVg; +p2094 +(dp2095 +V +I1 +ssV / +p2096 +(dp2097 +Ve +I1 +ssVfa +p2098 +(dp2099 +V +I2 +sVc +I8 +sVb +I2 +sVç +I3 +sVi +I197 +sVm +I13 +sVl +I12 +sVn +I20 +sVs +I8 +sVr +I3 +sVu +I56 +sVt +I6 +sVv +I5 +sV: +I1 +ssVSé +p2100 +(dp2101 +Vd +I1 +sVn +I1 +ssVfe +p2102 +(dp2103 +Vc +I8 +sVe +I6 +sVm +I32 +sVn +I14 +sVs +I52 +sVr +I39 +sVu +I15 +sVt +I15 +sV: +I1 +ssVff +p2104 +(dp2105 +Va +I16 +sV +I1 +sVe +I29 +sVi +I24 +sV- +I1 +sVl +I8 +sVo +I5 +sVé +I6 +sVr +I20 +sV_ +I1 +ssVfi +p2106 +(dp2107 +Va +I5 +sVc +I29 +sVe +I8 +sVd +I10 +sVg +I9 +sVf +I2 +sVé +I1 +sVè +I2 +sVl +I62 +sVn +I59 +sVq +I3 +sVs +I7 +sVr +I11 +sVt +I76 +sVv +I1 +sVx +I2 +ssVfl +p2108 +(dp2109 +Va +I9 +sVe +I14 +sVi +I3 +sVè +I2 +sVo +I7 +sVé +I12 +ssVfo +p2110 +(dp2111 +Vi +I47 +sVl +I7 +sVn +I37 +sVs +I2 +sVr +I122 +sVu +I22 +ssVât +p2112 +(dp2113 +Va +I1 +sV +I6 +sVe +I32 +sVi +I3 +sVo +I3 +sV. +I1 +sVé +I1 +sVs +I1 +sVr +I11 +sV; +I1 +ssVf_ +p2114 +(dp2115 +V, +I1 +ssVô +p2116 +(dp2117 +Vm +I3 +ssVy) +p2118 +(dp2119 +V +I3 +ssVy* +p2120 +(dp2121 +V +I1 +ssVy, +p2122 +(dp2123 +V +I12 +ssVy. +p2124 +(dp2125 +V +I3 +ssVy +p2126 +(dp2127 +Vj +I1 +sV +I1 +sVp +I7 +sV( +I2 +sV1 +I1 +sV2 +I1 +sVA +I7 +sVC +I4 +sVB +I1 +sVD +I1 +sVF +I1 +sVI +I2 +sVM +I4 +sVL +I3 +sVP +I1 +sVS +I2 +sVT +I1 +sVV +I5 +sVa +I84 +sVc +I8 +sVb +I9 +sVe +I19 +sVd +I5 +sVg +I4 +sVf +I12 +sVé +I4 +sVh +I4 +sVê +I2 +sVm +I4 +sVl +I1 +sVo +I17 +sVn +I6 +sVi +I5 +sVs +I8 +sVr +I8 +sVu +I3 +sVt +I20 +sVw +I3 +sVv +I4 +sVy +I3 +ssV 5 +p2128 +(dp2129 +V0 +I4 +sV2 +I1 +sV +I1 +ssVg/ +p2130 +(dp2131 +Vp +I1 +sVe +I2 +sVg +I1 +ssV ; +p2132 +(dp2133 +V +I1 +ssV24 +p2134 +(dp2135 +V0 +I1 +sV. +I1 +ssV25 +p2136 +(dp2137 +V) +I1 +sV0 +I1 +ssV26 +p2138 +(dp2139 +V +I1 +sV. +I1 +ssV27 +p2140 +(dp2141 +V +I1 +sV, +I1 +ssV20 +p2142 +(dp2143 +V0 +I15 +sV; +I1 +sV% +I1 +sV, +I1 +sV +I1 +ssV21 +p2144 +(dp2145 +V +I1 +sV5 +I2 +ssV22 +p2146 +(dp2147 +V1 +I2 +sV +I1 +ssV28 +p2148 +(dp2149 +V +I1 +sV; +I1 +ssV29 +p2150 +(dp2151 +V. 
+I2 +ssV2: +p2152 +(dp2153 +V +I1 +ssV2; +p2154 +(dp2155 +V +I1 +ssV2% +p2156 +(dp2157 +V +I1 +ssV2 +p2158 +(dp2159 +Va +I1 +sV( +I1 +sVd +I1 +sVD +I1 +ssV2, +p2160 +(dp2161 +V +I6 +ssV2. +p2162 +(dp2163 +V +I5 +ssV2/ +p2164 +(dp2165 +V1 +I1 +ssV2* +p2166 +(dp2167 +VE +I1 +ssVyi +p2168 +(dp2169 +Vn +I3 +ssVym +p2170 +(dp2171 +Va +I1 +sVe +I1 +ssVyl +p2172 +(dp2173 +Ve +I1 +sVl +I1 +sVo +I1 +sVv +I4 +ssVyo +p2174 +(dp2175 +Vm +I1 +sVu +I62 +sVn +I3 +ssVyn +p2176 +(dp2177 +Vx +I1 +sVg +I3 +ssVya +p2178 +(dp2179 +V +I7 +sVc +I1 +sVb +I7 +sVd +I1 +sVg +I25 +sVi +I20 +sV, +I2 +sVl +I7 +sVu +I11 +sVn +I36 +ssV2] +p2180 +(dp2181 +V +I15 +sV; +I2 +sV, +I1 +sV. +I6 +ssVyc +p2182 +(dp2183 +Vl +I3 +ssV2_ +p2184 +(dp2185 +V. +I1 +ssVye +p2186 +(dp2187 +Va +I4 +sVe +I2 +sVd +I1 +sV, +I2 +sVn +I10 +sVr +I16 +sVu +I23 +sVt +I1 +sVz +I59 +ssVyd +p2188 +(dp2189 +Ve +I1 +ssVyg +p2190 +(dp2191 +Vm +I1 +ssVyz +p2192 +(dp2193 +Ve +I1 +ssVyp +p2194 +(dp2195 +Va +I1 +sVr +I1 +sVe +I2 +sVt +I1 +sVo +I2 +ssVys +p2196 +(dp2197 +V +I35 +sVi +I20 +sV- +I3 +sV, +I8 +sV. +I6 +sVs +I1 +sVt +I4 +sV; +I2 +sV? +I1 +ssVyr +p2198 +(dp2199 +Va +I2 +sVe +I14 +sVi +I11 +sVo +I1 +sVn +I2 +sVr +I1 +ssVyt +p2200 +(dp2201 +Vi +I1 +ssVÀ +p2202 +(dp2203 +VM +I1 +ssVëm +p2204 +(dp2205 +Ve +I1 +ssVël +p2206 +(dp2207 +V, +I2 +ssVy_ +p2208 +(dp2209 +V, +I1 +ssVët +p2210 +(dp2211 +Ve +I2 +ssVE, +p2212 +(dp2213 +V +I5 +ssVE. +p2214 +(dp2215 +V +I4 +ssVE! +p2216 +(dp2217 +V* +I1 +ssVE +p2218 +(dp2219 +VA +I1 +sVC +I1 +sVE +I1 +sVD +I5 +sVG +I1 +sVI +I6 +sVJ +I1 +sVM +I2 +sVL +I3 +sVO +I4 +sVN +I2 +sV1 +I1 +sVP +I1 +sVS +I2 +sVT +I2 +sVV +I4 +sVX +I23 +ssVyé +p2220 +(dp2221 +V +I15 +sVe +I2 +ssVEN +p2222 +(dp2223 +V +I3 +sVC +I1 +sVB +I8 +sVD +I3 +sV, +I1 +sVT +I3 +ssVEI +p2224 +(dp2225 +VT +I1 +sVN +I2 +ssVED +p2226 +(dp2227 +VI +I4 +sV +I4 +sV, +I1 +ssVEG +p2228 +(dp2229 +VL +I1 +ssVyâ +p2230 +(dp2231 +Vt +I1 +ssVEA +p2232 +(dp2233 +VC +I1 +sVD +I1 +ssVEC +p2234 +(dp2235 +V +I1 +sVT +I9 +ssVEB +p2236 +(dp2237 +VC +I2 +sVO +I6 +ssVEY +p2238 +(dp2239 +V +I1 +ssVEX +p2240 +(dp2241 +VP +I1 +ssVEZ +p2242 +(dp2243 +V +I1 +ssVEU +p2244 +(dp2245 +VC +I2 +sVR +I2 +sVV +I2 +ssVET +p2246 +(dp2247 +V +I2 +sVC +I1 +sVT +I2 +ssVEV +p2248 +(dp2249 +VE +I2 +ssVEQ +p2250 +(dp2251 +VU +I2 +ssVES +p2252 +(dp2253 +V +I8 +sVS +I2 +sV, +I3 +sV. +I1 +ssVER +p2254 +(dp2255 +V +I4 +sVC +I1 +sVD +I1 +sVG +I8 +sVI +I1 +sV* +I1 +sV, +I4 +sVO +I1 +sVS +I2 +sVT +I1 +sVW +I1 +ssVEm +p2256 +(dp2257 +Vp +I2 +ssVEl +p2258 +(dp2259 +Ve +I1 +sVd +I17 +sVl +I17 +ssVEn +p2260 +(dp2261 +V +I13 +sVs +I3 +sVd +I1 +sVf +I14 +ssVEh +p2262 +(dp2263 +V! +I2 +sV +I10 +ssVEd +p2264 +(dp2265 +Vi +I1 +sVo +I1 +ssVEx +p2266 +(dp2267 +Vc +I1 +sVt +I1 +ssVEu +p2268 +(dp2269 +Vr +I15 +ssVEt +p2270 +(dp2271 +V +I11 +sVr +I1 +sVe +I6 +sV, +I1 +ssVEv +p2272 +(dp2273 +Vh +I1 +ssVEs +p2274 +(dp2275 +Vp +I18 +sVs +I2 +sVt +I7 +ssVEr +p2276 +(dp2277 +Vz +I1 +ssV ç +p2278 +(dp2279 +Và +I1 +ssVX. +p2280 +(dp2281 +V +I8 +ssVX, +p2282 +(dp2283 +V +I4 +ssVX +p2284 +(dp2285 +Ve +I1 +sVd +I2 +ssVX; +p2286 +(dp2287 +V +I1 +ssVXI +p2288 +(dp2289 +V +I3 +sVI +I14 +sV- +I1 +sV, +I10 +sV. +I2 +sVV +I4 +sVX +I3 +ssVXL +p2290 +(dp2291 +VI +I1 +sV +I1 +ssVXX +p2292 +(dp2293 +VI +I24 +sVX +I12 +sV. +I3 +sV, +I3 +sVV +I11 +ssVXP +p2294 +(dp2295 +VR +I1 +ssVXV +p2296 +(dp2297 +VI +I21 +sV +I2 +sV, +I1 +sV_ +I9 +sV. 
+I3 +ssVk +p2298 +(dp2299 +Va +I1 +sVb +I2 +sVe +I1 +sVd +I2 +sVf +I1 +sVi +I4 +sV( +I3 +sVm +I2 +sVl +I1 +sVo +I7 +sVp +I1 +sVs +I1 +sVr +I1 +sVt +I6 +sVw +I1 +ssVk) +p2300 +(dp2301 +V +I1 +sV, +I1 +ssVk. +p2302 +(dp2303 +V +I5 +ssVk- +p2304 +(dp2305 +Vu +I1 +sVd +I1 +ssVk, +p2306 +(dp2307 +V +I13 +ssVk0 +p2308 +(dp2309 +V3 +I2 +sV2 +I1 +ssV7] +p2310 +(dp2311 +V +I2 +ssV7 +p2312 +(dp2313 +VA +I1 +sVd +I1 +ssV7. +p2314 +(dp2315 +V +I2 +ssV7, +p2316 +(dp2317 +V +I3 +ssV77 +p2318 +(dp2319 +V8 +I1 +sV, +I1 +sV7 +I1 +ssV76 +p2320 +(dp2321 +V1 +I6 +sV0 +I1 +sV3 +I2 +sV2 +I1 +sV4 +I2 +sV7 +I1 +sV6 +I4 +sV9 +I1 +sV8 +I2 +ssV75 +p2322 +(dp2323 +V9 +I7 +sV5 +I1 +sV7 +I2 +sV6 +I4 +ssV73 +p2324 +(dp2325 +V9 +I3 +sV8 +I1 +sV6 +I1 +sV0 +I3 +ssV72 +p2326 +(dp2327 +V, +I1 +ssV71 +p2328 +(dp2329 +V1 +I1 +sV8 +I1 +sV* +I1 +sV +I1 +ssV70 +p2330 +(dp2331 +V; +I1 +sV2 +I1 +sV5 +I1 +sV. +I1 +ssVk[ +p2332 +(dp2333 +V3 +I1 +ssV7; +p2334 +(dp2335 +V +I2 +ssV7: +p2336 +(dp2337 +V +I2 +ssV79 +p2338 +(dp2339 +V6 +I1 +ssV78 +p2340 +(dp2341 +V9 +I1 +sV2 +I1 +sV5 +I1 +sV, +I1 +ssVka +p2342 +(dp2343 +V +I1 +sV, +I2 +sVï +I1 +sVn +I1 +ssVke +p2344 +(dp2345 +V +I6 +sVs +I1 +sVe +I1 +sVt +I1 +ssVkd +p2346 +(dp2347 +Vo +I1 +ssVki +p2348 +(dp2349 +V +I2 +sV[ +I1 +sV, +I1 +sVn +I1 +ssVkh +p2350 +(dp2351 +V +I3 +sV; +I3 +sV, +I3 +sV? +I1 +sV. +I1 +ssVko +p2352 +(dp2353 +Vt +I1 +ssVkn +p2354 +(dp2355 +Vo +I5 +ssVkl +p2356 +(dp2357 +Va +I1 +ssVks +p2358 +(dp2359 +V! +I1 +sV +I14 +sV, +I4 +sV/ +I1 +sV. +I1 +sV; +I1 +ssVky +p2360 +(dp2361 +V, +I2 +ssVJE +p2362 +(dp2363 +VC +I8 +ssVJA +p2364 +(dp2365 +VC +I1 +ssVJu +p2366 +(dp2367 +Vi +I17 +sVs +I1 +sVl +I1 +sVg +I1 +ssVJo +p2368 +(dp2369 +Va +I1 +sVu +I5 +sVï +I1 +ssVJe +p2370 +(dp2371 +Va +I2 +sV +I101 +sVr +I1 +ssVJa +p2372 +(dp2373 +Vp +I3 +sVc +I9 +sVm +I2 +sVn +I3 +ssVJ. +p2374 +(dp2375 +V +I4 +sV- +I2 +ssVJ' +p2376 +(dp2377 +Va +I22 +sVé +I4 +sVi +I1 +sVy +I1 +ssVJé +p2378 +(dp2379 +Vh +I1 +sVc +I1 +sVr +I1 +ssV]? +p2380 +(dp2381 +V +I2 +ssV]; +p2382 +(dp2383 +V +I11 +ssV]: +p2384 +(dp2385 +V +I1 +ssV] +p2386 +(dp2387 +V +I6 +sV6 +I2 +sVA +I4 +sVC +I4 +sVD +I3 +sVF +I2 +sVI +I2 +sVH +I1 +sVM +I2 +sVL +I11 +sVQ +I1 +sVS +I6 +sVR +I1 +sVT +I1 +sVV +I11 +sVY +I1 +sV_ +I2 +sVa +I4 +sVd +I2 +sVé +I1 +sVo +I1 +sVq +I1 +sVp +I1 +ssV], +p2388 +(dp2389 +V +I12 +ssV]. +p2390 +(dp2391 +V +I16 +ssV); +p2392 +(dp2393 +V +I1 +ssV): +p2394 +(dp2395 +V +I2 +ssV)( +p2396 +(dp2397 +V3 +I2 +ssV), +p2398 +(dp2399 +V +I6 +ssV). +p2400 +(dp2401 +V +I4 +ssV) +p2402 +(dp2403 +Va +I1 +sVc +I2 +sVe +I1 +sVd +I1 +sVo +I1 +sV2 +I1 +sVt +I1 +sVy +I2 +ssVpr +p2404 +(dp2405 +Va +I4 +sVe +I108 +sVi +I156 +sVè +I79 +sVê +I18 +sVo +I176 +sVî +I1 +sVé +I76 +sVu +I8 +ssVps +p2406 +(dp2407 +V +I54 +sVi +I1 +sVh +I1 +sV, +I14 +sV. +I6 +sV; +I5 +sV_ +I1 +ssVpp +p2408 +(dp2409 +Va +I17 +sVe +I28 +sVé +I6 +sVl +I10 +sVo +I17 +sVi +I1 +sVr +I39 +sVu +I2 +ssVpt +p2410 +(dp2411 +Va +I4 +sV +I9 +sVe +I14 +sVi +I25 +sV- +I1 +sV, +I1 +sV. +I1 +sVé +I4 +sVs +I1 +ssVpu +p2412 +(dp2413 +V +I10 +sVc +I1 +sVb +I16 +sVd +I1 +sVi +I33 +sVl +I7 +sV. +I1 +sVp +I1 +sVs +I5 +sVr +I6 +sVt +I20 +sVy +I1 +sVn +I5 +ssVpy +p2414 +(dp2415 +V +I4 +sVr +I10 +sVg +I1 +sV. +I1 +ssVpa +p2416 +(dp2417 +V +I2 +sVc +I2 +sVd +I5 +sVg +I64 +sVi +I24 +sVv +I1 +sVl +I20 +sVn +I9 +sVp +I12 +sVs +I285 +sVr +I402 +sVu +I17 +sVt +I33 +sVî +I1 +sVy +I66 +sV. +I1 +ssVpg +p2418 +(dp2419 +V +I1 +ssVpe +p2420 +(dp2421 +Va +I6 +sV +I24 +sVc +I22 +sVz +I2 +sVi +I23 +sV: +I1 +sVl +I20 +sVo +I3 +sVn +I86 +sVs +I16 +sVr +I164 +sVu +I112 +sVt +I39 +sV; +I3 +sV. 
+I6 +sV, +I9 +sV_ +I2 +ssVph +p2422 +(dp2423 +Va +I15 +sVe +I21 +sVé +I1 +sVè +I1 +sVi +I29 +sVr +I1 +sVt +I3 +sVy +I20 +ssVpi +p2424 +(dp2425 +Va +I15 +sVc +I5 +sVe +I44 +sVd +I6 +sVg +I2 +sVè +I15 +sVl +I6 +sVn +I3 +sVq +I6 +sVs +I7 +sVr +I21 +sVt +I67 +sV, +I1 +ssVpo +p2426 +(dp2427 +Vc +I4 +sVb +I2 +sVp +I1 +sVi +I65 +sVè +I3 +sVë +I3 +sVm +I4 +sVl +I19 +sVn +I75 +sVq +I1 +sVé +I1 +sVs +I62 +sVr +I79 +sVu +I299 +sVt +I6 +ssVpl +p2428 +(dp2429 +Va +I86 +sVe +I60 +sVi +I34 +sVè +I1 +sVo +I5 +sVu +I210 +sVy +I2 +ssVpm +p2430 +(dp2431 +Ve +I2 +ssVp: +p2432 +(dp2433 +V/ +I6 +ssVp; +p2434 +(dp2435 +V +I1 +ssVp +p2436 +(dp2437 +Va +I1 +sVà +I3 +sVb +I1 +sVe +I2 +sVd +I20 +sVl +I3 +sVo +I1 +sVq +I1 +sVp +I5 +sVs +I5 +sVr +I2 +sVt +I1 +sVv +I3 +ssVp! +p2438 +(dp2439 +V +I1 +ssVp* +p2440 +(dp2441 +V* +I2 +ssVp. +p2442 +(dp2443 +Vi +I1 +sV +I3 +ssVp, +p2444 +(dp2445 +V +I6 +ssVpû +p2446 +(dp2447 +Vt +I4 +ssVpâ +p2448 +(dp2449 +Vm +I1 +sVt +I2 +sVl +I2 +ssVpç +p2450 +(dp2451 +Vo +I3 +ssVpê +p2452 +(dp2453 +Vc +I6 +sVt +I3 +ssVpè +p2454 +(dp2455 +Vc +I10 +sVr +I41 +ssVpé +p2456 +(dp2457 +Va +I1 +sV +I10 +sVc +I2 +sVe +I18 +sVd +I3 +sVf +I2 +sV. +I2 +sVs +I6 +sVr +I47 +sV! +I1 +sVt +I5 +ssV<h +p2458 +(dp2459 +Va +I1 +ssV(* +p2460 +(dp2461 +V) +I1 +sV +I1 +ssVO. +p2462 +(dp2463 +V +I1 +ssVO- +p2464 +(dp2465 +VL +I1 +ssVçû +p2466 +(dp2467 +Vt +I1 +ssVO +p2468 +(dp2469 +Vc +I3 +sVI +I1 +sVm +I2 +sVO +I1 +sVP +I3 +sVS +I1 +sVR +I1 +sVT +I1 +sVW +I1 +sVY +I2 +ssVO? +p2470 +(dp2471 +V +I1 +ssVçà +p2472 +(dp2473 +V, +I1 +ssVgt +p2474 +(dp2475 +Va +I1 +sV +I20 +sVe +I2 +sV- +I5 +sV, +I1 +sVo +I1 +sVs +I3 +ssVgs +p2476 +(dp2477 +V, +I4 +ssVOn +p2478 +(dp2479 +V +I45 +sVl +I1 +ssVOk +p2480 +(dp2481 +Vl +I1 +ssVbâ +p2482 +(dp2483 +Vt +I6 +ssVOh +p2484 +(dp2485 +V! +I7 +sV +I1 +sVi +I1 +ssVOe +p2486 +(dp2487 +Vu +I2 +sVd +I1 +ssVbè +p2488 +(dp2489 +Vr +I1 +ssVbé +p2490 +(dp2491 +V +I16 +sVc +I1 +sVi +I2 +sV, +I6 +sVn +I5 +sV. +I1 +sVr +I1 +sV; +I2 +sV: +I1 +ssVbê +p2492 +(dp2493 +Vt +I1 +ssVOx +p2494 +(dp2495 +Vf +I1 +ssVOv +p2496 +(dp2497 +Vi +I1 +ssVOu +p2498 +(dp2499 +Vi +I15 +sVa +I1 +sVr +I1 +ssVOt +p2500 +(dp2501 +Vh +I1 +sVe +I1 +ssVOs +p2502 +(dp2503 +Ve +I1 +ssVOr +p2504 +(dp2505 +V +I3 +sVe +I9 +ssVOp +p2506 +(dp2507 +Vt +I1 +sVo +I1 +ssVOO +p2508 +(dp2509 +VK +I6 +ssVON +p2510 +(dp2511 +V +I2 +sVE +I1 +sV' +I1 +sV, +I2 +sVS +I6 +sVT +I1 +ssVOM +p2512 +(dp2513 +VA +I3 +sVE +I3 +ssVOL +p2514 +(dp2515 +VT +I2 +ssVOK +p2516 +(dp2517 +V +I3 +sVS +I3 +ssVOJ +p2518 +(dp2519 +VE +I8 +ssVOI +p2520 +(dp2521 +VR +I1 +ssVOF +p2522 +(dp2523 +V +I6 +ssVOE +p2524 +(dp2525 +VU +I2 +ssVOC +p2526 +(dp2527 +VH +I1 +sVR +I1 +sVT +I2 +ssVOB +p2528 +(dp2529 +V, +I1 +ssVOV +p2530 +(dp2531 +VI +I1 +ssVOU +p2532 +(dp2533 +V +I7 +sVR +I1 +sVT +I1 +sVV +I1 +ssVOT +p2534 +(dp2535 +V +I2 +sVE +I1 +sVI +I1 +sVH +I2 +sV, +I1 +sV. +I2 +ssVOS +p2536 +(dp2537 +VS +I1 +sVE +I1 +ssVOR +p2538 +(dp2539 +V +I15 +sVS +I1 +sVE +I1 +ssVOP +p2540 +(dp2541 +VT +I2 +ssVçu +p2542 +(dp2543 +V +I8 +sVe +I1 +sV, +I1 +sVs +I3 +sVr +I2 +sVt +I12 +ssVb +p2544 +(dp2545 +Vs +I2 +sVd +I1 +ssVb, +p2546 +(dp2547 +V +I1 +ssVb. 
+p2548 +(dp2549 +V +I1 +ssVb/ +p2550 +(dp2551 +Vd +I1 +ssVça +p2552 +(dp2553 +Vi +I17 +sV +I4 +sVt +I4 +sVn +I3 +ssVço +p2554 +(dp2555 +Vi +I7 +sVn +I18 +ssVçn +p2556 +(dp2557 +V +I1 +ssVbe +p2558 +(dp2559 +Va +I71 +sV +I31 +sVc +I3 +sVe +I2 +sVd +I1 +sVg +I3 +sVf +I3 +sVi +I1 +sVl +I43 +sVn +I3 +sVs +I14 +sVr +I63 +sVt +I3 +sV: +I1 +sV, +I5 +ssVba +p2560 +(dp2561 +V +I5 +sVc +I2 +sVb +I4 +sVg +I9 +sVi +I26 +sVm +I1 +sVl +I8 +sVï +I3 +sVn +I17 +sVp +I12 +sVs +I15 +sVr +I113 +sVu +I2 +sVt +I28 +sVy +I1 +ssVbb +p2562 +(dp2563 +Va +I3 +sVé +I25 +ssVbl +p2564 +(dp2565 +Vi +I41 +sVè +I3 +sVé +I5 +sVe +I226 +sVa +I27 +ssVbm +p2566 +(dp2567 +Ve +I2 +ssVbn +p2568 +(dp2569 +Vi +I2 +sVf +I2 +ssVbo +p2570 +(dp2571 +V! +I1 +sV +I38 +sV, +I25 +sV: +I2 +sVe +I3 +sVi +I11 +sVm +I6 +sVl +I2 +sVo +I4 +sVn +I97 +sVr +I31 +sVu +I53 +sVt +I1 +sVa +I1 +sVv +I1 +sVx +I2 +sV; +I9 +sV. +I10 +sVï +I2 +ssVbh +p2572 +(dp2573 +Vo +I1 +ssVbi +p2574 +(dp2575 +Va +I1 +sV +I1 +sVb +I4 +sVe +I154 +sVè +I1 +sVj +I1 +sVl +I16 +sVn +I7 +sVs +I4 +sVr +I2 +sVt +I24 +sVz +I2 +ssVbj +p2576 +(dp2577 +Vu +I1 +sVe +I4 +ssVbk +p2578 +(dp2579 +V- +I1 +ssVbt +p2580 +(dp2581 +Vi +I3 +sVe +I1 +ssVbu +p2582 +(dp2583 +Ve +I1 +sVf +I1 +sVi +I1 +sVl +I10 +sVn +I1 +sVé +I1 +sVs +I1 +sVt +I20 +sVv +I3 +ssVbv +p2584 +(dp2585 +Vi +I1 +ssVOù +p2586 +(dp2587 +V +I2 +sV? +I1 +ssVbs +p2588 +(dp2589 +Va +I1 +sVc +I4 +sVe +I3 +sVi +I3 +sVo +I8 +sVu +I1 +sVt +I1 +ssVby +p2590 +(dp2591 +V +I27 +sVs +I1 +sVl +I1 +ssV~) +p2592 +(dp2593 +V, +I1 +ssVTê +p2594 +(dp2595 +Vt +I1 +ssVTé +p2596 +(dp2597 +Vt +I1 +ssV.) +p2598 +(dp2599 +V +I2 +ssVu? +p2600 +(dp2601 +V +I5 +ssV., +p2602 +(dp2603 +V +I1 +ssV.- +p2604 +(dp2605 +VJ +I2 +sV- +I17 +ssVu; +p2606 +(dp2607 +V +I9 +ssVu: +p2608 +(dp2609 +V +I2 +ssV. +p2610 +(dp2611 +VA +I1 +sV +I914 +sVC +I1 +sVB +I2 +sVI +I5 +sVD +I3 +sVS +I2 +sVH +I4 +sVM +I1 +sVL +I1 +sVl +I7 +sVP +I2 +sVs +I1 +sVV +I3 +sVN +I1 +sVd +I4 +ssV." +p2612 +(dp2613 +V +I1 +ssVu- +p2614 +(dp2615 +Vp +I1 +sVr +I1 +sVM +I1 +sVd +I4 +sVf +I1 +ssVu, +p2616 +(dp2617 +V +I44 +ssVu. +p2618 +(dp2619 +V +I15 +sV- +I1 +ssVu) +p2620 +(dp2621 +V. +I1 +ssV.0 +p2622 +(dp2623 +V2 +I1 +ssVu' +p2624 +(dp2625 +Va +I23 +sVà +I20 +sVe +I46 +sVç +I1 +sVi +I169 +sVo +I75 +sVu +I29 +sVy +I1 +ssVu! +p2626 +(dp2627 +V) +I1 +sV +I3 +ssVu +p2628 +(dp2629 +Vi +I2 +sV' +I1 +sV1 +I5 +sV2 +I2 +sV5 +I1 +sV6 +I1 +sVC +I1 +sVE +I1 +sVD +I4 +sVI +I1 +sVH +I1 +sVJ +I5 +sVM +I4 +sVN +I1 +sVP +I7 +sVS +I9 +sVT +I3 +sV_ +I10 +sVa +I25 +sVà +I16 +sVc +I69 +sVb +I41 +sVe +I21 +sVd +I142 +sVg +I9 +sVf +I14 +sVé +I1 +sVh +I7 +sVk +I2 +sVj +I14 +sVm +I71 +sVl +I49 +sVo +I2 +sVn +I18 +sVq +I33 +sVp +I96 +sVs +I50 +sVr +I32 +sVu +I7 +sVt +I23 +sVw +I8 +sVv +I20 +sVy +I1 +ssVu_ +p2630 +(dp2631 +V +I1 +ssVu[ +p2632 +(dp2633 +V1 +I1 +ssV.A +p2634 +(dp2635 +Vh +I1 +sVr +I1 +ssV.C +p2636 +(dp2637 +Vr +I1 +ssV.] +p2638 +(dp2639 +V +I2 +ssV.h +p2640 +(dp2641 +Vt +I2 +ssV.i +p2642 +(dp2643 +Vb +I2 +ssVuy +p2644 +(dp2645 +Va +I5 +sVé +I4 +sVe +I5 +ssVux +p2646 +(dp2647 +V! +I1 +sV +I345 +sV- +I7 +sV, +I34 +sV. +I21 +sV_ +I1 +sV; +I13 +sV: +I1 +sV[ +I1 +sV? +I1 +ssV.n +p2648 +(dp2649 +Ve +I4 +ssVuz +p2650 +(dp2651 +Va +I4 +sVe +I9 +ssVut +p2652 +(dp2653 +Va +I36 +sV +I400 +sVb +I1 +sVe +I180 +sVi +I27 +sVh +I10 +sV- +I22 +sV, +I19 +sVo +I39 +sV. +I5 +sVé +I15 +sVs +I1 +sVr +I109 +sVu +I6 +sVt +I1 +sVè +I5 +sV; +I4 +sV: +I1 +sV? 
+I1 +sVô +I3 +ssV.b +p2654 +(dp2655 +Vn +I2 +ssVuv +p2656 +(dp2657 +Va +I81 +sVâ +I1 +sVe +I131 +sVé +I22 +sVè +I5 +sVo +I2 +sVi +I6 +sVr +I46 +ssVuq +p2658 +(dp2659 +Vu +I11 +ssVup +p2660 +(dp2661 +Va +I11 +sV +I33 +sV! +I1 +sVe +I36 +sVç +I3 +sVp +I4 +sVé +I21 +sVè +I1 +sVl +I8 +sVo +I1 +sV, +I4 +sVi +I10 +sVs +I22 +sVr +I18 +sVu +I1 +sVt +I1 +sVh +I3 +sV; +I1 +sV. +I1 +ssV.f +p2662 +(dp2663 +Vr +I2 +ssVur +p2664 +(dp2665 +V! +I2 +sV +I677 +sVp +I14 +sV- +I5 +sV, +I80 +sV. +I21 +sV; +I17 +sV: +I2 +sV? +I4 +sV_ +I3 +sVa +I85 +sVc +I20 +sVb +I2 +sVe +I209 +sVd +I28 +sVg +I12 +sVf +I1 +sVi +I47 +sVè +I2 +sVm +I4 +sVl +I2 +sVo +I22 +sVn +I39 +sVq +I18 +sVé +I11 +sVs +I275 +sVr +I29 +sVu +I14 +sVt +I23 +sVv +I4 +ssVum +p2666 +(dp2667 +Va +I9 +sV +I4 +sVb +I8 +sVe +I39 +sVé +I3 +sVè +I2 +sV, +I2 +sV. +I2 +sVi +I4 +sVu +I1 +sVô +I4 +sV: +I1 +sV_ +I2 +ssVul +p2668 +(dp2669 +Va +I21 +sV +I9 +sVc +I1 +sVâ +I3 +sVe +I61 +sVd +I6 +sVg +I31 +sVé +I7 +sVm +I2 +sVl +I6 +sVo +I5 +sV, +I1 +sVi +I10 +sVs +I6 +sVu +I17 +sVt +I23 +sVy +I1 +sVû +I3 +sV. +I1 +ssVuo +p2670 +(dp2671 +Vi +I63 +sVs +I1 +sVn +I3 +ssVun +p2672 +(dp2673 +Va +I1 +sV +I533 +sVc +I5 +sVe +I321 +sVd +I36 +sVé +I129 +sV- +I1 +sVl +I1 +sV. +I2 +sVi +I24 +sVs +I8 +sVu +I5 +sVt +I4 +sV; +I1 +sV, +I1 +ssVui +p2674 +(dp2675 +V! +I1 +sV +I553 +sVc +I2 +sVe +I9 +sVf +I17 +sVé +I5 +sVè +I4 +sV- +I14 +sVl +I25 +sV. +I9 +sVp +I5 +sVs +I155 +sVr +I23 +sVt +I119 +sVv +I21 +sV[ +I1 +sV; +I2 +sVn +I31 +sV, +I35 +sV? +I2 +ssVuh +p2676 +(dp2677 +Va +I3 +ssVuj +p2678 +(dp2679 +Ve +I5 +sVo +I57 +ssVue +p2680 +(dp2681 +V! +I2 +sV +I645 +sVr +I64 +sV: +I1 +sVf +I12 +sVi +I7 +sV- +I2 +sV? +I2 +sVm +I3 +sV, +I28 +sVo +I3 +sV. +I15 +sVs +I117 +sVn +I10 +sVu +I19 +sVt +I42 +sV; +I10 +sVz +I7 +sV_ +I4 +sVl +I180 +ssVud +p2682 +(dp2683 +Va +I1 +sV +I1 +sVe +I25 +sVi +I16 +sV, +I1 +sVé +I1 +sVr +I23 +sV: +I1 +ssVug +p2684 +(dp2685 +Va +I12 +sVe +I23 +sVg +I1 +sVi +I8 +sVh +I3 +sVm +I3 +sVé +I2 +sVu +I5 +ssVuf +p2686 +(dp2687 +Va +I1 +sV +I6 +sVs +I1 +sVr +I2 +sVf +I19 +ssV.t +p2688 +(dp2689 +Vx +I6 +ssVuc +p2690 +(dp2691 +Va +I1 +sVc +I2 +sVe +I14 +sVi +I8 +sVh +I37 +sVk +I1 +sVo +I27 +sVr +I4 +sVu +I15 +sVt +I7 +ssVub +p2692 +(dp2693 +Va +I1 +sVe +I2 +sVi +I4 +sVj +I1 +sVm +I2 +sVl +I34 +sVo +I2 +sVs +I4 +sVt +I1 +sV/ +I1 +ssVTy +p2694 +(dp2695 +Vr +I1 +ssVTu +p2696 +(dp2697 +V +I4 +sVc +I2 +sVr +I12 +sVn +I1 +ssVTr +p2698 +(dp2699 +Va +I8 +sVi +I3 +sVè +I1 +sVo +I2 +sVé +I2 +sVu +I1 +ssVTo +p2700 +(dp2701 +V +I3 +sVu +I14 +ssVTh +p2702 +(dp2703 +Ve +I21 +sVi +I7 +sVo +I4 +sVé +I4 +sVr +I1 +sVu +I12 +ssVTi +p2704 +(dp2705 +Vm +I1 +sVt +I1 +ssVTe +p2706 +(dp2707 +Vx +I2 +sVn +I2 +sVm +I1 +sVl +I3 +sV +I2 +ssVTa +p2708 +(dp2709 +Vs +I2 +sVn +I3 +ssVA +p2710 +(dp2711 +Vc +I2 +sVp +I9 +sVm +I1 +sVl +I1 +sVP +I2 +sVs +I1 +sVT +I1 +ssVgg +p2712 +(dp2713 +Ve +I1 +ssVTY +p2714 +(dp2715 +V +I4 +sV; +I1 +sV, +I1 +ssVTT +p2716 +(dp2717 +VE +I2 +sVO +I1 +ssVTR +p2718 +(dp2719 +VA +I2 +sVI +I2 +sVE +I30 +sVO +I1 +ssVTS +p2720 +(dp2721 +V, +I1 +ssVA. +p2722 +(dp2723 +V +I1 +ssVTN +p2724 +(dp2725 +VE +I1 +ssVTO +p2726 +(dp2727 +VI +I1 +sV +I6 +sVM +I3 +sV? +I1 +ssV.» +p2728 +(dp2729 +V +I2 +ssVTH +p2730 +(dp2731 +VI +I3 +sVE +I7 +ssVTI +p2732 +(dp2733 +VA +I1 +sVC +I2 +sVE +I2 +sVM +I2 +sVO +I4 +sVS +I1 +sVV +I1 +ssVTE +p2734 +(dp2735 +VS +I1 +sVR +I2 +sVU +I2 +sVD +I3 +sVN +I8 +ssVTC +p2736 +(dp2737 +V. 
+I1 +ssVTA +p2738 +(dp2739 +VI +I2 +sVB +I1 +sVR +I2 +sVL +I1 +ssVAC +p2740 +(dp2741 +VH +I1 +sVE +I1 +sVT +I1 +sVO +I1 +ssVAB +p2742 +(dp2743 +VI +I2 +sVR +I1 +sVO +I1 +ssVAD +p2744 +(dp2745 +V +I1 +sVU +I1 +sVD +I1 +sVE +I1 +ssVAG +p2746 +(dp2747 +VE +I3 +ssVAI +p2748 +(dp2749 +VR +I3 +sVM +I1 +sVN +I2 +ssVAH +p2750 +(dp2751 +VA +I1 +ssVAM +p2752 +(dp2753 +VA +I3 +sV, +I1 +ssVAL +p2754 +(dp2755 +VP +I1 +sVL +I3 +sV +I1 +sV, +I1 +ssVAN +p2756 +(dp2757 +VY +I3 +sV +I2 +sVS +I2 +sVD +I4 +sVT +I6 +ssVT. +p2758 +(dp2759 +V +I2 +ssVAP +p2760 +(dp2761 +VI +I30 +ssVT, +p2762 +(dp2763 +V +I3 +ssVAR +p2764 +(dp2765 +VI +I1 +sV +I2 +sVR +I4 +sVE +I1 +sVT +I3 +ssVT* +p2766 +(dp2767 +V +I1 +sV* +I2 +ssVAT +p2768 +(dp2769 +V +I1 +sVT +I1 +ssVAV +p2770 +(dp2771 +VE +I4 +ssVAY +p2772 +(dp2773 +V +I1 +ssVT +p2774 +(dp2775 +VE +I1 +sVD +I1 +sVG +I8 +sVI +I1 +sVH +I1 +sVM +I1 +sVL +I3 +sVN +I2 +sVP +I1 +ssVT! +p2776 +(dp2777 +V +I1 +sV* +I1 +ssVAc +p2778 +(dp2779 +Vh +I8 +ssVAb +p2780 +(dp2781 +Va +I4 +sVs +I1 +sVr +I1 +ssVAd +p2782 +(dp2783 +Va +I1 +ssVAg +p2784 +(dp2785 +Vi +I1 +ssVAf +p2786 +(dp2787 +Vr +I5 +ssVAh +p2788 +(dp2789 +V! +I18 +sV +I1 +ssVAm +p2790 +(dp2791 +Vé +I4 +sVa +I1 +sVs +I1 +sVo +I3 +ssVAl +p2792 +(dp2793 +Va +I2 +sVo +I2 +sVe +I3 +sVl +I12 +sVg +I4 +ssVAo +p2794 +(dp2795 +Vd +I1 +ssVAn +p2796 +(dp2797 +Vc +I1 +sVn +I1 +sVt +I3 +sVg +I8 +sVd +I1 +ssVAp +p2798 +(dp2799 +Vp +I2 +sVr +I7 +ssVuï +p2800 +(dp2801 +V +I1 +sVe +I1 +ssVAr +p2802 +(dp2803 +Va +I2 +sVc +I6 +sVi +I3 +sVk +I1 +sVo +I1 +sVr +I4 +sVt +I2 +ssVué +p2804 +(dp2805 +V! +I1 +sV +I23 +sVe +I9 +sV, +I5 +sVn +I11 +sVs +I4 +sVr +I8 +sVu +I1 +sV? +I1 +ssVAt +p2806 +(dp2807 +Vh +I1 +sVr +I1 +sVl +I1 +sVo +I2 +ssVAw +p2808 +(dp2809 +Va +I1 +ssVAv +p2810 +(dp2811 +Va +I1 +sVe +I3 +ssVAy +p2812 +(dp2813 +Va +I2 +sVr +I13 +sVe +I1 +ssVAz +p2814 +(dp2815 +Vo +I4 +ssVuà +p2816 +(dp2817 +V +I1 +ssVuâ +p2818 +(dp2819 +Vm +I1 +sVt +I1 +ssV b +p2820 +(dp2821 +Va +I144 +sVâ +I6 +sVe +I153 +sVi +I154 +sVê +I1 +sVl +I14 +sVo +I150 +sVé +I5 +sVr +I72 +sVu +I18 +sVy +I27 +ssV c +p2822 +(dp2823 +Va +I220 +sVe +I483 +sV' +I87 +sVi +I49 +sVh +I335 +sVl +I12 +sVo +I635 +sVé +I11 +sVr +I101 +sVu +I30 +sVô +I18 +ssVgu +p2824 +(dp2825 +Va +I11 +sVe +I61 +sVé +I8 +sVè +I3 +sVm +I2 +sVl +I6 +sVi +I4 +sVs +I3 +sVr +I13 +sVt +I5 +ssV a +p2826 +(dp2827 +Vî +I1 +sV +I146 +sV- +I9 +sVc +I61 +sVb +I51 +sVe +I1 +sVd +I33 +sVg +I19 +sVf +I21 +sVi +I97 +sVh +I2 +sVj +I2 +sVm +I49 +sVl +I100 +sVo +I2 +sVn +I137 +sVp +I112 +sVs +I69 +sVr +I97 +sVu +I396 +sVt +I51 +sVw +I2 +sVv +I394 +sVy +I17 +ssV f +p2828 +(dp2829 +Va +I282 +sVâ +I2 +sVe +I120 +sVi +I177 +sVê +I1 +sVl +I23 +sVo +I172 +sVî +I1 +sVé +I4 +sVr +I93 +sVu +I151 +sVt +I1 +sVû +I12 +ssV g +p2830 +(dp2831 +Va +I63 +sVâ +I2 +sVe +I44 +sVi +I6 +sVl +I7 +sVo +I50 +sVé +I19 +sVr +I121 +sVu +I33 +ssV d +p2832 +(dp2833 +Va +I396 +sVe +I2108 +sV' +I412 +sVi +I567 +sVè +I7 +sVo +I292 +sVî +I12 +sVé +I183 +sVr +I24 +sVu +I295 +sVû +I2 +ssV e +p2834 +(dp2835 +Va +I3 +sVB +I42 +sVd +I6 +sVf +I27 +sVi +I1 +sVh +I1 +sVm +I42 +sVl +I72 +sVn +I593 +sVq +I5 +sVs +I220 +sVr +I4 +sVu +I63 +sVt +I950 +sVv +I7 +sVx +I85 +sVû +I10 +ssV j +p2836 +(dp2837 +Va +I80 +sVe +I328 +sV' +I113 +sVé +I33 +sVo +I93 +sVu +I63 +ssV k +p2838 +(dp2839 +Va +I1 +sVe +I1 +sVn +I5 +ssV h +p2840 +(dp2841 +Va +I60 +sVâ +I1 +sVe +I29 +sVi +I9 +sVo +I158 +sVé +I12 +sVu +I18 +sVô +I8 +sVy +I4 +sVt +I5 +ssV i +p2842 +(dp2843 +Vc +I29 +sVd +I10 +sVg +I4 +sVf +I11 +sVm +I36 +sVl +I395 +sVn +I170 +sVs +I25 +sVr +I2 +sVt +I27 +sVv +I1 +ssV n +p2844 
+(dp2845 +Va +I41 +sVe +I299 +sV' +I258 +sVé +I27 +sVè +I8 +sVo +I335 +sVi +I44 +sVu +I20 +sVô +I2 +ssV o +p2846 +(dp2847 +Vc +I11 +sVb +I20 +sVe +I5 +sVd +I3 +sVf +I81 +sVi +I4 +sVh +I2 +sVm +I2 +sVn +I173 +sVp +I7 +sVs +I2 +sVr +I84 +sVu +I79 +sVt +I21 +sVw +I1 +sVv +I3 +sVù +I49 +ssV l +p2848 +(dp2849 +Va +I801 +sVà +I16 +sVâ +I2 +sVe +I1457 +sV' +I415 +sVi +I102 +sVè +I3 +sVo +I70 +sVé +I7 +sVu +I207 +sVy +I2 +ssV m +p2850 +(dp2851 +Va +I607 +sVâ +I2 +sVe +I299 +sV' +I75 +sVi +I182 +sVè +I30 +sVê +I48 +sVo +I544 +sVé +I62 +sVu +I23 +sVy +I1 +ssV r +p2852 +(dp2853 +Va +I109 +sVe +I408 +sVi +I85 +sVè +I1 +sVê +I4 +sVo +I121 +sVé +I135 +sVu +I21 +sVô +I4 +sVh +I1 +ssV s +p2854 +(dp2855 +Va +I281 +sVé +I29 +sVc +I22 +sVb +I1 +sVe +I413 +sV' +I140 +sVi +I192 +sVh +I4 +sVm +I1 +sVo +I490 +sVp +I9 +sVu +I289 +sVt +I28 +sVè +I1 +sVy +I3 +sVû +I1 +ssVge +p2856 +(dp2857 +Va +I20 +sV +I102 +sVz +I4 +sVd +I2 +sV) +I1 +sVm +I6 +sV, +I41 +sVo +I6 +sVn +I58 +sV. +I13 +sVs +I61 +sVr +I66 +sV! +I2 +sVt +I9 +sVu +I13 +sV; +I15 +sV: +I5 +sV? +I2 +sVl +I1 +ssV q +p2858 +(dp2859 +Vu +I1445 +ssV v +p2860 +(dp2861 +Va +I109 +sVe +I119 +sVi +I253 +sVê +I4 +sVo +I579 +sVé +I21 +sVr +I17 +sVu +I47 +sVô +I1 +ssV w +p2862 +(dp2863 +Va +I9 +sVe +I13 +sVi +I31 +sVh +I12 +sVo +I15 +sVr +I4 +sVw +I2 +ssV t +p2864 +(dp2865 +Va +I86 +sVâ +I1 +sVe +I139 +sV' +I5 +sVi +I36 +sVh +I180 +sVê +I16 +sVo +I490 +sVé +I8 +sVr +I304 +sVu +I54 +sVw +I1 +sVy +I1 +ssV u +p2866 +(dp2867 +Vp +I2 +sVs +I26 +sVt +I5 +sVn +I638 +ssVgo +p2868 +(dp2869 +Vc +I1 +sVb +I1 +sVe +I1 +sVg +I1 +sVi +I1 +sV- +I1 +sVl +I2 +sVo +I1 +sVn +I132 +sVs +I1 +sVr +I13 +sVu +I38 +sVt +I5 +sVû +I21 +ssVgn +p2870 +(dp2871 +Vé +I10 +sVa +I20 +sVe +I88 +sVi +I14 +sVo +I26 +ssVgm +p2872 +(dp2873 +Vé +I1 +sVe +I3 +ssV y +p2874 +(dp2875 +V +I102 +sVe +I26 +sVo +I62 +ssVgi +p2876 +(dp2877 +Va +I2 +sVe +I20 +sVé +I1 +sVm +I4 +sVl +I3 +sVo +I5 +sVn +I15 +sVq +I3 +sVs +I12 +sVr +I1 +sVt +I12 +sVv +I5 +ssVgh +p2878 +(dp2879 +V +I3 +sVt +I16 +sVo +I1 +sV, +I1 +ssV B +p2880 +(dp2881 +VA +I1 +sVa +I7 +sVE +I3 +sVi +I7 +sV, +I2 +sVo +I12 +sV. +I45 +sVr +I3 +sVu +I36 +sVy +I7 +sVe +I4 +sVR +I1 +sVU +I2 +ssV C +p2882 +(dp2883 +Va +I495 +sVA +I4 +sVe +I83 +sV' +I34 +sV +I1 +sVi +I3 +sVh +I22 +sVl +I4 +sVo +I58 +sVé +I3 +sVr +I5 +sVu +I125 +sVO +I2 +sVH +I31 +ssV A +p2884 +(dp2885 +V +I19 +sV, +I1 +sV. +I1 +sVB +I2 +sVD +I1 +sVN +I3 +sVS +I3 +sVR +I1 +sVV +I2 +sVc +I7 +sVb +I6 +sVg +I1 +sVf +I2 +sVh +I16 +sVm +I6 +sVl +I15 +sVo +I1 +sVn +I6 +sVp +I9 +sVs +I6 +sVr +I14 +sVu +I21 +sVt +I2 +sVw +I1 +sVv +I3 +sVy +I3 +sVz +I3 +ssV F +p2886 +(dp2887 +Va +I2 +sVe +I7 +sVi +I5 +sVl +I1 +sVo +I9 +sVI +I4 +sVr +I21 +sVu +I1 +sVO +I4 +ssV G +p2888 +(dp2889 +Va +I5 +sVe +I4 +sVi +I10 +sVo +I1 +sVI +I1 +sVr +I4 +sVu +I27 +sVU +I8 +sVR +I1 +ssV D +p2890 +(dp2891 +Va +I15 +sVA +I4 +sVE +I9 +sVé +I1 +sV' +I3 +sVI +I3 +sVè +I3 +sVo +I10 +sVi +I28 +sVU +I2 +sVO +I5 +sVe +I30 +ssV E +p2892 +(dp2893 +VB +I8 +sVd +I1 +sVI +I2 +sVh +I12 +sVl +I26 +sVD +I2 +sVn +I31 +sVs +I14 +sVr +I1 +sVu +I8 +sVT +I2 +sVX +I1 +sVV +I2 +sVx +I1 +sVt +I15 +ssV J +p2894 +(dp2895 +VA +I1 +sVa +I17 +sVe +I102 +sV' +I28 +sVé +I3 +sVo +I5 +sV. +I4 +sVu +I20 +ssV K +p2896 +(dp2897 +Va +I1 +sV +I1 +sVe +I5 +sVI +I1 +sV. +I4 +sV° +I1 +ssV H +p2898 +(dp2899 +Va +I10 +sVA +I2 +sVe +I6 +sVi +I4 +sVo +I12 +sVé +I20 +sVu +I1 +ssV I +p2900 +(dp2901 +V, +I1 +sV. 
+I3 +sV: +I1 +sVF +I3 +sVI +I9 +sVM +I2 +sVN +I5 +sVS +I2 +sVT +I1 +sVV +I5 +sVX +I2 +sVc +I1 +sVe +I4 +sVd +I2 +sVg +I1 +sVf +I11 +sVm +I1 +sVl +I144 +sVo +I1 +sVn +I8 +sVs +I14 +sVr +I1 +sVt +I9 +sVv +I5 +ssV N +p2902 +(dp2903 +Va +I6 +sVé +I1 +sVe +I13 +sV' +I4 +sVi +I2 +sVO +I6 +sV° +I2 +sVU +I2 +sVo +I34 +sVu +I2 +sVE +I1 +ssV O +p2904 +(dp2905 +V +I7 +sVC +I1 +sVe +I2 +sVF +I6 +sVh +I7 +sVk +I1 +sVc +I3 +sVn +I46 +sVs +I1 +sVr +I11 +sVu +I16 +sVt +I2 +sVx +I1 +sVù +I2 +sVE +I2 +sVR +I10 +sVN +I1 +sVT +I2 +ssV L +p2906 +(dp2907 +Va +I47 +sVA +I1 +sVE +I6 +sV' +I31 +sVi +I21 +sVO +I1 +sVI +I5 +sVu +I2 +sVo +I17 +sVe +I151 +ssV M +p2908 +(dp2909 +Va +I184 +sVA +I2 +sVe +I11 +sVD +I1 +sVé +I7 +sVi +I14 +sVM +I1 +sV, +I1 +sVO +I2 +sV. +I22 +sVI +I1 +sVS +I1 +sVu +I2 +sVo +I37 +sVè +I1 +sVy +I1 +sVE +I2 +ssV R +p2910 +(dp2911 +VA +I1 +sVa +I7 +sVe +I6 +sVi +I6 +sVh +I1 +sVo +I16 +sVU +I3 +sVO +I1 +sVu +I10 +sVE +I2 +ssV S +p2912 +(dp2913 +Va +I19 +sVc +I1 +sVe +I3 +sV' +I1 +sVp +I1 +sVi +I26 +sVM +I2 +sVm +I3 +sVo +I9 +sV. +I3 +sVé +I2 +sVu +I15 +sVt +I4 +sVy +I1 +sVU +I1 +sVE +I1 +sVT +I1 +ssV P +p2914 +(dp2915 +VA +I3 +sVa +I153 +sVe +I11 +sVi +I2 +sVM +I1 +sVl +I11 +sVO +I2 +sV. +I1 +sVé +I6 +sVR +I9 +sVu +I5 +sVo +I50 +sVy +I1 +sVU +I4 +sVr +I50 +ssV Q +p2916 +(dp2917 +Vu +I56 +sVU +I1 +ssV V +p2918 +(dp2919 +Va +I6 +sVE +I2 +sVi +I9 +sVé +I3 +sVO +I2 +sV. +I1 +sVI +I7 +sVo +I97 +sVe +I44 +ssV W +p2920 +(dp2921 +Va +I1 +sVA +I4 +sVE +I1 +sVi +I2 +sVh +I2 +sVo +I1 +sVH +I1 +sVy +I1 +sVe +I15 +ssV T +p2922 +(dp2923 +Va +I5 +sVe +I7 +sVi +I2 +sVh +I43 +sVê +I1 +sVO +I10 +sVé +I1 +sVr +I17 +sVu +I18 +sVo +I17 +sVH +I5 +sVy +I1 +sVR +I2 +ssV U +p2924 +(dp2925 +VN +I2 +sVS +I3 +sVr +I3 +sVt +I2 +sVn +I29 +ssV Z +p2926 +(dp2927 +Va +I1 +sV, +I1 +ssV [ +p2928 +(dp2929 +Va +I1 +sVE +I3 +sVP +I2 +sV* +I3 +sV1 +I29 +sV9 +I1 +sV3 +I6 +sV2 +I12 +sV5 +I2 +sVT +I1 +sV7 +I1 +sV6 +I2 +sVY +I1 +sV8 +I1 +sV4 +I3 +ssV X +p2930 +(dp2931 +V +I2 +sVI +I13 +sVL +I2 +sV. 
+I1 +sVV +I25 +sVX +I40 +sV; +I1 +sV, +I1 +ssV Y +p2932 +(dp2933 +Ve +I1 +sVO +I7 +sVo +I7 +ssV _ +p2934 +(dp2935 +Vé +I2 +sVI +I1 +sVA +I2 +sVC +I12 +sVE +I2 +sVD +I3 +sVÉ +I2 +sVH +I3 +sVJ +I2 +sVM +I13 +sVL +I9 +sVO +I2 +sVN +I1 +sVP +I9 +sVS +I6 +sVR +I1 +sVT +I3 +sVV +I3 +sVa +I2 +sVà +I1 +sVc +I1 +sVg +I1 +sVi +I1 +sVh +I1 +sVm +I1 +sVl +I6 +sVp +I1 +sVu +I2 +ssV " +p2936 +(dp2937 +VA +I1 +sVD +I1 +sVP +I6 +sVs +I1 +sVl +I1 +sVp +I1 +sVS +I5 +sVR +I1 +ssV # +p2938 +(dp2939 +V4 +I1 +ssS' ' +p2940 +(dp2941 +Vê +I9 +sVé +I44 +sVâ +I1 +sV +I660 +sV" +I3 +sV' +I1 +sV( +I7 +sV« +I3 +sV* +I13 +sV- +I2 +sV1 +I23 +sVÉ +I3 +sV3 +I1 +sV2 +I9 +sV4 +I2 +sV6 +I1 +sV9 +I1 +sV» +I1 +sVô +I1 +sVA +I81 +sVÀ +I1 +sVC +I350 +sVB +I62 +sVE +I73 +sVD +I46 +sVG +I6 +sVF +I12 +sVI +I154 +sVH +I20 +sVK +I4 +sVJ +I117 +sVM +I95 +sVL +I198 +sVO +I81 +sVN +I28 +sVQ +I51 +sVP +I88 +sVS +I29 +sVR +I19 +sVU +I27 +sVT +I66 +sVW +I16 +sVV +I73 +sVY +I5 +sVX +I6 +sV[ +I65 +sVç +I1 +sV_ +I16 +sVa +I156 +sVà +I22 +sVc +I223 +sVb +I79 +sVe +I150 +sVd +I344 +sVg +I44 +sVf +I111 +sVi +I68 +sVh +I40 +sVk +I1 +sVj +I50 +sVm +I192 +sVl +I235 +sVo +I40 +sVn +I82 +sVq +I129 +sVp +I259 +sVs +I178 +sVr +I99 +sVu +I38 +sVt +I122 +sVw +I7 +sVv +I113 +sVy +I8 +ssVâg +p2942 +(dp2943 +Vé +I4 +sVe +I7 +ssV ' +p2944 +(dp2945 +V +I1 +sVd +I1 +ssV $ +p2946 +(dp2947 +V2 +I1 +ssVâc +p2948 +(dp2949 +Vh +I5 +sVe +I13 +ssV * +p2950 +(dp2951 +VB +I1 +sV* +I10 +sVE +I2 +sVW +I1 +sVn +I1 +ssVâm +p2952 +(dp2953 +Ve +I16 +ssV ( +p2954 +(dp2955 +Va +I3 +sV# +I1 +sV* +I2 +sVd +I1 +sVf +I1 +sVi +I4 +sVJ +I1 +sVC +I1 +sVo +I5 +sVN +I1 +sVp +I1 +sVs +I1 +sVT +I1 +sVV +I1 +sV~ +I1 +sV_ +I2 +sVt +I1 +ssV . +p2956 +(dp2957 +V +I1 +sV. +I1 +ssVg: +p2958 +(dp2959 +V +I1 +ssV - +p2960 +(dp2961 +V- +I2 +sVn +I1 +ssV 2 +p2962 +(dp2963 +V +I1 +sVO +I1 +sV1 +I1 +sV0 +I18 +sV5 +I1 +sV4 +I1 +sV7 +I2 +sV6 +I1 +sV8 +I1 +sV: +I1 +ssV 3 +p2964 +(dp2965 +V1 +I1 +sV0 +I3 +sV3 +I1 +sV2 +I1 +sV8 +I1 +ssV 0 +p2966 +(dp2967 +V1 +I1 +sV0 +I1 +ssV 1 +p2968 +(dp2969 +V +I2 +sVe +I1 +sV- +I1 +sV, +I1 +sV1 +I2 +sV0 +I6 +sV2 +I2 +sV5 +I6 +sV4 +I3 +sV7 +I52 +sV6 +I6 +sV9 +I8 +sV8 +I6 +ssV 6 +p2970 +(dp2971 +V +I1 +sV0 +I2 +sV4 +I2 +sV. +I1 +ssV 7 +p2972 +(dp2973 +V. +I1 +ssV 4 +p2974 +(dp2975 +V +I1 +sV% +I1 +sV0 +I2 +sV3 +I2 +sV2 +I2 +sV7 +I2 +sV8 +I1 +ssVg +p2976 +(dp2977 +Vp +I1 +sV( +I1 +sVA +I1 +sVE +I4 +sVL +I6 +sVN +I1 +sVP +I1 +sVS +I2 +sVa +I7 +sVc +I3 +sVb +I1 +sVe +I5 +sVd +I3 +sVf +I7 +sVé +I1 +sVh +I1 +sVo +I8 +sVn +I1 +sVq +I1 +sVi +I9 +sVs +I3 +sVt +I8 +sVw +I5 +sVv +I1 +ssV : +p2978 +(dp2979 +V +I3 +ssVg. +p2980 +(dp2981 +V +I4 +sVn +I3 +ssVg- +p2982 +(dp2983 +Vt +I13 +ssVg, +p2984 +(dp2985 +V +I8 +ssV ? +p2986 +(dp2987 +V +I1 +ssV < +p2988 +(dp2989 +Vh +I1 +ssVéh +p2990 +(dp2991 +Vu +I1 +ssVém +p2992 +(dp2993 +Va +I5 +sVe +I24 +sVi +I12 +sVè +I1 +sVê +I1 +sVo +I26 +sVé +I2 +sVu +I3 +ssVd; +p2994 +(dp2995 +V +I5 +ssV â +p2996 +(dp2997 +Vm +I7 +sVg +I7 +ssV à +p2998 +(dp2999 +V +I641 +sV- +I2 +ssVzo +p3000 +(dp3001 +Vn +I1 +sVf +I4 +ssVzi +p3002 +(dp3003 +Vp +I2 +sVr +I3 +sVè +I1 +sVl +I1 +sVn +I2 +ssV3] +p3004 +(dp3005 +V +I6 +sV; +I1 +sV, +I1 +sV. +I2 +ssV ê +p3006 +(dp3007 +Vt +I70 +ssVze +p3008 +(dp3009 +V +I20 +sVr +I2 +sV, +I1 +sVd +I1 +ssV é +p3010 +(dp3011 +Vc +I42 +sVd +I13 +sVg +I14 +sVm +I6 +sVl +I14 +sVn +I3 +sVq +I4 +sVp +I37 +sVr +I2 +sVt +I305 +sVv +I7 +ssV î +p3012 +(dp3013 +Vl +I2 +ssVza +p3014 +(dp3015 +V +I2 +sVb +I1 +sVi +I1 +sV, +I2 +sV. 
+I1 +sVr +I2 +sVt +I4 +ssVgâ +p3016 +(dp3017 +Vt +I2 +ssV ô +p3018 +(dp3019 +V +I3 +sVt +I2 +ssVgé +p3020 +(dp3021 +V +I25 +sVe +I11 +sVd +I10 +sVm +I1 +sV, +I1 +sVn +I20 +sVs +I14 +sV; +I1 +sV. +I1 +ssVgè +p3022 +(dp3023 +Vr +I7 +ssV À +p3024 +(dp3025 +V +I1 +ssV É +p3026 +(dp3027 +Vd +I1 +sVn +I2 +sVt +I1 +sVg +I2 +sVl +I2 +ssVz, +p3028 +(dp3029 +V +I35 +ssVz- +p3030 +(dp3031 +Ve +I1 +sVn +I1 +sVm +I11 +sVl +I1 +sVv +I49 +ssVz. +p3032 +(dp3033 +V +I7 +sV. +I1 +ssV « +p3034 +(dp3035 +V +I2 +sVM +I1 +ssVz +p3036 +(dp3037 +Vj +I1 +sVM +I1 +sVP +I1 +sVa +I11 +sVà +I1 +sVc +I14 +sVb +I7 +sVe +I6 +sVd +I22 +sVg +I2 +sVf +I4 +sVé +I5 +sVê +I2 +sVm +I6 +sVl +I34 +sVo +I1 +sVn +I3 +sVq +I8 +sVp +I28 +sVs +I7 +sVr +I10 +sVu +I6 +sVt +I21 +sVv +I8 +ssVz! +p3038 +(dp3039 +V +I3 +ssVz; +p3040 +(dp3041 +V +I3 +ssV » +p3042 +(dp3043 +V +I1 +ssV39 +p3044 +(dp3045 +V +I1 +sV, +I1 +sV. +I2 +ssV38 +p3046 +(dp3047 +V, +I1 +sV6 +I1 +ssV33 +p3048 +(dp3049 +V9 +I1 +ssV32 +p3050 +(dp3051 +V. +I1 +ssV31 +p3052 +(dp3053 +V5 +I1 +sV4 +I1 +ssV30 +p3054 +(dp3055 +V0 +I1 +sV; +I1 +sV, +I2 +sV +I1 +sV6 +I1 +ssV36 +p3056 +(dp3057 +V. +I2 +ssV35 +p3058 +(dp3059 +V. +I1 +ssV3) +p3060 +(dp3061 +V +I2 +ssV3. +p3062 +(dp3063 +V +I2 +ssV3, +p3064 +(dp3065 +V +I2 +ssV3 +p3066 +(dp3067 +V +I1 +sVo +I1 +sVN +I1 +ssVFA +p3068 +(dp3069 +VC +I1 +ssVFI +p3070 +(dp3071 +VR +I1 +sVL +I1 +sVT +I1 +sVN +I1 +ssVFO +p3072 +(dp3073 +VR +I6 +ssVFr +p3074 +(dp3075 +Va +I16 +sVé +I1 +sVe +I2 +sVo +I1 +sVè +I1 +ssVFu +p3076 +(dp3077 +Vy +I1 +ssVFa +p3078 +(dp3079 +Vr +I2 +ssVFe +p3080 +(dp3081 +Vr +I4 +sVb +I2 +sVu +I1 +ssVFi +p3082 +(dp3083 +Vg +I4 +sVn +I1 +ssVFl +p3084 +(dp3085 +Vo +I1 +ssVFo +p3086 +(dp3087 +Vu +I9 +ssVF +p3088 +(dp3089 +VA +I1 +sVD +I1 +sVM +I1 +sVS +I1 +sVT +I1 +sVW +I1 +sVY +I3 +ssVYO +p3090 +(dp3091 +VU +I7 +ssVdl +p3092 +(dp3093 +Ve +I1 +ssVdm +p3094 +(dp3095 +Vi +I10 +sVe +I2 +ssVFÈ +p3096 +(dp3097 +VV +I1 +ssVYT +p3098 +(dp3099 +VH +I1 +ssVYo +p3100 +(dp3101 +Vr +I1 +sVu +I6 +ssVYe +p3102 +(dp3103 +Va +I1 +sVs +I1 +ssVY, +p3104 +(dp3105 +V +I1 +ssVY +p3106 +(dp3107 +VE +I1 +sVK +I1 +sVB +I1 +sVM +I1 +sVO +I3 +ssVY; +p3108 +(dp3109 +V +I1 +ssV! +p3110 +(dp3111 +V +I38 +sVô +I1 +sVA +I2 +sVC +I2 +sVE +I1 +sVF +I1 +sVI +I1 +sVJ +I4 +sVM +I4 +sVL +I1 +sVO +I2 +sVN +I1 +sVP +I3 +sVa +I2 +sVà +I1 +sVc +I9 +sVb +I1 +sVe +I2 +sVd +I22 +sVf +I1 +sVi +I1 +sVj +I7 +sVm +I12 +sVl +I8 +sVo +I2 +sVn +I1 +sVq +I5 +sVp +I1 +sVs +I10 +sVr +I2 +sVt +I2 +sVv +I6 +ssVdy +p3112 +(dp3113 +V +I2 +sVl +I1 +ssV% +p3114 +(dp3115 +Vo +I3 +ssVé! +p3116 +(dp3117 +V +I2 +ssVdw +p3118 +(dp3119 +Va +I1 +ssVl_ +p3120 +(dp3121 +V +I1 +ssV8° +p3122 +(dp3123 +V. +I1 +ssVl[ +p3124 +(dp3125 +V1 +I2 +ssVlf +p3126 +(dp3127 +V +I1 +sVe +I1 +ssVlg +p3128 +(dp3129 +Va +I31 +sVu +I1 +sVr +I1 +sVe +I4 +ssVld +p3130 +(dp3131 +Va +I14 +sV +I8 +sVb +I1 +sVe +I1 +sV' +I1 +sVo +I17 +sV. +I1 +ssVle +p3132 +(dp3133 +V! +I8 +sV +I1245 +sV- +I15 +sV, +I111 +sV. +I67 +sV; +I25 +sV: +I8 +sV? +I8 +sV[ +I1 +sV] +I1 +sV_ +I2 +sVd +I1 +sVa +I19 +sVc +I10 +sVg +I6 +sVç +I4 +sVf +I1 +sVi +I13 +sVm +I71 +sVn +I48 +sVq +I14 +sVs +I738 +sVr +I79 +sVu +I217 +sVt +I55 +sVv +I32 +sVy +I3 +sVx +I7 +sVz +I13 +ssVlc +p3134 +(dp3135 +Va +I3 +sVi +I1 +sVu +I2 +sVo +I1 +ssVla +p3136 +(dp3137 +Vî +I2 +sV +I758 +sV- +I2 +sV, +I5 +sV. +I1 +sV; +I1 +sV? 
+I1 +sVc +I23 +sVb +I4 +sVd +I8 +sVg +I15 +sVi +I182 +sVh +I1 +sVm +I11 +sVn +I90 +sVq +I12 +sVs +I33 +sVr +I46 +sVu +I2 +sVt +I28 +sVw +I5 +sVv +I15 +sVy +I2 +ssVlo +p3138 +(dp3139 +Va +I4 +sV +I4 +sVc +I1 +sVb +I3 +sVg +I22 +sVi +I20 +sVm +I4 +sV, +I1 +sVï +I1 +sVn +I77 +sVq +I1 +sVp +I4 +sVs +I127 +sVr +I23 +sVu +I36 +sVt +I13 +sVw +I7 +sVy +I2 +ssVll +p3140 +(dp3141 +Va +I92 +sV +I29 +sVâ +I1 +sVe +I676 +sVi +I40 +sVè +I8 +sVo +I35 +sVé +I21 +sVs +I2 +sVu +I11 +sVy +I9 +ssVlm +p3142 +(dp3143 +Va +I3 +sVe +I2 +ssVlh +p3144 +(dp3145 +Ve +I23 +sVo +I1 +ssVli +p3146 +(dp3147 +V +I8 +sV, +I2 +sV; +I2 +sVa +I8 +sVc +I34 +sVb +I23 +sVe +I111 +sVd +I2 +sVg +I21 +sVf +I2 +sVé +I3 +sVè +I8 +sVk +I2 +sVm +I13 +sVl +I1 +sVo +I24 +sVn +I13 +sVq +I26 +sVp +I1 +sVs +I23 +sVr +I8 +sVu +I2 +sVt +I37 +sVv +I28 +sVz +I2 +ssVlv +p3148 +(dp3149 +Va +I4 +ssVlw +p3150 +(dp3151 +Va +I1 +ssVlt +p3152 +(dp3153 +Va +I36 +sVe +I13 +sVi +I11 +sVh +I1 +sVo +I4 +sV. +I1 +sVé +I1 +sVr +I1 +sVu +I2 +sVy +I2 +ssVlu +p3154 +(dp3155 +Va +I3 +sV +I12 +sVe +I5 +sVd +I8 +sVi +I219 +sVm +I18 +sV, +I1 +sVn +I6 +sVp +I6 +sVs +I209 +sVr +I6 +sVt +I28 +ssVlr +p3156 +(dp3157 +Ve +I2 +ssVls +p3158 +(dp3159 +V +I153 +sVi +I4 +sV, +I6 +sVo +I3 +sV. +I4 +sVé +I2 +sV; +I3 +ssVlp +p3160 +(dp3161 +Vi +I4 +sV +I1 +ssVlq +p3162 +(dp3163 +Vu +I88 +ssVly +p3164 +(dp3165 +V +I25 +sV, +I2 +sV. +I1 +sVr +I1 +sVn +I1 +sVz +I1 +ssVl' +p3166 +(dp3167 +Va +I156 +sVé +I31 +sVâ +I8 +sVe +I61 +sVA +I15 +sVÉ +I2 +sVh +I34 +sVê +I3 +sVO +I1 +sVy +I1 +sVi +I36 +sVu +I20 +sVo +I33 +sVI +I5 +sVE +I8 +sV_ +I3 +ssVl +p3168 +(dp3169 +Vi +I6 +sV( +I1 +sV5 +I2 +sVC +I1 +sVB +I1 +sVD +I1 +sVH +I3 +sVP +I8 +sVS +I4 +sVR +I1 +sVT +I1 +sVa +I52 +sVà +I17 +sVc +I15 +sVb +I10 +sVe +I80 +sVd +I55 +sVg +I2 +sVf +I71 +sVé +I21 +sVh +I3 +sVj +I3 +sVm +I42 +sVl +I45 +sVo +I11 +sVn +I104 +sVq +I13 +sVp +I58 +sVs +I46 +sVr +I22 +sVu +I3 +sVt +I10 +sVv +I25 +sVy +I56 +ssVl! +p3170 +(dp3171 +V +I6 +ssVl. +p3172 +(dp3173 +V +I19 +ssVl, +p3174 +(dp3175 +V +I76 +ssVl- +p3176 +(dp3177 +Vv +I1 +ssVl? +p3178 +(dp3179 +V +I3 +ssVl: +p3180 +(dp3181 +V +I3 +ssVl; +p3182 +(dp3183 +V +I12 +ssV8. +p3184 +(dp3185 +V +I2 +ssV8, +p3186 +(dp3187 +V +I6 +ssV8 +p3188 +(dp3189 +Va +I1 +sVd +I1 +sVO +I1 +ssV8; +p3190 +(dp3191 +V +I1 +ssV89 +p3192 +(dp3193 +V, +I1 +ssV82 +p3194 +(dp3195 +V9 +I1 +sV2 +I1 +sV5 +I1 +sV. +I1 +ssV80 +p3196 +(dp3197 +V9 +I1 +sV3 +I1 +ssV81 +p3198 +(dp3199 +V5 +I1 +ssV86 +p3200 +(dp3201 +V5 +I1 +ssV85 +p3202 +(dp3203 +V. +I1 +ssVlâ +p3204 +(dp3205 +Vc +I2 +sVt +I5 +ssVlà +p3206 +(dp3207 +V +I39 +sV; +I1 +sV: +I1 +sV, +I7 +sV? +I2 +ssVlè +p3208 +(dp3209 +Vc +I1 +sVg +I4 +sVm +I2 +sVn +I2 +sVr +I29 +sVt +I1 +sVv +I5 +ssVlé +p3210 +(dp3211 +Va +I2 +sV +I14 +sVc +I1 +sVb +I1 +sVe +I42 +sVd +I1 +sVg +I6 +sVm +I2 +sV, +I7 +sV. +I1 +sVs +I13 +sVr +I4 +sVt +I1 +sV; +I2 +sV: +I4 +ssVlû +p3212 +(dp3213 +Vt +I3 +ssV8] +p3214 +(dp3215 +V +I1 +sV; +I1 +ssVK +p3216 +(dp3217 +VI +I1 +sVs +I1 +sVO +I1 +ssVK. 
+p3218 +(dp3219 +V +I2 +sV- +I2 +ssVKa +p3220 +(dp3221 +Vn +I1 +ssVKe +p3222 +(dp3223 +Vh +I4 +sVn +I1 +ssVKI +p3224 +(dp3225 +VN +I1 +ssVKS +p3226 +(dp3227 +V +I1 +sV* +I2 +ssV*n +p3228 +(dp3229 +Vo +I1 +ssVK° +p3230 +(dp3231 +V +I1 +ssV*e +p3232 +(dp3233 +VB +I1 +ssV*] +p3234 +(dp3235 +V +I3 +ssV*T +p3236 +(dp3237 +Vh +I4 +sVH +I1 +ssV*V +p3238 +(dp3239 +Ve +I1 +ssV*W +p3240 +(dp3241 +VA +I1 +sVe +I1 +ssV*S +p3242 +(dp3243 +VT +I2 +ssV*E +p3244 +(dp3245 +VI +I1 +sVN +I2 +ssV*F +p3246 +(dp3247 +VO +I1 +ssV*B +p3248 +(dp3249 +VE +I1 +ssV*: +p3250 +(dp3251 +V +I1 +ssVsl +p3252 +(dp3253 +Va +I2 +sVy +I1 +sVe +I3 +ssV*) +p3254 +(dp3255 +V +I1 +ssV** +p3256 +(dp3257 +V +I10 +sVe +I1 +sVF +I1 +sV* +I27 +sVS +I2 +sVT +I5 +sVW +I1 +ssV* +p3258 +(dp3259 +VY +I1 +sVc +I1 +sVm +I1 +sVT +I1 +ssVho +p3260 +(dp3261 +V +I3 +sVc +I4 +sVe +I1 +sVd +I5 +sVf +I1 +sVi +I5 +sVm +I112 +sVl +I11 +sVo +I2 +sVn +I36 +sVq +I1 +sVs +I39 +sVr +I38 +sVu +I15 +sVt +I2 +sVw +I8 +ssVPé +p3262 +(dp3263 +Vc +I2 +sVr +I4 +ssVha +p3264 +(dp3265 +Vo +I1 +sVî +I5 +sV +I13 +sV, +I2 +sV[ +I1 +sVc +I21 +sVb +I23 +sVe +I6 +sVg +I3 +sVi +I14 +sVë +I1 +sVm +I14 +sVl +I15 +sVï +I1 +sVn +I45 +sVq +I7 +sVp +I28 +sVs +I18 +sVr +I61 +sVu +I23 +sVt +I16 +sVv +I9 +ssVq +p3266 +(dp3267 +Va +I1 +sVp +I4 +sVr +I1 +sVo +I3 +ssVq' +p3268 +(dp3269 +Vu +I1 +ssVqu +p3270 +(dp3271 +Va +I141 +sVâ +I2 +sVe +I931 +sV' +I358 +sVi +I462 +sVè +I3 +sVo +I57 +sVé +I15 +ssVPr +p3272 +(dp3273 +Vé +I9 +sVu +I1 +sVe +I1 +sVi +I6 +sVo +I45 +ssVPu +p3274 +(dp3275 +Vi +I2 +sVp +I2 +sVb +I1 +ssVPy +p3276 +(dp3277 +Vr +I1 +ssVPa +p3278 +(dp3279 +Vd +I2 +sVg +I1 +sVl +I4 +sVn +I99 +sVq +I17 +sVs +I1 +sVr +I29 +sVy +I1 +ssVPe +p3280 +(dp3281 +Vr +I3 +sVu +I1 +sVt +I2 +sVn +I5 +ssVPi +p3282 +(dp3283 +Ve +I3 +ssVPo +p3284 +(dp3285 +Vc +I15 +sVi +I3 +sVm +I1 +sVl +I5 +sVr +I17 +sVu +I11 +ssVPl +p3286 +(dp3287 +Va +I3 +sVu +I1 +sVe +I7 +ssVPR +p3288 +(dp3289 +VI +I3 +sVÉ +I1 +sVE +I1 +sVO +I9 +ssVwa +p3290 +(dp3291 +Vi +I1 +sV, +I1 +sVn +I4 +sVs +I1 +sVr +I8 +sVy +I6 +ssVPT +p3292 +(dp3293 +VI +I2 +ssVPU +p3294 +(dp3295 +VR +I1 +sVB +I2 +sVN +I1 +ssVPA +p3296 +(dp3297 +VR +I3 +ssVPE +p3298 +(dp3299 +VR +I1 +ssVPH +p3300 +(dp3301 +V, +I1 +ssVPI +p3302 +(dp3303 +VT +I30 +ssVPO +p3304 +(dp3305 +VC +I1 +sVS +I2 +ssVPL +p3306 +(dp3307 +VI +I1 +ssVtz +p3308 +(dp3309 +V +I1 +sV, +I1 +ssV0] +p3310 +(dp3311 +V +I2 +sV, +I1 +ssVP. +p3312 +(dp3313 +VC +I1 +ssVdû +p3314 +(dp3315 +V +I1 +sVt +I2 +ssVck +p3316 +(dp3317 +Vy +I2 +sV +I6 +sV[ +I1 +sV, +I3 +sVh +I11 +ssVci +p3318 +(dp3319 +Va +I22 +sV +I36 +sVc +I5 +sVe +I62 +sVd +I6 +sVf +I3 +sVé +I2 +sVè +I2 +sVm +I2 +sVl +I8 +sVo +I2 +sVn +I38 +sVp +I13 +sVs +I20 +sVr +I4 +sVt +I13 +sVv +I4 +sV; +I1 +sV. +I3 +sV, +I10 +sV? +I3 +ssVch +p3320 +(dp3321 +Va +I200 +sV +I15 +sVâ +I30 +sVe +I224 +sVi +I35 +sVè +I21 +sVm +I7 +sVo +I54 +sVé +I32 +sVr +I11 +sVu +I5 +sVt +I1 +sVy +I1 +ssVco +p3322 +(dp3323 +Vc +I20 +sVb +I1 +sVe +I16 +sVd +I2 +sVg +I4 +sVf +I1 +sVi +I2 +sV- +I1 +sVm +I227 +sVl +I25 +sVï +I1 +sVn +I271 +sVq +I7 +sVp +I17 +sVs +I5 +sVr +I92 +sVu +I198 +sVt +I3 +sVv +I1 +sVû +I5 +sV, +I1 +ssVcl +p3324 +(dp3325 +Va +I28 +sVe +I30 +sVi +I3 +sVo +I3 +sVé +I1 +sVu +I20 +ssVcc +p3326 +(dp3327 +Va +I7 +sVe +I8 +sVi +I2 +sVè +I1 +sVo +I19 +sVq +I1 +sVé +I1 +sVr +I2 +sVu +I12 +ssVvé +p3328 +(dp3329 +V +I28 +sVc +I2 +sVe +I9 +sVd +I1 +sV, +I3 +sV. +I3 +sVs +I7 +sVr +I33 +sV; +I1 +sVn +I6 +sV? +I2 +ssVvê +p3330 +(dp3331 +Vt +I5 +ssVce +p3332 +(dp3333 +V! +I2 +sV +I330 +sV, +I34 +sV. +I25 +sV; +I12 +sV: +I4 +sV? 
+I4 +sV_ +I3 +sVa +I6 +sVc +I2 +sVd +I2 +sVi +I12 +sVm +I18 +sVl +I61 +sVn +I56 +sVq +I12 +sVp +I19 +sVs +I151 +sVr +I16 +sVu +I10 +sVt +I109 +sVv +I11 +ssVcz +p3334 +(dp3335 +Vi +I1 +ssVcy +p3336 +(dp3337 +Vc +I3 +ssVvô +p3338 +(dp3339 +Vt +I1 +ssVcs +p3340 +(dp3341 +V +I6 +sV, +I5 +sV/ +I1 +sV. +I1 +ssVcr +p3342 +(dp3343 +Va +I12 +sVâ +I1 +sVe +I20 +sVi +I68 +sVè +I1 +sVo +I45 +sVé +I16 +sVu +I16 +sVy +I1 +ssVcq +p3344 +(dp3345 +Vu +I12 +ssVcu +p3346 +(dp3347 +V +I2 +sVe +I9 +sVi +I8 +sVm +I3 +sVl +I28 +sVn +I33 +sVp +I7 +sVs +I10 +sVr +I39 +sVt +I11 +sVv +I1 +sVy +I1 +sV, +I1 +ssVct +p3348 +(dp3349 +Va +I12 +sV +I38 +sV" +I1 +sVe +I35 +sV' +I1 +sVi +I25 +sVè +I2 +sVl +I3 +sVo +I4 +sV. +I1 +sVé +I1 +sVs +I5 +sVr +I9 +sVu +I8 +sV; +I1 +ssVc) +p3350 +(dp3351 +V( +I2 +sV +I1 +ssVc. +p3352 +(dp3353 +V +I7 +sV» +I1 +ssVc, +p3354 +(dp3355 +V +I13 +ssVc! +p3356 +(dp3357 +V +I2 +ssVc +p3358 +(dp3359 +Vp +I10 +sVA +I1 +sVC +I4 +sVD +I1 +sVM +I4 +sVP +I2 +sVT +I1 +sVa +I5 +sVc +I7 +sVb +I3 +sVe +I9 +sVd +I19 +sVg +I1 +sVf +I2 +sVé +I2 +sVj +I1 +sVm +I9 +sVl +I26 +sVn +I2 +sVq +I8 +sVi +I2 +sVs +I10 +sVr +I2 +sVu +I19 +sVt +I8 +sVv +I4 +ssVc' +p3360 +(dp3361 +Vé +I22 +sVe +I65 +ssVc; +p3362 +(dp3363 +V +I2 +ssVc: +p3364 +(dp3365 +V +I1 +ssVva +p3366 +(dp3367 +V +I34 +sVc +I2 +sVb +I2 +sVd +I1 +sVg +I13 +sVi +I335 +sV, +I5 +sVl +I50 +sV. +I2 +sVs +I3 +sVr +I2 +sVu +I19 +sVt +I2 +sV; +I1 +sVn +I101 +ssVcé +p3368 +(dp3369 +V +I6 +sVe +I4 +sVd +I11 +sVl +I3 +sVn +I1 +sVs +I2 +sVr +I7 +ssVcè +p3370 +(dp3371 +Vs +I5 +sVr +I7 +sVn +I4 +ssVcî +p3372 +(dp3373 +Vm +I1 +ssVvh +p3374 +(dp3375 +Vé +I1 +ssVvi +p3376 +(dp3377 +Va +I4 +sV +I7 +sVc +I15 +sVe +I135 +sVd +I14 +sVg +I4 +sVf +I2 +sVè +I4 +sVl +I45 +sVo +I18 +sVn +I71 +sVs +I37 +sVr +I21 +sVt +I35 +sVv +I18 +sVz +I3 +sV, +I1 +ssVvo +p3378 +(dp3379 +Vc +I1 +sVe +I2 +sVg +I1 +sVi +I171 +sVm +I1 +sVl +I38 +sVn +I24 +sVs +I18 +sVr +I8 +sVu +I396 +sVt +I46 +sVy +I83 +sVû +I3 +ssVvr +p3380 +(dp3381 +Va +I31 +sVe +I63 +sVi +I8 +sVè +I1 +sVo +I3 +sVé +I3 +ssVvu +p3382 +(dp3383 +V +I34 +sVe +I11 +sVl +I4 +sV, +I5 +sV; +I1 +sV. +I1 +sV? +I1 +ssVcô +p3384 +(dp3385 +Vt +I18 +ssVOc +p3386 +(dp3387 +Vh +I1 +sVt +I2 +ssV/g +p3388 +(dp3389 +Va +I2 +sVu +I3 +ssV/f +p3390 +(dp3391 +Vt +I1 +ssV/e +p3392 +(dp3393 +VB +I3 +ssV/d +p3394 +(dp3395 +Vo +I3 +ssV/b +p3396 +(dp3397 +Vo +I1 +ssV/w +p3398 +(dp3399 +Vw +I3 +ssV/p +p3400 +(dp3401 +Vr +I1 +sVu +I1 +sVg +I1 +ssV// +p3402 +(dp3403 +Vp +I1 +sVw +I3 +sVg +I3 +sVf +I1 +ssV/1 +p3404 +(dp3405 +V1 +I1 +ssV/0 +p3406 +(dp3407 +V2 +I1 +ssVBE +p3408 +(dp3409 +V +I1 +sVR +I10 +sVU +I2 +sVF +I1 +ssVBA +p3410 +(dp3411 +VT +I1 +ssVBC +p3412 +(dp3413 +VD +I2 +ssVBL +p3414 +(dp3415 +VI +I2 +ssVBO +p3416 +(dp3417 +VU +I1 +sVO +I6 +ssVBI +p3418 +(dp3419 +VL +I3 +ssVBU +p3420 +(dp3421 +VT +I3 +ssVû +p3422 +(dp3423 +Vf +I1 +ssVBR +p3424 +(dp3425 +VA +I2 +sVE +I1 +ssVBe +p3426 +(dp3427 +Va +I1 +sV +I1 +sVr +I2 +ssVBa +p3428 +(dp3429 +Va +I1 +sVs +I1 +sVb +I1 +sVt +I2 +sVd +I2 +ssVBo +p3430 +(dp3431 +Vr +I8 +sVu +I1 +sVt +I1 +sVo +I46 +sVn +I2 +ssVBi +p3432 +(dp3433 +Vs +I3 +sVb +I3 +sVe +I1 +ssVBu +p3434 +(dp3435 +Vé +I12 +sVe +I1 +sVl +I22 +sVt +I1 +ssVBr +p3436 +(dp3437 +Vi +I1 +sVe +I2 +ssVBy +p3438 +(dp3439 +V +I4 +sVn +I3 +ssVûr +p3440 +(dp3441 +Ve +I1 +ssVût +p3442 +(dp3443 +Va +I3 +sV +I35 +sVe +I11 +sVé +I7 +sV- +I2 +sV, +I2 +sV. +I1 +sVs +I1 +ssVûm +p3444 +(dp3445 +Ve +I2 +ssVûl +p3446 +(dp3447 +Vé +I7 +sVa +I2 +sVe +I4 +ssVB +p3448 +(dp3449 +V1 +I1 +ssVB, +p3450 +(dp3451 +V +I3 +ssVB. 
+p3452 +(dp3453 +V +I45 +ssVU' +p3454 +(dp3455 +VI +I1 +sVO +I1 +ssVU +p3456 +(dp3457 +VA +I1 +sV" +I1 +sVD +I2 +sVG +I1 +sVH +I1 +sV* +I1 +sVB +I1 +sVU +I1 +ssVUt +p3458 +(dp3459 +Va +I1 +sVr +I1 +ssVUr +p3460 +(dp3461 +Vb +I4 +ssVUn +p3462 +(dp3463 +Vi +I3 +sV +I24 +sVe +I2 +ssVUT +p3464 +(dp3465 +VI +I1 +sV +I4 +sVE +I8 +ssVUV +p3466 +(dp3467 +VÉ +I1 +sVR +I2 +ssVUS +p3468 +(dp3469 +V +I2 +sVE +I1 +ssVUR +p3470 +(dp3471 +V +I1 +sVU +I1 +sV, +I1 +sVP +I1 +ssVUM +p3472 +(dp3473 +V +I1 +sVB +I2 +ssVUL +p3474 +(dp3475 +VA +I1 +ssVUN +p3476 +(dp3477 +VI +I1 +sVD +I2 +ssVUI +p3478 +(dp3479 +VE +I1 +sVT +I1 +ssVUE +p3480 +(dp3481 +V +I3 +sVN +I1 +ssVUD +p3482 +(dp3483 +VI +I2 +ssVUC +p3484 +(dp3485 +VH +I3 +ssVUB +p3486 +(dp3487 +VL +I2 +ssVh: +p3488 +(dp3489 +V +I1 +ssVh; +p3490 +(dp3491 +V +I4 +ssV!" +p3492 +(dp3493 +V +I6 +ssVh? +p3494 +(dp3495 +V +I1 +ssV!) +p3496 +(dp3497 +V +I1 +sV. +I1 +ssV!* +p3498 +(dp3499 +V +I1 +sV* +I2 +ssV!- +p3500 +(dp3501 +V- +I1 +ssVeo +p3502 +(dp3503 +Vi +I2 +sVp +I3 +sVr +I4 +sVn +I6 +ssVh. +p3504 +(dp3505 +V +I2 +ssVh, +p3506 +(dp3507 +V +I8 +ssVh +p3508 +(dp3509 +Va +I6 +sVo +I2 +sVc +I1 +sVb +I8 +sVE +I2 +sVd +I2 +sVP +I1 +sVi +I1 +sVé +I2 +sVH +I1 +sVj +I1 +sVm +I1 +sVC +I2 +sVO +I1 +sVp +I3 +sVu +I1 +sVt +I8 +sVv +I1 +sVy +I2 +sVe +I1 +sVh +I1 +ssVh! +p3510 +(dp3511 +V +I31 +ssVhy +p3512 +(dp3513 +Va +I1 +sVp +I5 +sVs +I20 +sV +I1 +ssVhr +p3514 +(dp3515 +Vé +I8 +sVi +I4 +sVe +I1 +sVa +I1 +sVo +I2 +ssVht +p3516 +(dp3517 +V +I13 +sVi +I3 +sVm +I2 +sV, +I2 +sVs +I2 +sVt +I5 +ssVhu +p3518 +(dp3519 +Vi +I17 +sVm +I13 +sVn +I13 +sVs +I4 +sVr +I1 +sVt +I2 +sV; +I1 +ssVhi +p3520 +(dp3521 +V +I1 +sVc +I3 +sVb +I1 +sVe +I12 +sVd +I2 +sVg +I1 +sVm +I1 +sVl +I26 +sVo +I2 +sVn +I19 +sVq +I1 +sVs +I54 +sVr +I11 +sVt +I1 +sVv +I6 +ssVà, +p3522 +(dp3523 +V +I8 +ssVhl +p3524 +(dp3525 +V, +I2 +sV. +I2 +ssVhm +p3526 +(dp3527 +Vé +I1 +sVe +I7 +sVo +I1 +ssVà- +p3528 +(dp3529 +Vd +I3 +sVv +I1 +sVl +I2 +sVf +I2 +ssVhe +p3530 +(dp3531 +Va +I4 +sV +I131 +sVc +I4 +sV: +I2 +sVd +I2 +sVf +I6 +sVm +I14 +sV, +I17 +sVn +I8 +sVq +I1 +sV. +I9 +sVs +I37 +sVr +I89 +sVu +I59 +sVt +I12 +sVv +I23 +sVy +I6 +sV; +I5 +sVz +I34 +sV? +I2 +sVl +I1 +ssV!_ +p3532 +(dp3533 +V +I2 +ssVÂC +p3534 +(dp3535 +VE +I1 +ssVà: +p3536 +(dp3537 +V +I1 +ssVà; +p3538 +(dp3539 +V +I1 +ssV48 +p3540 +(dp3541 +V. +I1 +ssV46 +p3542 +(dp3543 +V5 +I1 +ssV47 +p3544 +(dp3545 +V0 +I2 +sV. +I1 +ssV44 +p3546 +(dp3547 +V. +I1 +ssV42 +p3548 +(dp3549 +V9 +I1 +sV0 +I1 +ssV43 +p3550 +(dp3551 +V5 +I1 +sV6 +I1 +ssV40 +p3552 +(dp3553 +V +I1 +sV0 +I2 +ssV41 +p3554 +(dp3555 +V +I1 +sV0 +I1 +ssV4. +p3556 +(dp3557 +V +I4 +ssV4, +p3558 +(dp3559 +V +I3 +ssV4- +p3560 +(dp3561 +V3 +I1 +sV6 +I2 +ssV4% +p3562 +(dp3563 +V +I1 +ssV4 +p3564 +(dp3565 +Va +I1 +sVJ +I2 +sVm +I2 +sVo +I1 +ssV4] +p3566 +(dp3567 +V +I3 +sV; +I1 +sV. +I2 +ssVhô +p3568 +(dp3569 +Vp +I2 +sVt +I16 +ssVhè +p3570 +(dp3571 +Vq +I3 +sVr +I20 +sVt +I1 +sVv +I1 +ssVhé +p3572 +(dp3573 +Va +I9 +sV +I18 +sVâ +I4 +sVe +I9 +sVi +I1 +sVm +I3 +sV, +I1 +sVo +I9 +sVn +I1 +sVs +I5 +sVr +I9 +sVt +I2 +sV; +I1 +sV. +I1 +sVb +I1 +sVl +I1 +ssVhâ +p3574 +(dp3575 +Vt +I31 +ssVhà +p3576 +(dp3577 +Vt +I1 +ssV-s +p3578 +(dp3579 +Vi +I2 +sVu +I2 +sVe +I2 +sssb. 
\ No newline at end of file diff --git a/lib/venus/examples/filters/guess-language/guess-language.py b/lib/venus/examples/filters/guess-language/guess-language.py new file mode 100644 index 0000000..2f08223 --- /dev/null +++ b/lib/venus/examples/filters/guess-language/guess-language.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +"""A filter to guess languages. + +This filter guesses whether an Atom entry is written +in English or French. It should be trivial to choose between +two other languages, easy to extend to more than two languages +and useful to pass these languages as Venus configuration +parameters. + +(See the README file for more details). + +Requires Python 2.1, recommends 2.4. +""" +__authors__ = [ "Eric van der Vlist <vdv@dyomedea.com>"] +__license__ = "Python" + +import amara +from sys import stdin, stdout +from trigram import Trigram +from xml.dom import XML_NAMESPACE as XML_NS +import cPickle + +ATOM_NSS = { + u'atom': u'http://www.w3.org/2005/Atom', + u'xml': XML_NS +} + +langs = {} + +def tri(lang): + if not langs.has_key(lang): + f = open('filters/guess-language/%s.data' % lang, 'r') + t = cPickle.load(f) + f.close() + langs[lang] = t + return langs[lang] + + +def guess_language(entry): + text = u''; + for child in entry.xml_xpath(u'atom:title|atom:summary|atom:content'): + text = text + u' '+ child.__unicode__() + t = Trigram() + t.parseString(text) + if tri('fr') - t > tri('en') - t: + lang=u'en' + else: + lang=u'fr' + entry.xml_set_attribute((u'xml:lang', XML_NS), lang) + +def main(): + feed = amara.parse(stdin, prefixes=ATOM_NSS) + for entry in feed.xml_xpath(u'//atom:entry[not(@xml:lang)]'): + guess_language(entry) + feed.xml(stdout) + +if __name__ == '__main__': + main() diff --git a/lib/venus/examples/filters/guess-language/learn-language.py b/lib/venus/examples/filters/guess-language/learn-language.py new file mode 100755 index 0000000..d92ca7e --- /dev/null +++ b/lib/venus/examples/filters/guess-language/learn-language.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +"""A filter to guess languages. + +This utility saves a Trigram object on file. + +(See the README file for more details). + +Requires Python 2.1, recommends 2.4. +""" +__authors__ = [ "Eric van der Vlist <vdv@dyomedea.com>"] +__license__ = "Python" + +from trigram import Trigram +from sys import argv +from cPickle import dump + + +def main(): + tri = Trigram(argv[1]) + out = open(argv[2], 'w') + dump(tri, out) + out.close() + +if __name__ == '__main__': + main() diff --git a/lib/venus/examples/filters/guess-language/trigram.py b/lib/venus/examples/filters/guess-language/trigram.py new file mode 100644 index 0000000..95cbdaa --- /dev/null +++ b/lib/venus/examples/filters/guess-language/trigram.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- +""" + This class is based on the Python recipe titled + "Language detection using character trigrams" + http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/326576 + by Douglas Bagnall. + It has been (slightly) adapted by Eric van der Vlist to support + Unicode and accept a method to parse strings. +""" +__authors__ = [ "Douglas Bagnall", "Eric van der Vlist <vdv@dyomedea.com>"] +__license__ = "Python" + +import random +from urllib import urlopen + +class Trigram: + """ + From one or more text files, the frequency of three character + sequences is calculated. When treated as a vector, this information + can be compared to other trigrams, and the difference between them + seen as an angle. 
The cosine of this angle varies between 1 for + complete similarity, and 0 for utter difference. Since letter + combinations are characteristic to a language, this can be used to + determine the language of a body of text. For example: + + >>> reference_en = Trigram('/path/to/reference/text/english') + >>> reference_de = Trigram('/path/to/reference/text/german') + >>> unknown = Trigram('url://pointing/to/unknown/text') + >>> unknown.similarity(reference_de) + 0.4 + >>> unknown.similarity(reference_en) + 0.95 + + would indicate the unknown text is almost certainly English. As + syntactic sugar, the minus sign is overloaded to return the difference + between texts, so the above objects would give you: + + >>> unknown - reference_de + 0.6 + >>> reference_en - unknown # order doesn't matter. + 0.05 + + As it stands, the Trigram ignores character set information, which + means you can only accurately compare within a single encoding + (iso-8859-1 in the examples). A more complete implementation might + convert to unicode first. + + As an extra bonus, there is a method to make up nonsense words in the + style of the Trigram's text. + + >>> reference_en.makeWords(30) + My withillonquiver and ald, by now wittlectionsurper, may sequia, + tory, I ad my notter. Marriusbabilly She lady for rachalle spen + hat knong al elf + + Beware when using urls: HTML won't be parsed out. + + Most methods chatter away to standard output, to let you know they're + still there. + """ + + length = 0 + + def __init__(self, fn=None): + self.lut = {} + if fn is not None: + self.parseFile(fn) + + def _parseAFragment(self, line, pair=' '): + for letter in line: + d = self.lut.setdefault(pair, {}) + d[letter] = d.get(letter, 0) + 1 + pair = pair[1] + letter + return pair + + def parseString(self, string): + self._parseAFragment(string) + self.measure() + + def parseFile(self, fn, encoding="iso-8859-1"): + pair = ' ' + if '://' in fn: + #print "trying to fetch url, may take time..." + f = urlopen(fn) + else: + f = open(fn) + for z, line in enumerate(f): + #if not z % 1000: + # print "line %s" % z + # \n's are spurious in a prose context + pair = self._parseAFragment(line.strip().decode(encoding) + ' ') + f.close() + self.measure() + + + def measure(self): + """calculates the scalar length of the trigram vector and + stores it in self.length.""" + total = 0 + for y in self.lut.values(): + total += sum([ x * x for x in y.values() ]) + self.length = total ** 0.5 + + def similarity(self, other): + """returns a number between 0 and 1 indicating similarity. + 1 means an identical ratio of trigrams; + 0 means no trigrams in common. 
+ """ + if not isinstance(other, Trigram): + raise TypeError("can't compare Trigram with non-Trigram") + lut1 = self.lut + lut2 = other.lut + total = 0 + for k in lut1.keys(): + if k in lut2: + a = lut1[k] + b = lut2[k] + for x in a: + if x in b: + total += a[x] * b[x] + + return float(total) / (self.length * other.length) + + def __sub__(self, other): + """indicates difference between trigram sets; 1 is entirely + different, 0 is entirely the same.""" + return 1 - self.similarity(other) + + + def makeWords(self, count): + """returns a string of made-up words based on the known text.""" + text = [] + k = ' ' + while count: + n = self.likely(k) + text.append(n) + k = k[1] + n + if n in ' \t': + count -= 1 + return ''.join(text) + + + def likely(self, k): + """Returns a character likely to follow the given string + two character string, or a space if nothing is found.""" + if k not in self.lut: + return ' ' + # if you were using this a lot, caching would a good idea. + letters = [] + for k, v in self.lut[k].items(): + letters.append(k * v) + letters = ''.join(letters) + return random.choice(letters) + + +def test(): + en = Trigram('http://gutenberg.net/dirs/etext97/lsusn11.txt') + #NB fr and some others have English license text. + # no has english excerpts. + fr = Trigram('http://gutenberg.net/dirs/etext03/candi10.txt') + fi = Trigram('http://gutenberg.net/dirs/1/0/4/9/10492/10492-8.txt') + no = Trigram('http://gutenberg.net/dirs/1/2/8/4/12844/12844-8.txt') + se = Trigram('http://gutenberg.net/dirs/1/0/1/1/10117/10117-8.txt') + no2 = Trigram('http://gutenberg.net/dirs/1/3/0/4/13041/13041-8.txt') + en2 = Trigram('http://gutenberg.net/dirs/etext05/cfgsh10.txt') + fr2 = Trigram('http://gutenberg.net/dirs/1/3/7/0/13704/13704-8.txt') + print "calculating difference:" + print "en - fr is %s" % (en - fr) + print "fr - en is %s" % (fr - en) + print "en - en2 is %s" % (en - en2) + print "en - fr2 is %s" % (en - fr2) + print "fr - en2 is %s" % (fr - en2) + print "fr - fr2 is %s" % (fr - fr2) + print "fr2 - en2 is %s" % (fr2 - en2) + print "fi - fr is %s" % (fi - fr) + print "fi - en is %s" % (fi - en) + print "fi - se is %s" % (fi - se) + print "no - se is %s" % (no - se) + print "en - no is %s" % (en - no) + print "no - no2 is %s" % (no - no2) + print "se - no2 is %s" % (se - no2) + print "en - no2 is %s" % (en - no2) + print "fr - no2 is %s" % (fr - no2) + + +if __name__ == '__main__': + test() diff --git a/lib/venus/examples/filters/xpath-sifter/xpath-sifter.ini b/lib/venus/examples/filters/xpath-sifter/xpath-sifter.ini new file mode 100644 index 0000000..f739d09 --- /dev/null +++ b/lib/venus/examples/filters/xpath-sifter/xpath-sifter.ini @@ -0,0 +1,55 @@ +# The xpath_sifter filter allows you to stop entries from a feed being displayed +# if they do not match a particular pattern. + +# It is useful for things like only displaying entries in a particular category +# even if the site does not provide per category feeds, and displaying only entries +# that contain a particular string in their title. + +# The xpath_sifter filter applies only after all feeds are normalised to Atom 1.0. +# Look in your cache to see what entries look like. + +[Planet] +# we are only applying the filter to certain feeds, so we do not configure it in the +# [Planet] section + +### FIRST FEED: FILTER ON CATEGORY ### + +# We are only interested in entries in the category "two" from this blogger, but +# he does not provide a per-category feed. 
+# The Atom for categories looks like this: <category term="two"/>, so here +# we filter the http://example.com/uncategorised.xml file for entries with a +# category tag with the term attribute equal to 'two' +[http://example.com/uncategorised.xml] +name = Category 'two' (from Site Without a Categorised Feed) + +# This first version is the readable way to do it, but you'll run into trouble +# if you have any special characters, like spaces, in your require string +# filters = xpath_sifter.py?require=//atom:category[@term='two'] + +# Here's a URL quoted version: +filters = xpath_sifter.py?require=//atom%3Acategory%5B%40term%3D%27two%27%5D + +# Here's a way to get the URL quoted version on the command line: +# python -c "import urllib; print urllib.quote('STRING');" +# eg +# python -c "import urllib; print urllib.quote('atom:category[@term=\'two\']');" + +### SECOND FEED: FILTER ON TITLE ### + +# The verbose blogger whose feed is below blogs about many subjects but we are +# only interested in entries about Venus. She does not use categories but +# fortunately her titles are very consistent, so we search within the title +# tag's text for the text 'Venus' +[http://example.com/verbose.xml] +name = Venus (from Verbose Site) + +# Non-quoted version +# filters = xpath_sifter.py?require=//atom:title[contains(.,'Venus')] +# Quoted version +filters = xpath_sifter.py?atom%3Atitle%5Bcontains%28.%2C%27Venus%27%29%5D + +### THIRD FEED: NO FILTER ### + +# We can include other feeds that do not have the filter applied +[http://example.com/normal.xml] +name = No filter applied diff --git a/lib/venus/examples/foaf-based.ini b/lib/venus/examples/foaf-based.ini new file mode 100644 index 0000000..3ea5f58 --- /dev/null +++ b/lib/venus/examples/foaf-based.ini @@ -0,0 +1,44 @@ +# Planet configuration file + +# Every planet needs a [Planet] section +[Planet] +# name: Your planet's name +# link: Link to the main page +# owner_name: Your name +# owner_email: Your e-mail address +name = Elias' Planet +link = http://torrez.us/planet/ +owner_name = Elias Torres +owner_email = elias@torrez.us + +# cache_directory: Where cached feeds are stored +# log_level: One of DEBUG, INFO, WARNING, ERROR or CRITICAL +cache_directory = /tmp/venus/ +log_level = DEBUG + +# The following provide defaults for each template: +# output_theme: "theme" of the output +# output_dir: Directory to place output files +# items_per_page: How many items to put on each page +output_theme = mobile +output_dir = /var/www/planet +items_per_page = 60 + +# If non-zero, all feeds which have not been updated in the indicated +# number of days will be marked as inactive +activity_threshold = 90 + +# filters to be run +filters = excerpt.py + +# filter parameters +[excerpt.py] +omit = img p br +width = 500 + +# subscription list +[http://torrez.us/who#elias] +content_type = foaf +online_accounts = + http://del.icio.us/|http://del.icio.us/rss/{foaf:accountName} + http://flickr.com/|http://api.flickr.com/services/feeds/photos_public.gne?id={foaf:accountName} diff --git a/lib/venus/examples/images/edd.png b/lib/venus/examples/images/edd.png new file mode 100644 index 0000000000000000000000000000000000000000..eefa1c08c66c820ab79b9b9c7e2340c3e3f67b16 GIT binary patch literal 9918 zcmV;vCPCSWP)<h;3K|Lk000e1NJLTq002G!002-31^@s6K|jWB00004XF*Lt006O% z3;baP00006VoOIv0RI600RN!9r;`8x010qNS#tmY3labT3lag+-G2N4000McNliru z)ByzsIWO)V;Yt7iAOJ~3K~#90#k_gU<=1)N_c_~dyX(6zv(Iq2Q=+Jat|Hg6op@8- zw5}1?ZMvX#iUvv3B7Ze)fi`Hf0WxC8Xwld~TH8_KS^-)oh7DJ?Y*mycQz9u+9FjwF 
z$k}J!_3rn#oUMP{8F56;NTjv+0e*NdUf}-j`JQJv=lMR*Ig0PTh_^io&<~&3Z2kC? zk3FtH-587kl3S0t<57UT^}X--puW@ZlDsuoap(UNCCrb$?}4+&S66>(WozlLnk*g} z?u}hEY3Zxk(8VxNX?r{vO_<kBaKgU6e>nbwFP^*lR0v@VRDc8GcWSWT?mHx)^28(` zfBeByTdUoNV?91DlUyxtu8VSbn6GwLPYI*nuY`WE91YCO&lydp6j=w{gyToIsHP)g zu=wCH?Pcb}8Lk?KtF`&eXP$Ze?;Z?i7k~-S0D*5~0=^yKw*i|Ud+hYl`?r_=9hatm zWmLt-H@4SJE-FOUV&=vaL8D5+&TPndI$^EVCGF%KjAle9FrsFoo0AgBQUS4~y>^6p zuuF)B(!s^O+0Ey#jQ`;?=P&+_^Zo#s^OpAC^91}^fZsM>^AG;|fB2)ziTi6w;(usx zcvw9D#sP0mHBrr3>9?4cC3O&Fq^#s^#?v8z8OOI)5w+s5Dyglam$gu`L4*ovB6+4* z?etO2A+~H-+c?T#Fyq2*6>Q9(e*OB*Pd<JA)h`0K&ELOQ@b3lq*Mrj!f9UbvPk#K# zzm+8J?<Bd_uU)vvXfk1Mn)2rU1f53uSx(b9DiLVPDJ|v6TV0%U6sn<+9d;%&_8Ld+ zYEU&=Y7!KM5}4pw?)B(OMJE+F>zH~=A~cUa{Fr_A#b<u=Uw!eyCufzt#=`vVdjbD$ zfeS#r@9c^8-~8Dh`iW!P$Ns11zWV&>=U>0Ve(9*C!G;n=i`qqm1n)|Oj705FBG6G1 zp$nSIps;k(7US`ZYttExRuqY$B{fC?S}?1p7z-<jAqjIfH#bRj#=*gL9=(5!W;|hc z+Pe7crJcX=m6tC(0}NUC{<{LN-VNwuTWk3z|HuFCXaDQJ`7{6g=!w<;{K}QrmR~r3 znVrz$#;~Mvl5rCV&ZD%U34vLovDP3+P-MiI5rklFJ(D`%Rf0+t$P^bGsT8DIk|q)5 zX0)Rw*OB#Zi&;4)D8*zlXX*v>*^D%3R5R`0cl^XdU%hbk^Va*>cM`ZbvbB}`vw!^e z{?P-+*Z%&+3s0}T^0gO)^gZ6VHekPUly!xcDK175QxKz|@!&~tz631ZIV$Iw&ZbmB zF)amUIm4DEA!rhvVg%G~PHHlI2t=JAVqj@`g~rZF(v(3xr*V$9St3sitwht(Ve{yb zHTT?w>*siDx%;+(%U}KE-~Z_G_4Y46{iT1cu3s6^?X6;x885%K%l=&8l|iV0lmUUj zdx5hNr8G(jf~!CXidLUgX_~5{2?ifD)_I~ziJ>7&6I^tFq_Upip^1{Jb~LWSq6i|T z)9R993C*0M-6OiOID2OOu_DpCFJC@*6R3F?!R}iDSO4kHeBwXYzkc;Ek@l#nDdYW{ zT)Q#h(x_orf{KxZIYJ1^sv(dNqr|&81P|UJgdkUfLReB!Bc!DE4Nf_t&hS2B8;djv zbySqrqdcHAsZIzHX4M>93p9>)E+9mj#<A2%$?Ztrf9ll7q!K&luO3`{H{ib+Z~>^3 zr`CR^)oy=idU%uAOz8JkIe%rBD^pL|G$dG7+HFb~DI0LY;G@F@hZKS$Z?U$zj0$tS z^R(JMR=YW|86veJ*oamsQbDj4p%H{=Sz7Mm>XH<JZ)R8lbrUgKlMs<Q!&Vg%a6XV5 zqfeeZ`U4kl?!UP|DzAMj@ZSo!e(>Q_D`!q@{^d^U$@7%`gI%r+D{c-dVr(#(VOGs4 zn}D?f8*3sCEpwDqVCOW|jLJ(!WkXe!1Xq&jj83sa@DXoI+J!+1k8%~=qJ?xbMD(;f zeTWgI5~Q$*Xz|vQnv8xgCr>1YJNr!Qni#BVW$9noo6KLF%<T>fpAYW^xB%4u`oDeh z{`=3K{@XV$zJ8ypJ>_h|g{uRu4r^w&R?R3fW)(Ctkf;PHE3{U~s4+SPsi}jf7Lp(Y z()5^HgR_#PrJ1=QL_w-jL~!(4ZCXYlyeCZ(g!BYACsTq}s?jL2q>Ts;<t$sBC6>B5 z!^x1cNm*+r*{RLWgI~LJ@HrRalzT+_djXf9`^;~D^uFVp|L-4v;ZGjDaOnop2j<h7 zy;)?p_LP;!qj5n~)gCV_EiGsngVqWmB3fkNB3TBh_9zU&dxX*u1Bvn2x};q!lS%01 z8A3x7N<wUiQQ~|+s01YiS(1@OMd}0XqJ{7Q-&CknU|l3c!%8RNfg@|H%B1ex<=q#7 z#nSRN@cLdR?#uoE>gkuSE`9C%Rr>85kr;05Tw^ddlvTh>iM5UxB0@`4)VC&AK@-Sz zN@Hh8Au+;}<OLOhS~j?5NVnBRxCyN!BQM%ivje0w?2Sh>D6~w-yh9`z&Pk+-xOq(s zmORsldW4ZMje^7d8OldYT61Z?qK-m5arXFs{POkjpMLF)%Oe*4{te*org8aq|C_&k zQVIEsU;65KJ!znm!n~<yM1mJN^ZA@wE`o|ykR=N1JyChQP&gNn0&HmTINYs_%R0O8 zSDBHSE<V)ceW)7C;h`rchMBENvVs(~i2lwItw%JDTnc2YNR+3e4blpNP6#sMou{lU z*QXL?B1?%jlaLHwy?p%{V0sJqJvA->b>#T=&%N~K<@DxYjuw*Pz%d%{5d;XG<E27` zNDKkzJ<3D^3SV~+i9kw^YfADoqpTfX<p?QJagIO|qhnr=u)*QR3g-h-DGmlxlH3rQ z5*sv9X82f<;#n1uGu;(Th-8UEIcRiX>`JuBm;^<T8fhGRWexjtuIvsgAmuH7Al?f& z?|=O9V(P?y^5W~Svp@Hkq#)Kcb&!Z4uysj;$JZrU+NGRTlnoFCB6x(C#3)dzg?BYR zI;uKQ%^T7rA>h$cLa+ppFen`oN2}E)HZ?*(j0Wo*L0P0!6bg<Oa4K>1^MZDv2`Zxw zftRja=H}c|HZYVOTqAI3q7Cfq4+h+W#d(Lu|ME}$^haNQ<^0y+%u>e;J1^<zK+)11 z9873}BpQQJ8PR!Uhy<n4B9e%Rl7hOaX)H+5V1oyx7khXM4oQ+|WN=t3shvV;jZ~2) zLZ~8Hk|DLj;pj-uYMQg08?qv$omn;>I6?H5Xd{7!y_auNse*<Pbx45(rz1=2{U5Hq zdv-7y?%eY8uag(@?Z6NB4_Z-VMA<?_h4zkaR^Xh#ONG&r76Gj^iPk7BkTRlG#0ZCy z79}Hy8gxP=hZxX8Ad$o%sVYZRdk~I9d!%&CssX`+mI(@v^i$eVvE?HVc2YKT&uTYt z{QEyd=Y6Lz8#&pTJ|FnmPq5L?aZa(3!9%N>4;)`-Bek4d?S14&p8UQ~Z7eU{4|MK~ zY46Z@FB9UWOP6t>A~c>NP4Kp)sy)$xmY_7un>iw+2qcN}NL!+#L`Ov`KnRTs3Mar3 z5keBZMJhvGRY)n(#?UD=vJQ-<p2QU7sUy(_LyNUU@xG$qv8^RmHd_>1UCd^I>}Zl> zD`cD7bhkdhkN<qhwSVwy?7NiBo@6;~IC^v)E?%aS7Rke>j(ue3aBzu5-tK|;4j<ne z?zUWvbn*-*N|HQd|8S48aX17%z-%_9lAsz-t_q5n5%Fl*Abmq_QbY_iz9vM8l?ip@ 
z5y~K<M5>f1B|d_$YEmTK1hgKIsWy6k$W|(OAWM1t%qmAdcmlPQ(K>rSs>lhxLfbB; zwM>$%^1xsGtNiqZSNXqx_j$IqPIBqu%k0l3T58IA%y@9)IMBMK=;m&~#i=uo9NBsL zIjZR)B6_A#GHC*h3lId+JCtdIE75q;Sdw@}p$xgwXljg-#0;kO9B(60wXt4eP!Izy zdP4LFq0k;uEh(g<D;xTu;><?M*|ioM8*3CBU1Z;oY^|YM8OTUh9KnYYfx_7#+DIPw zw||_!@YQqdj&|5@YOcltRo85pKBOyauy>oTzI#-^@4<&BUpaSyW;Darb3!@CSx0R> zga{Ed5upvKfW8RyWT0<6U0cyBGUl$Pik_~4Lcp$TP`3h~jbKxOiw!OBS&0cPQPGi> zM^2oiTQ)rKk^4!P5<*iU(ga!L2rZB*MVTBSG*YC5*wENRB7tman}>e#Nd~|DB|?)> zM6kjzo9}_q9Tq&jxEJvFg)e>i%Sdo`N*zkPk2o9gE)YyaSb?Y{X$q;btQg0p?yzhE z%PmEx08<=gFqm?6I;Nuxy)fq>Olf4Bxsqhk(hiR8G-EkY6q#VVzr@C|RnlBhJhY7I zbg`2m-dD&%qgpLOv}n;oNP~zTB{BfKpA$mE$`c=@|A)^pZ3NvyaM(!dkdlk(ZASQ> zz{Nw4J^rKDcmDt(6!-*uKnq7yp5Ox^fES+N9V^Omwv)1!Kr2n?W-SsW=yo$UR&$O_ zTHLt4!w!xv1!a(krs8<laO~K9EUk3VICQQ^I|;>oC&;$9@n*Kz&-Wxc1d{#=I`5FA zD@c_fkO-lHfb%mTqGXO~_c{Llv*_oaVsEOjX`8i<rW&VjH|%c!E&v9@a;>U7!78-s zfSqD8MT#JJupTPsNs*jtb+~_Xoo=Dg(XqU`Ml~ByEVYm=Mc4IM*;;1p{Hypz;e@2K zy2|$VJx0E{K{MGWxEj@JQJg+aR32#(q{)chVOssg!1TgOy`Lk5!iO2oPf;?*yD?H0 zpaedYtUvY`M>}7^`wWo^(iTj2>u;T;4E6@2$)E<AA!36N5xhi7gZG~39f=6^q~-Y1 zDy>$|TE9*3B}zHk>wRQy5Q!!<B}q?n@}o~6TPb?Ejb3Y`SNlk9@Ym)j*+LgRWNYz$ zp(RQcpdw1Oz7E*d1De?$CSM{=*D<<}_fw?Ikuo6!i$IXAt#Nd@VE+)DZ7Ao4!@>A~ z1s50Y20Q|B|Kb}@B~j1>OLP?>Izj_lM4Yljqscv-TFP1LB?vdiT20nfsBQrwAhV2! zLv=c2{VpQW$Sfg<8qx)$5}hI2JOUyTTuo>yl+FoFO{i;>&IrOI5LCl!ASL<AQ6df> z=0qYfSP+sB78Ru!Jt9*que2!236m<LnL4ajw}IbtuO5H@cR&5iV~;#JA^MJrf*8bN z|EvV5(FEJDE+p^YTxD~$N0^OCTMF4Wkbp=HDA27AA~(n)1ugKg1OZYgRFV@zAk1ro zGQ{BVvc{Kl++>P~no!sHSYrCiWGmYUBWdP`AR{_yW0Ga0?h!&mjQ&=Tn<90J(h1{{ zgFJF<<37$^8r;_QT{R96ukKtcPn{fnJ~8Sikcy}@Ay`_8CTnHTXx2nzH8pfw3AHe& z!XS+%G!;^31lQo|5wew`W1FY~sIbsZ<p_->#D*kWM(Y;N1zbHLItNkW%Q<FgiL|?k zREFT@*x?v}V(la*T|&xq0a!E$VUR)*VuKJGCA4o9T8TWvRwH6`w=MeXf8XKbKm2|F z=CV%m_g7NjTt$e2C;~|ego<vfpc5R8bC|Xv>GY6BLzEB%p{|fwf=mizp5v<uk!l2j zNB{%UBq&oLgj_H!1XR`{28UWIP)Uv}r}*&{54dWEZgt4kwh6Jq+96UV2$>LLAcl%S zM5`9w&qy{_OLOshI})f}iS^~%VZH;n>aB1850fSw3FAY``2;T{2=LKU&r33qFprVx ztU@S_H1Sq+2<Ud3AR1(z03cO@OcMeQAtgF12z5<!aDeW1phzJ$palYjcQrHtKbsRK zHLjW=44BnELTJkUD+F7SE^T0nWr&U#9RjhirsxqeBU|m3e(Kw%U!Vj?ncS)IcK{cF z9F40p7LY<9r6NX9Dq}W}*hb@GNgV>G(>C?gQY<G#EFwq<AoGOaEK+Oyd=5r{5d>F* z@MzOTNJW@eH02zXrKr4(FbbV^K|t^g-j|>ws@=xh3OApjM1m^X*zu71aF-|orqe+t zIVN2q#5pkr+S|wMN+%Dsn901gMyKd_IQWKg@;gkunI1mxlh*f3ouZR~t4bQ%;GHAQ zGHme7eZxWHI2@FuNhGK_!fO<u(i~wVA~gu9ky%Oz4T6+dM?%vqG@uk|zmICQKuI7V zlp=&h1t{I#Ah;5Oz||9^Q4j_7!49zr;1@1qOOSbqHA7UMA(NE#?fz<8`CwY9yfPk# zy~Dxlywyzb4&V_8DwUsh%KeSfI#Ihw6FiOv5d+F=q83yEhEZ`i9JAEc6e~SQJ)x=* zGYLgpn6X@FFFJ=12A#JNT7efxqj2+*;4D!DkO84@O_nqWrNDr%N`y8<8?lE&;@qKJ zi^+=@D9aMzX0*;6BR#T&S}G82iOH9eem}3C-;e&>_0jJg4o7dW;NAS!efn(*7uRoI zA8j5z_KC{*4Qp#$2m}G)Rs)1{9v>iAu)&;6DN>bS`pIHKr9>zJG7v+9%2RaShA0TW z!j)5^4G0hTK(v8S*2KyZTtg@;@Sf-#vGL#+O7ep#erHa7VV~(MmzlhFg~860@^FkA zRWPqn>0;OCnbYZ<X@2MP=YIW*FP#50a2YsYQDBI71C9@#&DE79EyWMUAP`>SJp>|A zdSVQO81SK_8ztNQWsLMBxhHB5E)boEC{Ue+?@Ez^i-g)Dq5>De1$<o+ohOtH)Sl=( zA~DER6KsW>ER+;ViyzG>FI}g;GNB%qH1jFJXCy^Vw!B5&?GvOW=`5i-Z9ez8^PelE ze&Y4Z*MG0H{wfO+;_H>vcW4}l?ab`vnfkfXDjkDb{5YTli#1Uyyoh98vt1Z^?SwoL z2+hJor9c{kN-|>bL|YSGAQGW!aEC*xE7x&@F=5saCk=LYNEp=kaz^kTY*=h)#uNPB z4100J?6n;x`wcg5j(PsD<fTUO`e2_6dxu=VHXz*{qs9YdF4-5WuYBd%OJDfuPyEPF zefH^RewVj(Ox?ATayT57XV2cZ9+du&Qwpa7(OR-JMaF=L5X2&JIn$B!ij+J_Q7wb% z^-xJd6anGQt+gUTh{UEL&KvwRP|qxfS8g)BzRUdP0oC<A+zgbHP&xc`hC7(h+&IKt zpD}%Hm%ZJJH}<Fe>1fKcvEY@U87B>$mZ2>Ky&`4x<OZhIqLr;=|LC9p*8lnO4?h0r z$z#XUXJ3Bh72YBo?^;QTKz-@r<zL*`-2Ukjx#E0{5FQmH3XKmA6&0xzDBlq3KsgP_ z)S(iG)CS>i@t8Iew8fV*{Jdm#P;=$=%k19R#|4cM0_{CINEC;(S{*izZqnIUaPicU zW@o^S8@s$Y6I`E<5G%`U)HNql&HK*W$45>c#moYuy{jZAx?DYXjph6EWB<#~|Ku;f 
zda(b>#`>nbh4r5K_-)|s;^oWNk8U6RUE`YnC=(Ka0_TxR0RkN(g%+5!izna}jCO|< zb5GuGfr^C0-}2=-p|lM52JF7M$KgSZ4_)f$iLu5;aALv5k0y?IWe;OkXq{L^_qzyF zkWMPP<pKR}i_VcvPCRgoo#9R1xN({8nGYfA8}vT>DD8*N@XY`BI}CS9)*k=R<^TDw z{hhxDyvd@5=D!*6+u$peWdEY8)W7eCLlaE~O5>XbK_KXqX>2G%#Cy+tIw90G^O<0I z23c-UxumWSv2Ko?Mw*Eu%T~$yp6l0duv<F3Y)F(K6M-U~(iH~j8?qxOF!%RRMH{Yr z&YXT9rz^$$)$6n#+(s8U-74e$_nk$zE6f8YQLPq9f1S1M4R!`QWLrlDw*U`$YdO2O zkKdL{m)_hvy1l-!lISOdv((<<eI(CPf{<7{rKci2ZBSJW4xEe3nuhs&L^CU~GfO>e zz^pKJ8PD$S@cEk;D3PqMv^d&ZrY9vuCg@Aci6d*Ac<c=M=@mK;Jw$Qj7?#O`in_5w z=h3soPJ=xd5p9i1!P}C0xJ$OSi7EQXy(w<uIREB_XaD%wS3eKj1ZI4b?CeenK((Fq ze*2*;`S~l;8@wSkH)Ws>HAx-ONzQQQxDlt&&*>?Ps)mFl!|8;>QHhEP`_n1s%IgeD zOVU~4(bEs}=*9|#tC;Q&z+^0M9AW8bn|!s4J=`a0uPib-9AY4`qeFzwU<IVhP^%!K zKqx4-PLUrwiA)U$Mf=zq{<#-f>8&i?VZq-Z-wb#J+-UFWtKA|CtG1H6N}{92*D2lz zHdM`{%WD*g<mS#bSXrgpS%$$bzVZxw%FBam><L)zuTW&38?zyuWXSr~QR2-5Rvy?y zwiJ0QM-(8J4Qb~Hqd$6v-Xrfv+%L$FoS-sSn7nq5wSV_7B4tJhB}x_eFh{6_YH*D> zsgWi_cN0GNbN?P!uaCC*g-_k4mA`52>>c1Q>>Up4GY{@9t7_vTORH?mN-i`dBO(HW z36kyJ689fj=Gx7xoWJ-c{j`fO12@VsBdy6>IZ1uM2kv{A<>ely9y*FPHMRgd9HEzU z^lE|kGsq(2%M-5L*ysMW8|dRbbXO5`=$zR=1PwaieNFU{7(KC`Q(wPMy0L{aZHm*U zD5qEG3|L~(3jPfd{apcnb8l~6KJoF_j@Gk{tOci*mw0F_cya#_qcuha!fY31QXV{g zjMa^GUU=bkx~to4mLpcOgj|-KKEBEJgU3NSR9@r974>Kzv)m;+v4!dvG<$nUnX&iM zIh-@Z7*PEV?Ng7CHzz6AFH!DXCO>){5e3!o`eGN@M3VLfI_VIait*l8(OHj9zu)Dp zzW3WLsdpzL?jr8B>5VU(N%N1jWke+qgy4uyDY`3UG9gqWobQ11Y@b|V`;jezjm-9M zGQPS)|L9p{tZ4T35rw3l?31jlvGlPIqL$j2qRaf!73!ONsI=hdL&s=(BVtohUcF4Z zy+PJL#$3ONn~bS;uafnT(&{WzPi|1}-2^J!d_)u;-R`2gDNfFB^XYFdj(%&61McZp z&;QdO`oO>ObH*L4`f<tNaK^N@*wG<t`3Av8s?yW#YLsaqItiqnWa|{|)h=dvbwMzS zfLhKm-99>N6A9Sah-P;mW&#;fq|+o>o8*Bmao*r26Xw_7K)1RSt7ov)ka}mA^7YI3 zsv=Gu_HabHxq&DIvPcl^CA_P!&4fEN{SESNdRZCG=k~<<sy?>b`*=AyWN+-qV$G@T zZKNv!L8wYhrZB0%Q!e)OE>avhMLoPpeta8W&GFM2p)7IJ31L<fXO>V_*y$mylV?ft zWhT$RfM{!Uw}Vs*^3p8z$y(cF-3{u=4MxBB6~awRGajP48EUmfy1j<(bwLMQGol*p z`(ORkm;NP-<!r(o%h@|j;2v79KlR*K{@I?$o-bC`X&XnLbjgwop*&Vfnkbka9#9X) z_|X)~06{L8l4&Fi0@52~of3zUFl-=zDOSkWPSAhw!`Q0>uKkPO#Eun;0uq5N3Y5wh z35%PcbeH~pPm-NnCzyux=ql;)4MeVR(+PGq!ObVQSxIosJwd%IjRVxqJCFSGLtE?r z)_OZ@``MgWPf%G-WgCRjh-!+_hSX%}yaV&V?6r%8g9^7dMkN{T;}4LmFE7kJX%p&- z`S~}PJo^%pS1!{G8-i((wNqq~VmduUB9U2&E{a7jjml6*ddzn(vZz9OqFpdD;0a9) zK_Rlt|H^MXbytnQo8ZQ+FVw&P^0}9ecGJK0BisG|V|%?_G(|y;X1=$FcO_0J<_BZi z-9G+cM!ma73?Q=tCXwdi0rkyYL@p6ZBG9<QIeuCZ$`aEqU`?Tm7NK#3SxuAgljO@N zo#E>mJ0BD31#8+qc^}p5*YJ}XF>k<o<Z?#T4y_GBW#7KWZ;5~I+Kp>lE9=9J<>epD z>#<5Y9qPv7Y>g5EL`s@<DGx_fvndWqKogxK)N_<DglR*p0)A3K2<YVw*@-RmN{*dO z(JBLPK?p=55J3@bz>N=alL@g2GzU9~L=v^fHzhGRqP3s}dZ~kMw+KZ4E5H8vPXjw| z*Z6w{9s&RKm20m)_V9<!9$V=@B<n+JE2x}8i4-X$by-r^4jUETdwf)jpL78kL28Xz zTP8iRL3&~X)z65r!rLj$prP4sNOZcem@*P43+>y(0q$T*v%inb44p?FM)WkIr5CoV z1-je9EcH>{7R_YrfAv#e{4@(wpYmrg^*ez(U|QO^BSM0&J()_GSWg`-hy_olMS@BU zNRJADNl3aa%tjlt-a!^AF*@92OvDjfgGdZ%x5LG+JkMaT#Izhyy?lvwyUlvPMVe)p zG=aQi`Mw_{VlnL=!e~stPct}JP*-_Ia24~?-PZDZ+N*yv@OHyApOusOUl*c{8V@nH zp)pVk$c4gVQPG3*sF=`M-y~V-;N^^FGQh+>QW~Q5MBxa^Vb+#V%8@Lkj9lcB7u=XS zlIbB^hhsV-;lxIV?T_7uNDX#61_^F9LPUk>_3^P>6ftb1D&y@#I`259^=_84y8#cs z@x`Z}^3#J+n9fnwqnarbKchlYM}a4zqa!T}dK=qF1#UipCL$V1nAW&zN{9_efo`=i z-7cbM$cl(ikt~7LjaAZ4mzx$Y*K^oNXn*i5YAM5o3AQ>Q1c#jsu=4@n@V+Dl&*8y% zj|HFpj=uHx>f>LRo0D1CO!F5ev#Kavvgzw4O|o1F=dntY0qrD`t}K&fh8XOk!Xg*t zBH}Fy8V>^WQV(QAgouiUYHuHBJ*o68rO=a>(?{1h{jvA4^3f*|T0;nkQqsJ(gB#Bf ziC*ju&4T3o!#{rc_n-Rm8-EJyvf$h9T}gSD5`p>`UV8P9fajM?cJ%Rn@5!K{7a;%u z1}{lOK~xWf<-fApZ+$rNjc_7h(}FBDxM)#AE_&Sp2qK6`42~chOnZ4z>n<aHR?<4Y z&2g2`dHpiYd_rP!mY;Zp<&S=tU>paZc?y3};m1>AU1ClxqmCDgeL8JI>1gJa{p{z@ zKhIkYyx*Yl@5m8LAOi{sOaIwJkNxAbh5Acvt?6gbY8#Snmu|a_Of*3{WS$_jAesP0 
zM73H7DTy{NiiXXcq_>6?n)1?hnrkzp5SU&EH=h#6Gn7ebf8bGyC(a;_8p_Ef>YZIE zLF$B^-O1&j_%HtV{}OlsxWU^R_W!IJ=hjO+i>TQEvjCGX?Ck!x>-V2}GPUDw)kG3j zkWOM7N0RD=b0r|N1TsO)BhfqDutbT1&@|WzX0N_MIdF_dV-gqXMuABN6i2pL{=|=v zJ+w{Z4~WVULygh}DsRz@4iM7t)pOULX2G@1xr>i~JHYRpfZNaf<-?s<9~!OyQr=Gf zdmVy^rC_z)!2;R}Oj0b4bi{y6446gJpwtQF&X8+gyU4-r3||KN?G~N&71oX(p?}{A z)OwEDS}b4ln{N`eN3<m(Hw4=tg@)*v4Gyc{_}9;T_7?DxyPQk;P5{4i0s>He=E7^g zviis)Kb1@QNIlvo#p9NHMA0HAqCH4q2#rUin&>S-IFikr)5ku<$tt34f~YORNP=sy zZh))|&E<Wn!<+c|40EJMdU^{ns}}_ftr3agjhAn}`0~YrOMH`Kmw)D{{>`NE&UUia zYp2uv$B{M{L7==?aP77vSc^#uVr)Q$MQtj9WV4T1Ef8sgP<~+<-V(~1P*%*YTx5Fg z1!gx77eUelWT6p7f)Iij10qU(?+?%atEXSO_B0FMo^uzCe>Z?fAY2?C?sPN#`kG08 zEclohJRqQ+FIasc(OMFnCCY#h5-BxGXNVvWNP?^u8=Nvw&u5hLDONa=)fKW{3)xr5 zr3BGV(Csz^a0j*frC<BpukDWJ=UG%p?|RDkyQ1-1;$I$)uB;~J(n?}}%zKotYNBrt zw!}6K-g*ciTtLZ1MKxLo))Ae>+Y*^12npa&x~4d>PPWy<9O+|DEFqQ>Ot*_pTG-K$ z8*hv*|I+{U`QM6g6&P_x_2f?ZPWt$ra(hod`Sg_=|Bp7R{a`Eqq>E8CRmQxAys=Ct zJGAN!z3o+Wb*mm;1;_-lonf*bNCBBgt|X}bQA8)9*gAns5&{kA#R)|_8{n%sUps&8 z*%0GBqxo(5ZfgAN6Y%0-{OWd*&eFtutPVj{)=^o<+$~x~Op#M`3W#n|6wne?<X|Eq zlc;uvSy@GQI_ORdk!pf1K}Cq3P&c^I6w&DL5C7>O{K}1k@p)jMx9!%xm%w2WSnSt^ z({tOc)~v15kHr{NK;cMetjF7kP74yP(OC-G6HP#MT8my9WfnDesZk=ujmNlh3M^W| zd^shID=xjX^XfnOmw)nGanS)W<Q``Jy#fxmvhj1n@wx3(PrD}heiu-4TQawS@ys)u z&Jj`|t%IgU1OW|;wvrg2j`;D6P%TnfI~w4pCBiA(a7y*sLHNzz`RcDfcXj_OETZ{c z*YEE&aBdC4zBHb_w5sJe*ZKzl>4jouYsS+(TscQXL6nNv2$Z$h=>X!Qjl@qE9TM(v zOdMNesqhCgrmyVt$6t8kPk!+WFa8E_=lXq5`CbBtTj0-6%a=B@{IF%@4}p+E2#7UR z=@^Vk#={xRD_jY|f}dEF)5J+bvp*)3o@Q86-<+}Y>JFdz@`bPd@^i2Lf{pP43!3S! zYxkY<9luKGU5H?j*z}%QS^n_JBK_H1i66+NIih7EEf%c=>uJGKk&!2mYJm}p)IgD7 zy{D{ZqnD=h=bzh|J@v)?!>3$~S6EO?Bi_aBKz|;<Z~v}laWbfvDY<ea&rhx!{aBWo zW4STg{UEkG+N>wSYl9V15}PPSI;z7rZTZ}1cSe6Sa{dMjvmUYleCMS&-*NzdUUECF z)hsHS*{vsGaUS%}ORpqvB|?@(2h;S{Q}WjB0Pp%5q(4vKx8=^uuq1Dt1yy%^zCF&{ wiIBVFxefNacJt5oO2j=r-zo8Jf8VqG{}wn>?#(DccK`qY07*qoM6N<$g2|lvasU7T literal 0 HcmV?d00001 diff --git a/lib/venus/examples/images/jdub.png b/lib/venus/examples/images/jdub.png new file mode 100644 index 0000000000000000000000000000000000000000..8a0de0b1332174236620f42c0ac1e02d9087433d GIT binary patch literal 9814 zcmV-cCaKwpP)<h;3K|Lk000e1NJLTq002e+002q|1^@s62J9mL00006VoOIv0RI60 z0RN!9r;`8xAOJ~3K~#9!-Mo2_WY=|{`Q3ZpmV0LH)m!z3ZZsMj(Evz*05_2o0X9fz zp~jMEg_ju16XA&&%}8Nu?2$q-4$I-N#@57m<WP)d$4F+#GEIvF2$CaEq|8+$KoA5P zKyT>1_N?6BdYAd5vN*|NgCYr%vi%}%ysXTwm-)W>opXNY-kUA>E{=cwp?g|@{_8*Y zwE;h%<Mip%00Q92C!chauRZ=#@AJZvsyXY5xUYv#KKUd+>O=gf-|(@=PFI|8iw=8> z)%u`yW`9;_r;Wv!0%HwMxFC$Yh0qI@CF-j|=wEEiMS~Ch?h_aOs{nKQ^l79ggtboL z+#VDk)Y`l`i?f4+VZW}^qHL_F8mBQ<W3<5<jnP)+nGsTmTq-*tq~Dlnbeq-cSL?OP z)BR!psd~Lxc(!D$5$=bfs2>`bpZLl5D}*?h>*Agw(Qgrw_oZ<voz>@jKfD|&*|G3B zp-4SXAf!cShNPeP6)SEZ^at~sdGAo1<_F@~&L>*$kqWByh?&LKMiiC*eWhIbL@B6$ z_Q{VuHT*9E%=_N|SW7DPw~{1%6l<4sVLvBvUkZHhRg`>2D%pSHiI3-h(RF_6^aG7# zYqYqszI8Y^_MK_rPG#EM@0Ww9){2Py@`_Z!A6J`|{}6elKYQ|{pVa?Zf_dc8M?Im$ z{nncQ&gAZmO7JJn*$*qFzOFp4_r#M=+VA(;f927)mM>hs+Vo51x8z#?H7ngim6=-E z*UDyTv0ABBKcf8bpOtIsT0Zpk>kTjQgUTfqvsY*Ow1r>9nlJgj|4%>h$xmJSVJPzP z$A7#s91Pwy93+3c(g+^(1JUVk>C_9AjLOkJ@@4rCKmOz=lD||iryqN-GS;5fMe&PR zXM~gg(hIyVNF}>ZKKb$2{Jo$5<;Ub;ING0d)|#qgZ;kt6KAaA2iA^?$q%X2gdac}0 z16!AWI=8>J@uA=P^0$TFBkz32{y0rOpk47nsk|<_7+4{0sWqD)@xAD`p7`jKogX=v z)2H8yRYk=X;unfs|B7(p>q?2g>-)jQCqMr2ZwZ+nd(VCTNQ|OkKdx2E<u}H?(HnZ5 z^lt64yKS0!E_P*CC|{OD$th`+kZ7&5Txe6Ng&WgTjhE-<%Ac;6n@{_G<<)dpjz9X* z?NE8--R}sB!ko<V<R9CBawOCXNo*bos^yPVO5s2K_!FO6`O$)T{5{iIp8x$UHXoFZ ze_kq;{*O<5@{`|E9e(C#A6LV{V9%v17axnW<RN42TL&A1JrNS=Nwmw+g+*r$XTUm# zaS|s5LV<AxfkzNps?~tSh54d3UHRRGy_J7E)0%(ocYgb0>CSt-`(1C}mn5U#(AFG` 
z{OC{eEc=C^5}dD88XtV}W1sk*jj8%RO`4x(>CdNe_II6izh8<<|M-)i`qZ~TdFWjy zXD+W^`sK?PHh-zTG5o9jt<k;tsHiP86s?MfkQ$R4C>(_mI4KbVtODo2T7`8E=LA+e z^3>AXDpZmWPx-!i7)<r<drrLc;`5ghfD^Z!?AyHfGS=-$P4Tc7dY{UMqqi2Sm^pRt zJzsm}%(?W34(7z|hac^BI=_Q<UkF3*A3Xi(&ujqv)t`Htdhov4*8TS#SW*#xXZ1qw zfAnMTSZzv#^9wCb9z4X}#fVbmgVp%5j^|ZSQV@6w1Ssj@c@Yi}csL}^ZVTI5Lz*Qr zkKK$j>disO)T#S#fA;t-w~s#h$4_wS%C*t4LkGX43w=^sdvC2)`onIg`?suT?X4$n zedgl1SIz4NbNcjYaqQSJaqir?Z}k>`<&nANm9>A^-%3jLDEOsHbNbA=bLaTrUwfcz zvf-(U@847Qb+y#0v>UbX-J>io&(2P9^7wuZ>|11UW`S}TQLTkUL5V<0T8%oT$j1-B zBd6qn5`w^Mq7+!S{l|7VL?E%5kFnA-qIg5(SC5xV`B(0|>yGXV-#EvsSFY(>m-auG z75c5Y<Mu$vf14!wZ&Yg0%EhynUwYkO?zr=gisyO$%P+lL{Qt)7v{>6(e{5}K>%R@X z@b_jG=Kt{HPkqAv#G^+lkw1JF(3z<;zn(|#?&qGr{%h;klfC;I`^2rc9^%OU{mhl8 zXx2+Kn-wZ`K{>24)tsVUZ{bG)(g)!R>a~!lBq^00zHg8Mgpl}N2`e1dIJC(?Na^D( zRV&;Y_<ZL1=PwNaPTqA#GRlT8C;j1LVWYHC4D&w9{_NY{^_CAm|Fsw6*9GRblecMI zB<0(0KN9Xga8NECJVemUTFckh{)eQW-Z3*>`|VPraCh8U3m0m&M@HSjmrGTB*00v8 zH?DU6{`!^Sn~v^3Dh@2pb70>L2bUH&w11I3^Ybk2+t0znhgsUYheP}Jv2X7Ji_`No zD<vvXNVC4c%v7C5qfFpOfJCYQiz3eotOe&RR$EG?x~Fq@#O3nN`F#stK7Q|!?k7I_ zr|z~Bw{7~ES9;z4uQsM@9~*TCcbGsdyy?yNf9{3n&Y0I-m45mI?>SQBZZ;jcW1X~L z>)Nef8eY$jc*4?}7MBk!mHz!bCyxJLzwwVgdigj1w?~7^mo|Ul#WPoa{lK21`}Q=Z zm@b#O@6B)Iwo`XAH9Lc~0^=lj0*Qdak!LB{)_~6DIxEY|=t7{2j2B*hiA&cvxVF+~ zWot;gH=^6?Qs@FphLn;<vqrrEv(s~?UlgASYu>LOIkf-TfA_Dpr!ww(!_j~4lsDh- zqpu~-{o;XRhyVIV{-6K&x!0Y1@4Wlgt`Z{gJ$1(`uU!0%_DZt9*BLSF8V0?zdFbF2 zzx#jxr~e(`<lPIi*RFT|MV>T|9z1ZAlArUyeRp&JgAXuuU<toEg<qP&t4t9@E&QlS zP_0oal_&=u%~pfOg*_}RPP1>{0yDF7C>0U~0TK`Az}O6(8LZ23q@+bgoM?pSWLgwU zgI@pE=|=O<&%JzU6X58<#Z_ncc`u5-VjOqnR`Ad}-}HqSo_SHfE-){?aE>!Colo|j zP|4ZzD}N{L;5!o_Tn%d#(=+<zt5>^!0C3BJ)~{SVyY<sE&3O^pgj4q%<E`&_7tO`J z$S_365Fx4vd;|(`2%BS*J|c~g)*z7x=@3E^gb{P|b1W>(vuDpDt$KxORK;0|lmcrl zu8`zefhh`vP;#`XmI|F5K74BFvo|iR4DPz)j`gH7XceCQIqAF)N##B$J@M4J7tXhT zAm#cErw%kPzdHQeCM^d_h8Pi1E_wY%)%&&O)%MH(^Z#^e;nLa7-;^Sn4@#Qj2N!wd z$KJ)BefyBUj}sCnJWxIkhZ6<Z1eXu6MITcnXp>Utf<kMuJV$GbuR=;enQ~ZRsy<D# z(V$wdQLa{TGC&JSZepy3aycOMbFtphi^hxM2OqlQ*{44Cr}^>Q_jlL3@lVc7)xMF8 zieq6|y>aoC^WXSR_v?MPV2&Qyzw)`@{Hw7@?vSp;{CtfAdz+UcfAsv{`;|x3rAwE7 zrZ>tD`NF4K4mf`D7_Ip^gcl*41Z#1&0Bym7a2ZZ#*rb5MA)Uk;wXFgt0Yy<1BuPe= zS(NY7oNnS(Lc&I!srf}JjcH0(u25Z<Y_(UAF6DuTk6*p`(ux}OlDGCRt^7VP3d6>^ zQaJe9Ml!fX#qyOrHE#!g{|BO&FZ|irV(-4%J?-}Jo=V;EmiPZSQLEXGs>R2PWNY!p ztHrP8BYSANS>ouyBb>VDB>NWUiGm6Uuo9e9AOnN|gawmB9;1^1Nk3;aj7igkG|fn} zl)@AsBuIsG5{E#B9<vKArsrz-VU0><4~^Cg(hF#J*O{6t4<CE%jqkOlKP>|DwnKN` z`Shor_>4ZhwD;;T9{ysZQu#`u{72vS_#=Pt)h|7hzHY_*<OlC+eCa=3ev6O+Rtw^R z<HFfXhtl5J->8Hl%6jI`nMRXpX^MKahA|c|2ta6XAf1m;A$UFxi8U$4jJ8$e0z%Kn z8AF^HBt9n7bUF#f<|scP3`<0zk1qkw&=3_4?mdLnBg&N~VW~x&_Ssxt-5Zs|JCB|S z|K-=8@BP*=A3XN>PrUylFP{JMMHQAYfwFO@?~Jw&15dneFqd9hUFd9NdoTi5IL?3V z^IW^JT8{kieH~+o6wEe4B2~v3P@;qs0SJQ=0^u|^Pr)fbA#or*j8qt#qjiR{4nI^B z+Ms+#vsEF>EJ<SM_u}oO8!2QY$c={+0p*$`a)N2U#Qy#juDp7K<?A<o_Wn0L_~eb{ z&;91wrPcp)xYu52smi(B`0YHgfiNU1tL^VtmE?B=<?sFa>Dv7d+;jKBT+^Rwm6<-U zhsNSO3w!o+@2UH+HYd{q5E-C}f(rG@3>Y7iLy>BdenQ;sWAhZ3k8o*%$ic&>R4!Al zmnqe&1f>STi_lhJoggYlOwH7oo3GGlZ71$tU$C+1*cimvpdhMyMD;3rm+quqJHgrW zTMNT12!HSQzVggWO|Izv)(4K=cT{Fc{^dN0ow8=L6a{y_uH^f_{`%uyn#)I9({umi zy+5@lbj~BiXL%!`-#x?X<|;+zsFVZ3(4m5qg}pWA<{RY2h%_@4+OWL5L2o$X@R4IQ zXJ%1SAdEpehtmdUEeH>(94b)wk&n<4fkYcanrr%loH&y>=_8dz6^f$pF(yMuN24)M zbLJM-JI@Ad?OXv&RijTA9rrWCZt|~_q8HE2?7KJbW)fSJUl*8CrP<J!_eRy);>lBY zf(m)*xo@zx+(k*l#>OUw0)$T~@VNc9gFNu2Q#9*!l4QhiIAm>Yjq|TwBFzoU%WF(c z&rz#YnQFACma0TSfDi(03aqgdxx*TX#G;f%c@7nN`1Oz=lMGUa!@~xS!U}BeVTcH# zh-PDv<<(4Fc=enB&?36t+3e1w+2(tL67|4SL2R`rJu!M+U}hH&9G~}RZnKo_$vaNb z9ppUswHHZ-YnVJEONM~KXpfm@gLl0BEi4_rg(T68Mg><dtg^YePQ6qoOFO)D<}97g 
zvj{0Dg*Eo<*~49To@8-xFU~r!V2q&14G4?!3<!bI0&OME^9XAS>7Yme<DdYH#3(S8 z28e1|NVWHaA2=Rec=_@_Yp)asORcHjMtVIjaxdiNV8+Pc_4$}mdHUUcdG?gGRc2eW zWTQ4~?Ge}3*GO$ft_`^D?)R4Cw{i5?36u!1MaC%Z)0~-OdSO3FIzXy*(qWlmxW?MX zD$D(KUcGRg>(|z~|Bd(4Y)lab5jcUfprk<g7AFLS4k)xkOUFPLbTb&{KDqF*zQPmR z3w9VqYGq?9NpHcr(<__9g9RmAUJ<KZ*=(o%Q&F{)+;VuI{;t4`*OHlBcKGlAmtXd* zAHIE<Yl>{ht5>gb<<fP!H+u9|GOFba4~Z5L)k=v6AG(Kfsfja=B2KA96$($$@8-1E z2edawWQn9&ZBZDIazH1(&efH5%vWDv-@ZLePwgf2D=5#y^FXNr>!2u7hQpL@0y?ab z7{yQvTv)>o6@jm?Mqo9>S-G*gRyq-^_C716uZOwyk~G80Dx<hm_JWr9`uXKI{;NOw zhtK@YpZN#fn}XQ^GSO{+bAes@!=>J^xTVwSp_2lWCoEsN!Kl;5=0mbTlZhNF6^D*4 zaO?3~z&M<7C_J)6aQ?z2E?-&U+VwRyHV2GiO&C_F))dXD8r9k%lF^8GtH*{c<FY0} z<ZvE%p}-3qg~{lS5<1<CtzJ%|CBxV<(vG0qq&eN>;GshZV3dt8IkL0Ttpr*9&uh)% z>CP~Iw<$)_3n~Z%$ZB-tz+I0VT)O2WKlRg(cmKnO|M)wGlGwfLKzjKd?e&ey>a`T9 zAdWX!yRkt$?1Hg~tcA!^0-16A$N_4N?TI8~3|XFY<=S;#dHD+GE?r=w-J?hyS_CBB zF8W5wo|!53&CXIU*O5x0oFq#NGM$iFgHaN{qDYIB&3>OwzaYzeoE40cA*)+$oLweZ zRg}viv$OLI;}oq^l<nVk^!DR#()!A$<H41ON8N4(qma>lYPGVt|IPy!Ymw^v{>{z5 zw-Y+Mlmo!9Bj#+{_BJ;*@xv;kq{Q0#2Af+e6uN+{53ZmQdE9r~2`Z`vT9T(-+MCze zSiM0q7*SP<!w=rckyH1xvX=AJXU-8PIo-48sG1?uvo(TH;s+^G_Q|q<OoQ{w;78b8 zViHZ{LEwk@VS~Cjz}#VnhtFE)0_*F`OxK#kgP2ibIdkUP+t0mn{!Md>p)?{W%OD2T zw^e^Ic=)cxFD&h;{aSnN+OksOdr!WhS3ch948tV12xW*nTl9Nf`n>|HOXSvIw4yOr zW8cz#5CWV<cp=qRlf$>{VY*tv^N%rgaEZ@+^#%U;sn6nq7AGHk1V2lNH?L7HJo1qt zqyS&x{05$1$AmRpSVak_*2-8VDF82mAgoiVF5m|h=8{D&oH;`l7Zh<p)^C$1v6pLI zyE@GA1BFG=n2tJk-_!gTdUW}B9)9zCHojx>eeALA4Lnag@vUL>)Bnu}RGbXw2mQV( z5=CMQ;<(G8->0xXRv2&s;e6_?8KxJex2?Z~C=6I!*h914L?70SMhSQoj?C8CU-mIF zWZs0>z@u$q215_ebL4qODT3p793U=QTpm{G6cSTpkVD+>ves=QBTJ(-!{XjDwN^;2 zRAH)FC(C=JSr;Amz-k;0lS@qD@I0S#d2dv19>4JizxQtj6AiST%&WT<^DQsD=aEO$ z%tC8_FWKDJ=|Yx_umeM`GX}#P=`49MLW%&V;Na|DZkb)evjQg_GVlmO1rjn+R+d-k zZmmL+^JDkli_Sxm_yX(Q8<^pUTn1=QQp7zDo;tz%f8w1iuZ+0(k+UpcO|Zi?2BQ@Q zgAra-#*8FamseSDx4HZFhnbmQV0vzjt+n%v+E+0}AA#7O5R)L3$G(Hpu2Hq;sxt32 znzeGTJ4o1eK>B8IyeFRcwp{_@)mL9-dcGB=CJB=mhNDfK@vu&kjvPsxV6{L>@H{xQ z)FSXT#%YvS!UX|BNdhkd8G{#c`O10Lm%Hf1!|0e{x`|K;vvW11r$Gt!E-vx*cRxhA z=2P#KAj`1vh{5_YxysR@!gwijCmv+36taHi1y)wi61FO+N*#+&o(&K-0|7z^R1i|B zPcyw#*N2uG|MB1bn?E{J6uLBqF)4~~fpgOwbMEX}ZasNSWiAtOoDvUWobka)bXrho z1J(c%g=KMR3gcUZ0wpB~523;_oGN<`3wk}n#@bgIB^laCbm1tekY=mIEys^><m6HI zFU_*==zg5sp4d`eiY}JP`j?3+RaQ6Jbh9Di;N9H&;JX=Yb{Y1&q)9<o@gbL(%mb%z zLg1XnlQnAfX^iug3)FqBnfjltuWXqK5Ibi#3Ceqol>?ECQYCyN^vE#mqzEJ~7?JB3 zr3;)ZZ(DdF*nj9&P<4ddw(7z<FcPOc@I0pG=XvlgHI5&i=h@HyIoB`tNK%gjw;klo z?|ci>^DTsnF}jNYjPdEVJ4j~;N<Kk7B2S0Rm3>0hq*+vPH#V8BdiZ_=XFxbfs=E|s z9fUy$A0Zqv(3n6{#8sJfIXW{}-?zT9mA(cTb5q4U@W2D2(Wp1FEHB|`>B6Zr&q<05 z=W+rYU<!%GLkfq$Q1%oHvrSMrLX<!#kPasWNVRRKkO(Q5nx5j}x4n_>ZJYG6oW*0u zX-?M>CIOuzOn}vfB<ZucKEk;sepC=OW+=Rvl3!)M*1!u=lI8Q1WD~Dh!B!>OT1LZ| zJU1W$5IzWl7wyA&O-6aZFiBd|^Hcla@(my#=a_t)XUt@HeG8a=zfTYZ$!IjP)mn8Z zy+FrF2W__%W3f0ZaX2g%fuq%kn4j_pg+j`)60Jp;0%z>D{*1<1jk5yl6s1~&>U>1K zRYOPv)@}<`J1_;wC}v}rgJ@8%h4{)NO*$B>32F^$trk(Kg%u%INjw6q&KQo`<XMIi zO@xpb0R$CfFimPx`lC_Yz1mKI5|E9dWMe=(V9d8E=FFKhZv2nU;^N}v+fJUmp>u;R z1Rg%lf^a+6QWzY278fZ;C9q;!m$rZmoGEY?a0;6j6lqF2N*E0WSZgVv@Y)?bU!jyl zA#h?lG<6E2#K(FNL>|a#!k~uH7AGYx1?w5SvWN0QLM7>q`fP4o!y1F6g|!(jTEL&) zi>tLz=?G<Q*6$=5h$i3!<Fg(Q@okFP$uB_=s2~VJoD-o48Q5gp`rGXr$81ke3df#( z^BC=u7Xl@>!I;covh5WCtKGI69gNu8>M`gKvD%`Ppw=p}|L_9UMu}3Xf+`2NLNOR+ zB$=cTIl|MZJOSIsGXiHlq}GT`Abf$Z6rS-Z`aL$TUqDD7<6;mAD%?*neJhlwaNKYz zO!`Tjh%uNH2mycWY2?JptnhY20i-cTWm)DU#LAvMvw3Gw1X?Sw1;&c)nR}=3Jk3nC zhA9jVO_mz+G$W65@;E0O4H*tb^tUqF?H*g*0ZE!s6a`Kcyyx+^GJRkV%9kkRBRoMK zXLS2HqfBEogkFS|eS|dlz5^w30eAs~kxx*HkSJEJyiBjXj!-oyG{}%(s)elYLHM(T zSeM2ZN2B2|04igZBx5k%1elvDMgqzh6RfSR32W^i9Y22SiCd2?K6LfkhTrR?IBRj% 
z15yfOS*S+%vLGq?<f*3LjTwzH`kjo;jRBp_KHa_{FH-VsNTD5R9<#8w#EH9a!w<^X z0-P==vXnumN57Xa%2TWrU@d_MPI!3IA-xLHZ{qnid{w~-kBkv7zVKO`E+LeM#2~Bl z1XD|R(H^8<W;&cV8?Rhhan4PR?=u79U5fdpYP{WQ89x^|=Su7A>#it@4<A@saz_u( z-@CFZ>nqEfU~|ACoJIK&)=Ki!kn{~(8v_i2)%GT9Ya@muLAyU7jfbdQ<9lE!Flk8p z#yXWs313L06J&Y9pxbAZD1->X%ZT$XQJxaW03kfQu!Ja;QDGTmh!Bb_?Xk6a6%<&p zy*o>lrtqr^7~$a&5{P85eCbR9G{?DMIL<FSV8m-qJ4}EH$8xK8IvqI}3|?%tTJG?n zgZGw0Rlk0{BjO~*xtL3XExID1l*4+vO`aDF63xomfbLM!8x-_<TbL|CSxqGXiKbDW z<(V%&OJ}pqk;8{La9}UCFm&26{iIAGd?HU`vmr%3KnO+XMcYffQG_j(kwJ(QlJ4p< zrjS@KMidSqzz^yKUWk`w)XKQ-rO&VCSy~(Wm3R!rn^-BiDVQC_s0lD-pb9j!*42%T z4b$y*Zp=*0xOye3S&PcG<;KQ{L1KvW0jq00nzaU<_6G4tGfWI=mJ)>q5pEYn>r$qg zb>49QNd~=`Z@l~pXD?jh*zu!;wJAor#8w(0WDVall)MUtP2zY1zdVm}WpF+oWjq&w zOW0bwfHoQmiLeQ(+@e%3<8%+xdl}!#<mxMbHW=TRnlN)a!|iSnUQ03l_#&k-pfXS@ z3LUJjuNA)MXHo{CLdsERD7tGY0fIb*B9s^{u+EX@mN-wyyIa&MA*Cp!QuTQA!w<1^ z<T#s~9p;ygaP4Y`m#+1Q>Nx}rw5T#Lf|1VgEffS~c1S4?k*0w(8NwQbvsjnW*;>bG zKpU(G2`dXoUt&gWqEL~<>%(NQZpP&}9Vgv*d`@>P$^@Km0V5}K%tWm(jUR`A@0|1U zA{RmwPD$kisaRjRMkxqL3Qdp)6iy*T38Q;JKxZ@M>`Rv@wS4Zo>j*^x{dSjTwZ@&d z-^W((JhAf_S%tAx5FuFth4sj=Xv%1-7{!}J<rbbvutlH3%P91KUVjTj2b4v5HL8sk z+79rEsmhYxy8hBAOGd>Q#sKJyM}Pd>j>-FGd*Drkk=O;t9|H<@B`FCA=bUgjktT*~ zS67*zn?e@>x*wAk4kvt|z(+8(*r46J!S&@8f<TbRlKn5#IkbO?u(C*NYMP=av6N7v zf$+<CWkul%i~wytGO>?%^D<Gog_qCahapM`I{gk?>+4u;K{`s!3Mh?ohEgE#=_T#e zi^H)h;xUZg_&gk|X43h(-8qI`aMZ+*yA2S4F^20aYwpI4uJHUSQs#K6#c3a51ffs_ z(jY5U_U&6j7de5asJ1F(T5$fA6`+F=Eu_~Z@FSds!fLEBq}c#4NMZ3&<Wgcp59O!$ zzCj4+uH9g3^AgU1A2uk}D+n*4>;;6Tuou4Z+#rcZJFYf+jWE)$1?IascDLpZa4Q?@ z;_Av}s`ZF4sN!sfuM|Qn%2H5nl}IzqY;!*c<{PZ6UM8w29(c#wSY1tNw>5F@(N82) zS`<3O`Ic`y^BG=x_D`6KB9;yuW6z;^j4trJSv*xixe%T7Id|sINz)-JEK_UEAcco- z95NobwbfVS-qvP5zF|GlG!_$+RFet#HD~a4vhz+ewR53-f~=k_bA>kY!j&6L)th2w zW*%P!7$vEwkm>1=rKSBup`+QX(Wp$(X|Hf?<qDC4)?$_V{%N+-7AkF!h8{_O#BhC$ zjq6wO7_fNcUiKcp2Vag*THsgbD7*lqWp(uuue|gnlu(o_4U{kNeS<A}Ze#s=KJ0E4 z6ZfJg#<zvecbeLC6Bo(HGK+!HI2K3a;Z1~;=bUq^n{Bncu})N}h*qNs!eTsySBj|A z17@Zw)G8sha>T*?w{hb5VXj==;?ji+c+)wytZ=T(Xmyix)aKQ%eUX|TarEvVXX>^G zsWs-%y-S3x48Kt)!6VOxeChK~5yzYOwHhMYZdv*(muxcXm?BHHbIwdaDRxaW><*vF zi1_A&u-G-!O>zvoRP{}z71}r{m8?Y-8I?k+)e5uCCiTdtQVO<reJdMjiOm221^-Dz zK~#Z<K&J+g3o1d0X-`2nX6@p2+UG9PJNqJ==U+tR340H`f$76<By7&(dkX6cMCf6B zpDgY0`KLd`3(tNUr!(h@l&s%o*jaa3Jg`RV9kplUB-;fxfz|Q3Jx;*!M5(?hnAh6- zJE^2%BD^MJ(*hz+5@uU96@;N^O*NUHYEi3J2tuEFy^QZmwAKs<Ih$Jp+8bL4Cx}8x z(-TBCr=PT;ypPh{NlG)v@#=M?vRIjs+l(-Zz+{|x_S1a*D^I&3PqycVtZ~lS$<I5P zXuD$&>%htw%+|PUr@O%1G?XU8Xh%7drl>!@*cLDi%#QnxCQ#4vqI~(rDs#<NIUf#@ zN>Xc;sZ}bJs%5YR!+6NBZyEG7{Xs$+Cn#A(YlZP^)D}))m?H@*cuHZVp&-TB0$*A5 zDCXtoKf_nP@RUoF;mwM}PLkb81;ep2x?|tc9pA_3X5QsHZBBHr@dl$Yl>USdNG3dh z1Kco4iK0^ANza>`nWkQs%+JpfD4)O!!CKZ=yR5G!tZntV-qGCX8`g5kmi0-z5PTn` z#3Hc5U?mu<>8)Mixi5Z_uYdI^muEw_>$6T4i);c&8(7{ocEV^hk#F30yK8e2&Lp6n zgd9&KGP~n*thKTzj8`p}!ayOTFkq_LA`AmUKSWANGAbDM3p#Pi`Y>Z_lroC9=Xs2i zI1ym1LR*Iw2CWOS!4}Va_QPCw<#Sf+*zSU%Cx9g58=efG8{-gK8UNfJgUeoPP3bQJ zGdXjvC+0`92^s21Fo2ilnGzB|kSa`*Oq6Rif^rGvM<5g`2oXVm2&?plf;cG{6*)## zaLPxB0P7fUBGROTHqU+WqwdvL{?u5L=!tO8Csezm=pB`;jX^8}SI3Xn#;*;=zmrb@ z{+{>wPQtOf%AT2s(H&PTopWBdKXB!8H8O^vu$F4IhOq*zE!q@Bkx!`{(P%XALxnaT z!c`H%1L=cQASFrM=J_vw*j#w&X>ClFPfRZLC-TN7uraPU%VULH83VZrT$`vyyDO1> zJ=<B`YYaJYjcB6z44re{xyx5V&y${a`EuB3H5d$Y>g5_ms!`HMqselQvadP3SR*%e zwt6EHlaV`*LOagA{26opmCxo{XT>gG)tkt9lOAg{(IoaJ6f_vW7JsK@+3#zg@9qVu z@f+=Uo?r~h9}hW7V)gRHi^AD>Ch$en-P>nwdN0m}=)#P@h*iNN2t!My;t)}d<t>AD zie7iczViJ4o9ZIlnI_n2{I<pl=}c6b9TU2f|0W-gCYDO{U*vedA27S&WD{lGn+Qz> zIOm)gj1u?4`Rg_*{Ml|SqxEhk>aA(Cfh^JFX^+6G;Cms$PcdPM*eLqhDi>aSI!#8M z-b97jm`JEQq0yNLkIAMm-_;YDT_(=`MK}7rg4tcMvawRS6M($Q&DCj^+Y6U3yZ&I< 
z8YRPOYt)xchWK6+U6gTY4#HqXhVr}kUPyc8YQB8=8y)9tdwlcliK?<T1~aLaWETWI zky!0_qmUoSw`gzTdE72HJo&+lHpUG`qhe#TU8HfQvOJcBj=_1PSt_(P7_95}y6*aw zbNR({UtEhv8*AfadVQQkFO8Gx^)aN43BwjosHM0Gm>=nXVKCWO`Qrp!1zKZgyf_~7 zz#d=*Xb2%1p7P42Qc$W^rvopjN~wG-(sVm(*`T-4cDC3YhsurzSskC-V;DPjKHCL> z?^PK;$WUU}2QW8L(T=RsiK@{7rkrzAg)S<^peXeRqhMlky0}Sib7LZe*2h4G<LiD` z)#Wb(%x*Z7{Z=*sWo!J`pBk%aYOJjCSY<omyfY`eQ(`xFCE4zTF?-D(=LdC&AMmy0 zt|geA$@CgfpEy^>&xaH9JH<^JzpaTJGHJ}r4-3c-1I+H#1~uVp!iiOf^7!X)!vBZ~ zC!Or_AnC5H8t(f|sDIgDxXG%6no!ctP`h>+*j;Uho-k#vtHS)}1ZMa3<xOCTn|zVX wy@vmAKkU%?QG<CcC4Hy&cQ;v;_>mv~A8}c1|68J4>i_@%07*qoM6N<$f`j~GV*mgE literal 0 HcmV?d00001 diff --git a/lib/venus/examples/images/keybuk.png b/lib/venus/examples/images/keybuk.png new file mode 100644 index 0000000000000000000000000000000000000000..265dc3963cf3dab920e694711e6d2e026b6484ce GIT binary patch literal 8496 zcmV-0A<y24P)<h;3K|Lk000e1NJLTq002P%003181^@s67O;uP00006VoOIv0RI60 z0RN!9r;`8x010qNS#tmY3ljhU3ljkVnw%H_000McNliru)C3t57#tOllXCz703CEi zSad^gZEa<4bO1wgWnpw>WFU8GbZ8({Xk{QrNlj4iWF>9@03ZNKL_t(|+U=ZajAiLr z-+%9NmV54AdsT1KJ#Bgx+knktFc^#>1d9aXARz@sLJ>)fScDWsN(e#`c0PcUh!4qU zB1KB1h(ur@Kx7=_jj=tRfthLdv}bx|x~qG-x|UmY*R#A^K2){2zTB#=9?v-Wkdj`V ztJ=<a|L^nspZ&o9|3dsfbieR_3k3NVZuV>IBOm$I=D+**AMH+m@7I3gqw#P2=Est$ z@5!SjX4YH&rQd=R7Qg<lf2|hBvHJbr`-Aa^KJ<52W98o&to_R0`SBkezq)zP|J*P9 z(t4KFk{6$UZVNCo((p}}7V&2M{lEF`-&X(XSAN9<eDou~deP*QCn{O{Tq}#-HR<pD zV}e`B8ja7XwDo)Wq$;|r8|SUi*GIeeTF#Dbe>~jz?T`J(-p~KihZ~Vn`m>+?i-TvL ze#Xt&cz7!S5#Pw){r5lmqgKDS{kL?I_O`ZnpYGp%t?kD9|FX5T@%;YQZC~btpKEj$ z&y|x=BBa<i*8P5?v$DN-@yS2jy>aztE6dYKCYIWZt#{?7{11bBy>{>JZT~y}^-n)P zwaGBEW<dzwY;_O-zy8tR&|iG&Dc@b}u3i7qr#`fN;YwB0bhdh1?~UU4hsFmxKVIcS z8K+sxmPK0R6A{&$NpJi1&n%z2a3|B{&n9W2%E9E(@xkO<^3g#y*t>g0X<g=%{_g-^ z{IMVVK<$tJ_|L|GpW3LO0z=GPzbPOHfZzPsN76^mtv_-3{0ApH_wIyIfA3;_xPPIw zxcYu2WpeZCGmQ{}7E+4EGZ&OMRt)+*e18zX{?+F@jaH+TCQ;bEbA!FT!P@rbt5Qm- zf(y$(`HTOo|J-Lj{yyO2k6k|ZU4IOp1>Dgl{nWGgV>TY%N_7yw{LB9|?i~z#tI^a8 zy82jdMo%_2)^3f5`H%KCH#=9}`Tg?xGoKeiC-SvxUlEfkr@eLt9R%AqU&D%sSO+?t z8p~^o4Eqzd?;Qm13s%-vMW@~fjit2e^>(%%`Su_Az20j#-Rh(7_|t#;Tfg<?d@`{| z8+0?*Sv7e>Dxywn?tc2G|K8HKy#29ncm2D+I3A3D<fZ5T>arz$vfm#xHm_fc^5I0? zx%H~t+usvgJNq1r#$vCxCkBI&Fep)4D~xfXGC_<-mhr?fuDmcFVVpziRP68fRBwM! 
zi_-QJt|~4xYPH=)7H>cP#N)Mq@C%Cz>ir*hfAiYaYt_^NiaCcW-b{7)nyvO#eyJSq z{p~YX9{<S~pL%+EI5f%D7oQW2g_amkJg>a`3X>uiC16wyCj?R`^2#GrKx;)&7>tMh zC>Pe01O-kwhIvI{bO^zSAXWUAPkkY(YnNgsAAZmGeD_bdz)R;IdE`HPAF5}beeOj- zudFPyy}e_OR6`tH{Tty5c^Uu!F)7Nsqh9a*&4snatvk2V!Xo?QT-@1wU5v(791IGv zb1-73H$dxzR3)TDgvwIo1^KANxxl0>7>xT^JW(yBvW{_Cv3D>Kl~)*_h{7p332~~r zXIBnJ_R8(eJ1fFW{=u-fn+*2%bgR`&Kk&ovTg$R6K9c79j+S~O&X#@<SG?~BzE8dP z`@eg&mNkFk=C!9+UcL5G(%an?#(RQ5RHa8GDb9IZPzWh8lL4(-gH$C*S5P`|LE*h4 zXo=Ji#~PDQkip}9pz;N|45BDYd>~9>C7$}!C$w;6ja0wq-1)QpbC)l--}fDFyB7bz zJFQGRf9}2CJ9<nvwd53Wiqmj}IspIyh<ASHyP_*sF8$E<?OPw-y!rZtm#@E+jB_XQ z!U-u;ECFLn0-yw>I%Z@BNEL{+qL!t2mlI1*E6teL5~QM)HBqjjrfbL`up%I0g>jzB zdf~hm-W#lSLJ6%gwx;9kZ0YP3p~TL?u)I1PjE245zMCVt4@LeDk_2)2ZI{zVYw3wm zHd?=TZ!g)}>x(#PV4cL65@Lxlf#3>~xQ0wpf~y#g4YnMT)oNHL$+U*5BF5v9WR&Q{ zAfuQ)W01Ozasee`g0Of3PDV7_bz)r;<D%5BUVE{gq$+y++aG)1;@S1vpDxRrM{c(S z?3@jsN+R?Ls7XLhCPj3xv;DoV-+ZlkFc>MPGpZnwN>fe7<dfp?E+L?kI=-q1RiFwU z?<*>6NVTRa3R+p4x>O{!I$h~m>NF{f31MP!X+o{mAg?O!Y~Le9F%pE!b0)zej1Rhz zh_jDej@s>ZYckwlUFtTMHaDpraN5&8#ayz~Cq;n&<d=WG@%WRE{_~sHUwZ7;tygQ~ zybzUfcrz4flG1E781LpFB?w7nEv_tyg}@0wc%%?mQ=pn{@@j%?CR9bAbZMC^jp)=R zoqEP_D4A4ayqnNT1S(DO&LEt`RGv7AL_H%&cUQWxPNb?gy9?a~V1Fk1ob?yvl%w@P zN%%Tz@b+(gr@C_G;?KPJ<)=P){pz)qt=IRo7mBP=#}yMyIl+YrAtl}mkP1^)NF1?@ zNz)W59Z0aI!r`c}_}rqMWz;($oj|84z!<^=r0~Sj(a9pbH-uo2Dkj@lM@UVU7W8+v zSvm8hsyEw@x3aCvz}*>1n0BDl5m}0p)Z#cvkU#i=?{9Ct`tk==(p^6|7{~huBVoNj ziCn}w0--2vg%TPe6+&w86|oj%v1EOrOC&ryy&YVVV#_gcvrB%kPqW@2OCs8>h=o>6 zRaIDFi9|qKhprSdZeWY?A$0i;-X|0z3sQu^7oM_C>gS(&{>BX;n*qSjxr&qg20c#- z0N!@_Z9ld7^0kXEeC`wVaepACiijgEvLq!Ef~p*&L!d%ny+BBTQi?{VIMc1M(2|UH zb~(R(hW>C!wAiIT7*lUGIk(znxoKH!20D#8d0!#1*bvwo3^Bno&WDI7LdX;y6j9P; zcXtaDJZIKgB1*GvWxtvnfe@ygrTc5vwHC*5gPM~GlHG1szx{3Jf9$oZfB8NAgMMS2 zTj@n2l#(P`kt$6@Ac<pQsZa<MftrGL8gYKPiy3Z{Br%=NGNCMpBaNLDv=YgMwHlWm z>5?#}9>>(<jKy}BL>jb~xB%W-Og_T9ibx2|WW;bh7NzrelLsZN7#9B8Xq*=_C-3Hf z5>86m=sCW(`1ue2qbKw6e&gI@?`C^<0F(%!Q1Z|hP<U!digguP6k(OX8cQTStKB7} zRYXyf&gwF@tVoojnheR5VyT&N>EZ^>XoQT{sHH8MwI-pgXk-=fP-A@`9W=|WC3^Xg zdc96Q8dLd%VMs+$7OLf=%v82^Bnj@wMA{>hX^-}1r@YByU1>F=3KCGyJp0`K54`s~ zvvSn?!NG*q_PuSHWSLMx5eZ4ABw3uIkSGF;G^Ujb8oFY!(;!U~l&@%Y7O70&;9!7M zo|WYeXI2|*tas=%YMeWBk+r22y6qN=Yl}2H9kh_dDk1nlR7*%AjZ}fsR4APyy+8{e z&R$$C<9c)Z=H}hq8IN&#`8lC@IE{4><MCMNC=p9%E){?Axi5R6A{9)9K%nD<cBezT z(IAd9%1IwwP8Lfd>ZGwmX9>yjS@Ln8U@BJ6tkKRStBY;Ut+aXc(F@cQMWPjrBtuH@ z(o=7=Xt$T?c6U+gCANnn(ptp!UJn(g2oJGvM2SXeAMS1Mlt2oUKmrs%bhIxyCwV*T z4~2Okd<~4heE#b5OU?AoWIVp$kr5$~>X>$;Nw?FaRnO2WBI&G<M1q?3=#Zm?MYr0- zNy1=Pu;@GsD{HKG8?@4xM=xDuvDH9Ifh|iKSpzff<AlW}o<@?;s5Q}A;h*~w_iRp< zr5udMcn3lfj5WTr3}3mqH3H%z0HP@X)J$M?VnC&5H-DcT?`-WAkx8Ag&IS<$=bea^ zrjgZX)l=$8AV~#Ftv1a@jaIWpA`FoV)H-d3g8`!$vNYoS#fz*jEt1t6WX%SVmiWpb zDvPicGL3OT;6%U%i_n3MjWq`SvzXhr>GgMUrGXG5Gzb^_gJC(SDx;1-&{H6&sU?n) zf^eLPWGeqJo_Y4K^7B9Zy<0l2d1*8OLZp?*l7uvtH0m)~qs~IRLATSQ+i8+YOS4r+ z;+bT1@<ztWQit`8HPX0-NMl4T!Al8o34<Z{fY6d)Jw8Z~F-iyG7&;3}L|dB(Z@~qO zDF|BPeF(*PbpQGF%;~GC(?3-mLmpEwk)|@@?1e|Gnp7^hQY7BfQBc>QgGRUnQA#LF zBJW9qW1*JN@D5=sT8ZY|@-nN7Ys4}I6v26{DG7-rL;<f1MwA#=felCk!A+3DproPR zNJv{tsH6^3A_N3mp?we$Qq6#%XH;NX6;6yM^$B6LY_u18sTPG2-YX%cQUZy`S%WnK zD+9&EQk8XV6=TaloJQy<K?zBkrUYm4&VvROKvovNvxo8)@)BQG1nY2BiHIX40cT4H zo+L@B)oLgyQA!*=oIoPzwA!hZLU|u%H+<^NpOjhpn%LZUb@TISB!?<g4gyk10+!$n z&XhP;P?iN{Ssg8dwGIRbDF`8e5cuE@6DrmrN{cQXVpJeXgS4LDJyL5B0ucl_Pq3Ca zPN~;o6cxc)LI~L42?-3#BKJOsQ=Ysy4G43S5!YXSWovP5eLE7)5<tgEkV+gzhguMV z!CFsM8OprCSVs_tPN=gMXDYn4c;~RD#F!FK00BWuTqKcM4V}a&9iak<01|<g8YLCY zW=1VmXnDwG!8sOdQayQLt(&DuJjdl150C<J90+p*i>p_kAIHt+wKR=N9Yw(t5D1*9 z5P;SiAq74JjB^-ksEj#0bpk<v$K#zv1P};(@c2j(8Z|<rhEFs?D+B?8N8kzGp_D=? 
zMYq#sad`!Cw6%I9#ka1ktM6J}UiybW_^v0~&1O7@{2nUDcv7M@6h+~aPWxUfOL9dB zl@mxGA*{p&Po`6nBqmVdu~e?4bUETk9lUT@VR2Zb^~7Ef1%Xr&B85<gk*>fZDG`2x z^CO%n@RW#PXvB(*l|__FP(g6E;kmT2DAMii+IO@Xk6d0~Z9hN)#A(&xxGGK<R_>(> zPrTy*pIc`OocAFFPgRu&rD!zkByofi0U1EpK<p){j7Wqc2@)M7-W~FSw-#SkP<ez6 z-~u8Dgp5%l1tSn%609Yd5|5|eZIRUktyu8*rN?<><E*%_x~k4EbklaCPg03DBnh)c zQ2)tKeDa2jvloTHSZ4?VTnHFjQB?(6d9qqeEsjZ4LL?%T7erp5Z9qGLbRKIBrkEhD zN7`_>oI*NH@Q3^syuulYDGkn8gaC!2)kx{oCG9wHZgrK_rB#|qOsCyYXO<Qkvp&u- zV4UbNo|Kmhv5F+6tH}@xQ&e~YUIeT&7+ZlW(8OqQNNiY-Eej&85lSJI0wD;&V~PTO zWa2PKxAp<N!dZ*)5~~k0*p-_ch0!VPTE^Py0>bREw6ctG4xKj8i4<wG@kYpTE(l@H z@<4d}!p52W?&igMq@$uLFg~EQL?ZD*;=`dp2_!ZIa%T~hK`4V$0q;FXahTq59y=J~ zg~ta^2p;Ehs(gYe9EN~Uf<Q%3mMD@Wi6j+@)zwAv{Smjn@*J$4<=n;vDu=uA`d0rK z50N>*%q!IeJty}ztrG71+Ui-ibGLqWWm(?7dxy$6;wVCUk3mxU6zfxR6UhApAr+H} zpsWI|T1sA2co{%~v7Yg0!r-9Kq!>_C2Ut7C8t|?LN|D5ZS}bTaG^-mM1g+3UvcAx! z^{r3x_}TAb@0F{>X%y@@cY6nea>hwIP4sxII?Qc~2*|`aC4C8L)WBPdGZLp@Jj~fu z3g-oh3P@4md_kOMq%xt?Xpv=_q$aSzaj>8B>h+s!-q>VpEv2>;`3S9Jq|<cT9hSN+ zT8T%jwpdz}XrmC`v%0cEWerOUim=lnOJdwXKZL*LwprajDQ@_>AWF|6#;;3F$wvG2 zg)>PpxxE*HP`Ka;)-xFN8TSX|{Tg@g?NF3s%3^}I6=+4bwZxf~B`#f9;LKVB=`F*( z9kw=a(jR+dW0j=2%-+Gv=n5ih$QGNZqM*C5#K!75I`tOHIfVBlN)Wtb(z{I~i!jxQ zO5*df3{!Q;X(>20IqC=W1BJW2-sbrz?8T8-YebnInF$DqwE^cHLvpI3q;CZ~I}@_T zGUH*7#Zkq6?=@Pf<%!E{Y;+g#L&Ke#Mu>=PsZD*U%|i1mX1LGWAA5q8)Um#{#6q`4 zJ(WmbkXIICJk|zmDXFZd+isJ~(jROOtF7Kad5i?e)0#?sf}h}l@CP6NgWLb&7e4&? zVtnI#?4vaCkyKJqZ!~eHq@NEcg`#=>ae8@RXTL{HDMW3F+#KNhfn~i+E2?p6twa2t zcd&Kf84V|-@tAX$R;WcQB)-quVvF_l2DKyxQ-OujdaB^D3aqfmcAc^)gU=GbJsMZT zaqdn~0r{|;z;PDh`Ile)<P-Jm{cdDBO2|Y)NRn7MCCHFO#!(*(c-Q%h3>!N*EV-Sa ztVIVw6eL(rBT+oEvA{xCar@R5gM%TmDoL9eE6d9)uP%_pF}9c>g(P^758&_wZxP<3 zYiZC)<~Mh?%9pqL{ZqQkr<R0p3>f}TKJkg$m;e5UUJk-OA_56M5JeG@ju1M<mpOGN zEC$KNl_rG<+}<5gRTHE%RK5UJL!n4rMZH<4b>$Lww|W?3X=E8otrqntLpq6!YIyG{ zLrEnpE;zgaLL3Gq_QgB5CZB%c+Sc#>+0(BaV*(#4lGJkw5`RM)oO5m>#I+dS8|k>= zY$?69=t$FQYE&a4Q?SsC$s$egF{9ChQK9kH;GF~E(7J|_6`?9wSXiXdS!B{5Bb=qK zC8~5FJywEMhq?3IOsJg41lTVs|D_vW9{s03edfidU%Gv_vMxWNZ|T%cUB_`km@Q?E zESsGW21!y|u)<4lAx$%JX{|}SS;I{Fh+t7tP?>-ThDaxfaF_!0p&|rD5Osnu__83b zDWXP3C@r#@KxrXP@Xk^>N98=$JFIt<=+JY<{rUCHozGvpxi^(k567%iH*m=Zw9tOd zTz=*YpZ%(;$s56&+y&tS0!4)@3Nq>0Sc+L&%2-%jKmox=2$hl6Tj)p<l*C~v>;$7E zRR{#*z!gaCA&DTW;X_0q!8=888Xr;u5h7STAruaK40rjN`lCOl3G~zf)ZdVZ+qYjI z_e}m7BYLj~Sqwv{oV7NLO6d1<gpp)AA`yYo0YajKWqGkhA`Q}32yY2hfgKMhh6Bp7 zzy(K;5TwBR!<>Zk4(|g#1YCd+4*MUJkR*|wF1(&pbeRR>SjmtN(4T2k-3zyFUAtr5 zf5~I_=@9vT>2QO>GcFaAi2@swL@5;!GSVc8q~59%yakaUk_IA5NzxYH2`U%JOG9NG z#)Fdr6D*Tzf)xQLz{|rvAm<&zSy4}8HM7wNf$%`7&=25)ue;H6FTAw-+zULrxX``* zQ}6w@^F^N5ghcI+gQ$xPgrHe#kyjQk1X4!~E9gx?HY3LQh{33$o@h*AXm=7!krRv| ziju=<Gtva-a3Wy5!4YuYgAXD?h>fLAeP;7^X1S@WpA!z8*q;!Nu@KWhSitP{_9xeG z-}~H!PUlP{Op=>IO+tf4+GW4=Ov(Z;HN)YUtJhvaia=v)i{9Qnc5l5zJFWBX?|VNg zjt@&p&O;O<g+h9VbdKOHK?y{V1Rrq027(jyL`!iPfLo4V$4SIoAoMXM>*E0UTIKh~ z*8W#7v=@hAY};k1)G%1$D#wh*6ajoBsiKHicK11W_63@eq0v}iaqXR~uXgCJuAsHS zi<mO64wI|e5rPHbk4V!Ze4w(1!W*HK7u`DX+4ZG(^X{(W;ea?cJkSr|gD|6O_e<Z^ z*3M2ZyyI;>7g#q+g^{&Fl{u6CK1J}9-W^t(@)EBDX}iYROXs<CZi9u`V#_{NsW8qF zjH4z~f(|$W#(Rvll-3>cMqx0eMX5++iPUMV#hh}yIYA0ev=H(ErqRu+0#E^!u{Jj< zyjJ<DszP95EPI0q{lZgPj}ww00&yC%dhP=6`0no~C8rS`_A!i&;qGw4tJ_<=wztLW z;~taXDZR%AL1_<dVPOL{NGj`)Qi?2=`pHK(I>!*=JSmv<dBSPj=2>2tl>&2A3a<(& zZ`7pvUS~tSEUb23;hiQp5FRv=X06UrXOWfWD#2U&x9&3ViagJ`w{w?!yIZJw%=yRO zMpU~@632(>`w+;>lJTTsoEJDBP)boxMfA?g8_OU6)U)3E;7%};vl4MCVmvmIJPwFE zJN=uBs|$BRS#@lcC?O9ENWle$`#tR7fGSw7-nzm5Uwo2wtPbmz^$wlRBJ1@okN?nn zD3oEZcazCD#|1%A6^tfh27`S@g~e7m^;V5IPDF5Co$tmAmp8iIuiozMKVZ|$@WEW4 zNS)BsJ+E3lz0{pMTjjgo{p6CH49-ZhnoP7p7dR(yNL*ggjhZ~Na-Nk&leO+T-*V;K 
zxOnjrIx@ry5sj4^QBz}sr8JJh8HRbm!SI05Bxh2M(IT+aUL@5TsRS|@e`l}Qy>@f^ z&QW7V0pwGktEt0s#|(}<aNgm#wDoj1xdPPnS2kb2a_Pb&-b|L26p<7{fFzDmvLqvx zfkrK6q259(&1kSgZ?KJTYV=YIS&MKY02&vdD09Z6A^UqhtOHYyX*Jq(TOFj}usJjY zzn81w=bwA|<v9S9Qy|zm-0)EI4zmO)r--LIx>+=y<fCU_z4M3fI^Q{KjfiDvB{B*^ z9ziS{3A-U@*e{5EMWd0ju&~Pd!XoulQ@V=az&nqzCB~NId4cna$#6ow9#gM15iX!a z01dH{vQ>-fLLkD)A=0S=sFO^jm>Ht*GbX9J55^1EZ`?Y+@{T`Wl=g!vYAi`%qcWce zEhM2HlXf!JTQRHMMOGFUX|^+DIJ6Ru30??-QXmhDwjl_t^;iq_tV1LtgmJ{R2p23e z1R+dyn*8B6H19CmAu491;p;#MA>^NW`m3M)@%Oy_tjzCyN2HRDRFR6Jnpj$C^6144 zE-htbLeZ+#X*XH~At_u%X%$Wj-0(10nHP@z{*c|fw^_gND6M)zswL8y!^ah1y$yTA z$`Ve@ET4J=NIpOn=EW1!URim6jVHzMPrh{R<6}`>&CB7~$q<re9ZZ289WWVAnB)Zq z2fGZ$16(K(A}2`8XguIxJYxGGXY<Z3w{E;lyR*nryFn%sWa+?qyfauk!Ia*<c5i<W zj_0k1szRw#j{nSU>6s!)4XCm*?2N0s=N9X47eUuW&`L>!Ek>xI4;w60z)6FXIg@gq zk@4&fDsJE1;l-D3@%rt%tgW2k?9vKN5%6P6gd)-kUDKFg{pPNl{I~!8<*U76-kaj5 za_aMaNj=VNJ`^#|7ApKa7y9VNF;MglhCP=&c4Iu8biLJDi@J(b%GMyq)LIONhMLP! z!vjLFOePU`Hn-V)?G;9Y5$DccU}I^OG*n2ViQ<^D%1K&_6i$SGmDn#_dv$R0-v0i) zhO<*NXoQF5gjsH2N+r}hesIng&%Ai^)0Y?8Yu1b(rOH!NCSp{K*&dG&!4bS6N;P>t zVSj5vJ}{_QbNTEQF0C(6XF@DIBFXTPK}CWP$6*qbf9ds|{C__6?5)ZL^8h|LH8-4w z7-wDS>B#bktq#rQ(TZQYy|o!j`GqSB$%e^Aa<?c&w38#%0R-^gA%a6Fh0_W3T7!+{ zF6Y;mNacuFd7?B$=m-%hve;1CU4Lzd;y?e<XYRgyuYczREAY_7_%LI_vmo4;26MFe zY2D0w!||P}vpDR;x{2^Icm+x$g+wcbk^-$2LIt`t$=T%^LRw^_4WcBZ2|=bvP1tp* zd-3jW`9D7Kxw}tY-@NVv=7d4=w`5lOBx$%0L^UN51rYW6!<+q8f4ig2Qk+VSNQ97w zeMnMiq?W`AnvvteMu%1n>KW*$Ldg)ECgi5_`}@j%>BSrQ|N8vXJ2$rWUa=l?%sfMQ zqrg1u)cmp8rYRsIAoAW1w-5SPR~ze>r3^`=lu}wFl@d~ERHR4~!kOh2y4_4jX@iY? zFy4lJqy51c`@)N_6kmMd`tDa=dwnanethH}&xXtPgoyD}U_1;l&XWc=HB+Ew`C+TK z`|Mh${jTM>7B`j81YSuQA(RlPK$>Mi#u0-_6>_{EIpOYY_sW}hcFO0jztX$2yWcb3 z=QDE*W@g!$7-8O^$D1w*v!3KM7}ZpZZ8WKh?Q3^#{cagHesnRjXX9E{Q&rFbA<{Gw zc|g2$chBwb_pANhp55F#80-y(1LwV+L%`KB%go6v`I)rZiGV!pn>40bS4~MkbkuI2 z0QaX+8bA|hN+}m(trxY{3qs1aHC6^f8bU~{wc2?<JAg8s(3$m4b6|Y52Eg#>djIq2 z=x6zq17(k?SiK=hm`=*h3$Es>h4+3?R25WJb@Y&pBe2pV;PeAPFjJ&^KXjf<h0x_G zLg+UXN}mM4tOGUeO5cBAeM}d+n(3r<M_^6&8O_UPl`{a`2jTvzX4K-iBx^WrdhN|1 z$#P06W?S*6J<ntY1ZF@mb0pzD2;-ycbV9{EAiwwxOTvRla{A=dbZ|A(H4^5K=`29A zq&Uq9`4l%y=IFouW`HoQ7PD4DPHj%knMIoBkZNjtN6q|CKa<I-rcS(=0VAKuEgqLv zJ5??6jjF{wb1A0OLd<ON{s|{D!&}Uelxa?=rqrUG0i`;|Og_+Idb3_THZRH)M-Rp^ zn=}0N2%nk(!OuzHR8t_#<`CwQqMPF@gtr2Oc_ish=r#{CZgw<>4`xAdGu~n~<2>0p zo}+!3j+Hl45@z|q&M`~VQV`AT!6-8T004+dL_t)Mf_WBa)>PU#F0-Ba-7qJU6y8b@ zj`JOul8P|L?9KWO;)J|~o6~z>Ie8>FJV*`RDoGG?sywr%)G>dCaUKX^W`pxS&)fW| z0eCY(nCDW@Qx(tr*+rQDDiF`f??#wy)_Oy)AA7Ukd^wxslykDvvonn17!a6KT0gGT e{tdl<?f(Fu6$063seyn10000<MNUMnLSTZw)GZGH literal 0 HcmV?d00001 diff --git a/lib/venus/examples/images/thom.png b/lib/venus/examples/images/thom.png new file mode 100644 index 0000000000000000000000000000000000000000..738179aeb5c35f4bbb11aeb5cd20d5d023be797f GIT binary patch literal 7257 zcmV-f9H!%mP)<h;3K|Lk000e1NJLTq0027x003181^@s6_uHJA00006VoOIv0RI60 z0RN!9r;`8x010qNS#tmY3ljhU3ljkVnw%H_000McNliru)C3t55fIJfWDx)W8|Fzw zK~#9!&3sv}WLKKi`u2Fnh`963!IgHoT!pL3uBvW$K|=Kx_z}=ZNd3$!AR!^~#vla0 z0f`6b7X5%kAvG#dMN?h4+_uY3=X~=HF`qcoo_IJBm&nM7n`x&FgtT?;cw(=!zP-L- ze>(`@?u)+0|AhaaCiA-YdXsyHZ{s0|ZxG;3VCl~B5g`Nw0E`GvE-u5@1QY&mGrJ2U zo$d{?pZxfJ`QeX$nEdWHzd5QF#Ys~X?{K2Kob$e5G>PN*6Q%WUFD|bV0Ph>)Y6m}3 zS$>&j+2s2_`qAvu-+$ix<wt+zE+(^T17P^Npy>^Ou3rfNC6f34=3oEmKiuEz-+ubw zL2l|QZmLBboR1T&wBVdIO=Bsb^4>SZ8H5nrI!9>H>sUlC0H9+T3~j?Zs$3efHme z{hNPW8aD&r-xM@`4WR4)!`8vR|J~dF_V{4`10KRrUCyFJEAHA>SYtSxXADFHcx$X9 z&OBo*z=5^FAZQh0DLq%p6;<uxwBLBfE-!|okN)cZvp-sb3Ohi*7GVB0fzlG_YKZUL z-upKP{r=AqExwazImpu_=744t6bA$+5<+k;g%C<Asg#mJFhv|vNg)+yiVLQt5IP$S 
zVl74DgU<j;xd?w_I<NoqO@&ar0Z>{#EcVu|!M{2^I{aA{$9FmTOiRHBc>+ZQ?;JVj z5S&K{0U-pmtpyPg=L|%EBL_G_6h|P=VSzB37K|ZeS(@BEWrPsIzkYl+`THB@yJDGM z$E3wq1j;sm@{_&zpB@bcAMW?_yP1-il8o<X8SV9Qc+-N2NH7Kfa5kW68kA)X?>*8a z0p|=-2@nIU_we3>D-BKzyeK$q$d$_Pwsm8xvifIN)8-#6-`_2-emPzVSiAvHp*i;V zv-JI=LGR99nxumy;d_Iejt2u|NrYG{lAJ>b0nQ}62ims9qH54IH5dYj5z-`v^9}(X z&U>`h(ZOIZT+L=QKA$t`$}H>;?o>tb{%qFlw*iyooc!8cUkR9hMWAd26oB~Nt%HAY z`}pt&$9sDR{WMYgSxUzT`#9)lNR^^UNJuWAgoKg;T1JTD7`-e<KhKb5Df)SaNGT}6 zyT9cEN(d+=h%=6|D##c^S}Mkz6CwDYj2F$H>;M&RTEqM`+{=ndS_3NY-9Gu3@4a>B zgX6va=|P^ygDhi%G)6CuktjuqGf2jvh(U7+Av&;HDa0y96h{Co7!fq*5MbaTfCyl| zv(Vr?iMEA}Q;I-bC@H0iqRP2HC>GU!dW9O(>ww(^`VymWogV%3)BXNW@0}dq=|wUf zqzT{eXXqyxl1Njm6yhjG7N>~g1ZkQhOESbs3aw)(6@du`&Ip2YXsN)MfZzgBNPsZ} z-_4(^h%^I&a}>N6I*OyJZW0iFR+ROheHGWrUtzv0M#XB}9;E-|?URE)yn8r!tEXAI z7b~7bnj$5Ur3n%pkrE1#jzLT!fWd<!5Qhzf);ToB!Z`;D9tZ*6ujuKXD}ljT2ciHU z0*d(zZCyb%6<K&0X{nj_$~4vY7p?m*I|2=Ql|XH-(`(gTnQtP+dx>PH31>;F1&<>| ziB^cULaa1JQG`gvh~orOYJdnNBsk|F#z8~~1O#v4nt<Q~JRCqATyRJt2q6FvSj*u( zkj4=S)`mjXloktC)hx+lcPm!ndq5+1nDa}5#crVgB~d!cqJI>}@$EQ@GOe|UqKLHC zP%46ynmB*aP^@$COLyXZ=Vyrsj4?3ALBy`-6@r8J0nWcj3g;X;PN7tvw2l!)2c)$Y zaWqJ@(nou-{Np7Ne8Xeh$m}*&*Vn34QXa=z_mxsoDMfJ{BZ?v@rHC<x5CW{VXwgCl zfdC6BCD`h=#K0I^auBY$2tL3$1AxPO0)j`|wy@SBgaA$i#S~l=Q|m)8mn})9cu!~1 zMAkJbBi8(FSY!2tfJ5xaVqO9YKnfu;A%)_cGa&@xI7Jl4q_yr$j|l|g2w(_cP%45_ z3PLL|#=w{6972E#VfjYgTZiD`oP`h(oTKg@!~q7VNP`KE&>X;o3PN0hI}VWuvcvEN z0JR3zuUYI1rp>k}*H%^qV|#*FOaX!uBOwG<gEQF;&cQhclikmiQs~TFK?u>Y3c!00 zYb}TfoUzWF1A}(~O;dm~0+ZtUeyjVh-mll|fdVKnZhgSrB^hE(U)VO!!gdH0+Ylz9 zgm@P~)<u7eBo|;Lh%o^sB$$vOE<k+wy;Sf7EUgmGdN^a@t%J7?!8&lxAvlLv2^ePx zp3qbU7}1OO6B3LG5I9V0aWR~T@x0jQLjSFm<!xEJY~5<bmLPwd87slq`vl|^AOtZI zLPAQt9NlDv_&;ng!IwhRC3dU9`SRzh$4?BN7(4<3F*t4j-#MXRJtzRidbCx8tKkHb zVvdWeA&j-GZW<+Hk;zirGR8gzU^c+6uicjf>V<4(&A&>)`3FtYBykjR#u)hW`ADIm zRotm8=iov>=?F?kT`<37jfK#8P{si75f}p}g7X%fbHqG_;0jf}SPB!s!C<XHXbjAv zMqM`;k0+?B1(f6nU{o2$jj{8!xvl`aO(Mk&YxV{IDuvh!wv}+6ymjaVDh6ZhdVK-_ z5rZ)X!{HG9ym!rZ2murTAp~gYf}F9K%nA(0BiLY(Bnjd;2A3ScI(RZ5!2u@VIfK?3 zaU4?!A)KF|gLne8)@)g`v{_hKsqr=?=4)mwJFrp;cqXMJ&N<>ZCV)^IgJwEKUDqgz z0!?k;oP)I%Kl$JT93K<3jv=M!z!HPC7S>u^TwY)@8RO~mGhB^^aLysmbL{W$BTG_f z(Zvfccr>O(ZCa4vP(r{XfOmchTflMvcFmMu@Uxq1bv@`803;*gT5Hlu!C8y8ZSnZ= zW29+{q9`yLjSxIttL5qGDU30&*1{Om<*6PBoWnT>X9E~xNb?>Jy+vaUMxzntMFC>n z1EByYrC_av_kQW30+^JPBnj#&5DSRb!`U4{1+DwpZEN-gGxp9)B^3)HP*D`PytqK{ zgkC<t{@x)5{eA2m?87++V+>ADPa#+*Ms;0-h#&$%Db>w{2x*#vw=FyabeiJcGSX_B z25nViHkqQTn=S$ifYcIH6Pz>f))4|D;a?zy-xNu03o6()_v5|Kb)-d?9S}L^@cs9G zh?A2Oa8B^vBT8b-=W{F;3!I&up_k>TstWxyL$B9E5~m;{G<A)tt}$E8P?QUlrU4fM zI*Adh7)g>K^gXoJn2skHjYep#f#)5wAOcYU3yu)Nwoq|%z0$65@daih9ewZQ;OOt6 zsrQp4kxD5#%=$P!KE}bp0mkDo8q;Djnc(W`3R#xn&Ye48Okgw`fr!xN6{08tOLJIj zF`Z5^nNBgU3)F1`;{rSan#(JM5Rm6N4)^wv=Y51gDC-LIQ3=x+SnsZb7J}u3pcyFl z2*nmgUohoO2Q1b^NdbuN-oE{J@17k0wP2cq!$D8=<Cu;P_UZKW1dkp)!GlK+!C0rj zpMLr&0N~!edw>r}k{G6G;H*QWG?bL6s|vI61oL8!vZ~>n2XO{R0prO8PtKm<>go!` zY>HUxu8JWgthH!t3*#K-<pSf$47PRN8P_((d}e_&0PS+vrR!Obmjk;Ym|y_-kKTKC z@SR&nc|R5^5uoG!KD~A81RMk=IF3$k;ntnEz=T3smf)P@U~eCXhlg;+;OO8Gk>DsM zW013eHK>X?A};ZRx4#F?6!!c3c;}sW@buz2e)Si>LTeicE%EX1KEixFM#MOd`U7aG zkmoswaX4>C2P?jR=QRKEJ8%8%4bs?_V*X1%T(@@tfM;<ePY(}xKTRo5V&qwZIEoNy zg;+;ObOiDNw@!}m^Pm45AAa~D7-R6Z#h}+ma9sxDoI_QXSS%KuGBbwD^9%Iz0rETt z02~|~;P%}+==F0PA09#p4(|+@4@jaIgS-a-&{{(aiPNJ)_TG2jPXF-34^D5T@{iW$ zzpbFZ)%CpKXNed~;)H|{Na76p{Q=TAf%hIq2YWCsAb5*3PH_6eA3+F);2fBg2+pG{ z3$$&EB#N<E6sYSO&RKYGkt8`FBqpOVe8lhvKlv%%efJ&Mx`s0rr$@(#m0s4b1oAAy z#dM5VM-W2b)~#Fg_Pu)|Jvd1J>=*yFkA-1d0AoFZ+GWBF0G~~!)E*rmSckZaU?7OW z6EMh9Snpt32d)^DSmw0OL2`zx@eqrmz^a_u+AiQ&t#KbpB)uF_6v0a%$y20BgvE3U 
zSGS1L4BP_@9Ki=@sUV3Vjbccy(ChCZN>WIrQP(wIp~ySc_!{(NG9kto<#~=Iis6if zX<9^41cHYoAnoU1tg8tJ0w{P)MpKl<0-#Q)qDXg%fb3Gewlyfr0>l}zgFOTuu$UFy z!vI0+2pSBq9`GJj(?A4<NNOya23kn;dOd(B3@77u-k4u*vCx;yn6^|BuQQ(ce9i}h z0hyB%Qc96&S{SL2B?&lVXp9999s~w&Ewt9~Ae3bZsRShBoi(h*#9&<kLCFYZ-9XnB zWD-F!4lWg}X<@4dD$*eG2+pIbDtKeT17R_nLkWqKlUu<t<{p3XxF#aC6kOV&Mf6HF zF3XY;I7y)Z=RwOdiERxe7tox8oCB;yEF}bEpb!u#2`wZTV_n1p2NDDuELz(DtP2q0 zI7XJHh`5Aaj)+Sf?;k*N0kJC5g@Cqg0RU~;;O?C}I6gTE#ya!S$G=;YF8p?fpQT-u ziWlm{g4jV>SFBhRn9t{Eng*OnNG{RV4V<xvlt!d9vLwa+U=PtUmW`CYo?ul~aIQ<% zt+lAC3f5WxfKmc*9&J-0OA-vS9Ex#7LV-enF{T^K<YJX!{os2)2w9f<Nm10FeSUxH zfqDnf7nt3a1TX;120xQh+39rZqa+UC0>(Qm$`VQ`1ZSWGLzYB{w1n0Yd7eT_iTQlK z5)E)JVT?oF7;vE=RD`;1F`A4qo6X>y!+w7N2?p0R$m6stvI*JMt9)0ps*Q#BT@KBp z#_#VxaL>*z7Q@E;J4VQM3Defi<ZbJ>Y?{&swArBVtEvi3(*T^IwFXjZFmx3NrMjY8 z(=>3#biUU6>)$0wf+&ixDz_3bC?ya=K+{w(Z3{+#mJ0E*fJeNmV0Z$EF?dFpl?A{V ztassZGICGO&ub4<KA^fmoam-R;F{riUHxKF*YDgqI>;}pN?7l}M7NHF5D0+~0)fEc zyhl|v*Jhg}DWWLC!NDPpkB`AQ$80vklnXRX1KSz|>#j2;v8*SD5a67HloG@_OxvOj z4%R!Ab&bR0BlLPbTrFxp9FNV8vJ!R$71$-)jq{JbeDr7@2|6t5N?8QnT8~&TSnt7@ zM{6xY+gw)-jq4^Rmz6}fES9e-7T$H@<h_RrU0Uaig#qBL1t$jMEb3+Y$Fu|)fxsCm z+hR|vFqzLolxD7~s`i`MdA|ny@$WwyR`bcxNuKN0Sz1&jOJj|u+iV6|3l}_y7^F~$ z;{;`0<LucvWTep;2M>lwDom$S42NS>)dIbK22Kpby1H@GG+>}E3Zt$<K~kV?8_b&u zl{1i09Mb;42PxafpFdoTXY<*{9?#d>jK2P-%X#r1Y*`$nKlpxJHBDYcEqQ=iYr3`N zEP@9Zaey<ZNF&QKq<J5U#RBvB9FxfeimPsac3H`&>k^E@^;&kuz*vhA7(6Es=O9jK zyhUjmOc!MsPN%N@;)|vYH2d>k|If4MlkuOdf!u5|d7-05Z0S1LHvHJ(-xqcDcV>$s z+3WYYaTcBl7-K=i5CB-)z<3Ai0*FhP)?zT&$E{m;z*tuk1^@~H-djXz0^8PI<YgSl zb%N$SU_(IBlwh%f2_8wBhUV<TfBN8l{l)#q<I3TqGT@iXRL*Tn={8vwTXwL+7PANd zd{H;mq*%DS!G}d#Q>-H(I5=lOM5w9~^Z6V^UDOnDg?^GDPg5A@V2r!otO_AuG8tiB z%;AlNRsvcoq;Ujd3@!wK5$dMFY&Jt30;<;9vNpxg;!g>vHh{W~Es&Rhvn@bZf3$fc zHz|sm>Bts!6?C4W<{mEMs9<q98Dc!0fP8=m1iz>ec|sf*%%lXB7N`uGNdaFORI>$O z7<|i-r31_s4Jzj_Hw_BYfFc2|I@OP&BrJ-?H^wzjo?p!Xn9zZ>%Q~@He!ZTwzqF3U z4inxAcwCg9EXwk>qk~Y@b>LY>BGPEf3dLdpUmI{8LkSKgB#QAAO;I8QT5e8BSnp7j zB^HYc#yAjXxU5DHTA(!sqzFPN7-vD8p;|N`VyNocO(yet+S*^P0o(*?Z!&AK1>jC* zi7}LQ-7c!eo8UP@fM5ci2}CMFSvj1Youe*l^pX^DltOZjH1DDI4z0CltwC8gUFK68 zaL)1k;taR$o+8a7WP=oGn!<Yv<GS*x4?Z~W-NPqO>HsX309PfLZJ^h;F0ZY2b8Bw} zbo~ik->kC!cTb-`Z#)HOjq3u5)<6)${{B9Cy&i_cA@1M5kMoNQJbd^NUp{_>@pOWs zED<=Ncd&<BZ{5WYfBa(*5r&snID7gG4<0<gd_KRf4I9_R&aJg!I2wB&LNh7mBSu63 zn&oKMu4W6nFSP%OE#~Xi0B;U6navww!WUH;j{189Phd<Si6nTR<LKxFJV@NVdlzrr zzJuv>3gQBdY2g9^#L+s7qFms+Z@+`b0Fq3@bB{ZB?%?R?2tsk_5Mex>2jU`tv-Z*1 z+4Ouo`NW4XUjtijfHlkc+ie0Rw)m!X6J8eplO}O?_dE9{!|CMssNdJ~*@E`69z{_E z-&!c8aOch)q&h{cI$tY<K;0U+Mb~BkACScf(m27@_yUYE+`fGqwX2cmIRXJy-NKW{ zqOM?rv$b=J2hYwP)z&><19%g=ev7ZgE;Dx9SY4FWrB3tD&(1Fk8@#KUx)T|2fH*v8 z*~#W{d3lK^kDkD`7Lpip9V2EEWE^y0uw@MvJn}pTV+@1A05a+l0Inp!C^W6{^F?Vs z{o=vpmuDAWtg*R1cA4=@fY}y_b{p%}uA9kcpWlDjH0@}<s0<U_FRC&u>IxoV=}Wsh zQ*a(v=NBkvb2N(*u~az7`{>CCXbhe|e2g?nkmU)6qboS)P}eoA^8h0(>I$CVMI4v+ zpFVp!98bnufNlf4Gr(Z4p~iMy{f6VwyuY_MdGhR8Qyc4wx`GLv$r)!mRsi&RJ#a?2 zy12l6JjQ%DLN%QNZ3{D<!BrI=-v0t;XU`DDakncc1p*@&=Mca`AhM6o&L>|!ef~6r zP_HptZJ6`A8oOO)EMCKmUyyZcT|1f<j~2n)o)&YJBr&U+nlm4W6NA(WX`aCp6;4l2 zLCym{z_kXW*+rMzMhW)v9K&)BuNic#pc4%hNq`8(+EBGkSTv1!@c7AOG@XuLVZ!a} zfaN<HM_$^pTpwrWmkT0taisS52ZKl{$vH711oGBG5FuELMKOm`5_fOi!gt?(8z(2n z$kL>%K(K(ywcxRaPGV%e3_6Pe!BMp}in4U$*}VGoN1uH5@aglX==|$yl*?<i0eXGS zz7+Oui=ft3*=}Lj0B9ebU)|5*`-is<57Ss{8Sfu3A_ikERHTvgfe^>2iaEY`^Z-xK zo`3=arvSkuECXbuL6JZdD@d(6R-HxTEE;3|Y*97yvL3AgdR<`(+mZpX))*CbtXI1w zJTm~=Cl|vnA3l9@H_<X3q$yAG95$9Dxqwmvu4!RH18+PG4<PT+1PeF|+d5>2dyr|2 zEYG201<wd|Q-`uBeX&@ylj&^o^!dfbhEHv_nD3@Ay%ardOYyKJz}WP+t2b$jsu^F6 zCd22+`B5)PMI31vB{2jDOb0l{kt+uH0JttWSA5ww!Mo<LC`}OO2`G{%+8VP(5yrEb 
zy&O-9FP}btIGRki1rg1Tbs9E+hF!^f*rCjJQ;}D|eF)9-;pEXEi4T={AO!=vdwQEI zC6UB2CH)+dF=(aWy+>J=U3(tq@YaC^TGp8u>ZZYXG7iJh$WM!+ettDP`}J=>{&Y=+ zn=R4Owgh0;k(d`+aOpLAjpauFf}Ad@mWTp*+jG{bIElGnoaK2A=RF0Yu78*S;v7*D zLuv)*9Ac%reIIWznN7p_@XB9~$L*vjN5B2_{_j4$|M2me7_Ee<Tnf`Rb~jlK+xm^^ z232MoJr<jQ`EXWLjWIQMrthtlQ4}#_9QCsvyz}IZ1BC!aUDGsqKud|XDiDGT<IylY zdHT#>42Nyi`swFSo`3R-U;V|$-iH!Eu>p1^Q0<ODBD|_?o_3tXVq04K)_3Tyud}LY zT92}vPj$Jd<e=YYf=S|BKr0E(1u=9Ts8v~o@nj6|tUtdv_m`uQeRTG`UNm<0+0*ln z|Kl(I{C_QhF1{&Nw{?>5+#T4~&$z)jn^(1&)a?Y`^XX(PjcF+aPf`eS&Y`ZV&>G{b zw)SON*=LvM=8H#<>%aW$_p@p1&pvr}{@H*2@BicXOF%1D%Qdjo8ms23u)3@D_NBhX zReuG51VFYNIe`B1+FM>T05O0#kE6l6M+f)byL<cO-tF7*{$RkB<S^E`@oZ|&hok!0 z<wbEmosT9}I~>l6X<1j(5U>C+Uydn&+44GHGwGEnn=Se4&T^{QQ9|8J<k^}cH)~?0 zHdwyi6`WV~<TsC=mg7Y^{_@Gw+lkhZQqs?xws=07&W1%fty)t#@0%T=VD*Xs!fy!; zzw!a!UCq_&>?Q!xC7UZ|SB$4Cb^+*>pfSco2%eTlXv2m}YnI>^OZJK-qqF7Lo2)h) zT`{j@^`;{EuEbz9F}@*Co84sVL5W&6jjhMw>-TKdz?K_ARNoN$ZVmRo%JxTC@AX^D zZ)uAuvn`Fx*b-N(wTH6)Fx*<8Qm=tsYysTd5Ci+yJx42EvGuVTI@&E}PCJ0EfUHb; z{ek!`VCyxI#SWjkEm8`vdqjMfK+%?iecO63XiI^8J%LyOkz1Hu|C?qFY_*oYZDZEG zDhB?#%x;=9cI<<$UT%Xiy_N^>V%BU?<7#be#=`zpG4Pw5pItvEvp&gfVz!wysI~BN z8`#YRV>4gfm9vJgbLRbZwk&B&ez;4$H&t7$nK0W}9-HgdY$=UyMoPPO=)#+V-307k z!Te@rquFx&gmx&l-2iQF*s$`iih<ws%zL;2@HWLZTjJ+!et6f;!>&Ble^pWXI%}40 z033EG@lA&T*Uf&jBJ!#|*RS;%6<_spDrm=%n9b87>y6sGI=i=3MBI+A!#4r+tAfQ1 zIq$aP;&el6-L?`?*s){ow*dTFpD*(!pDjZ-G>@@Yb%<^W4E&BwaeNa%-vlgnG*<2G z2)#kr!j9elZw%x&0rpj(>4x89hiUHeo!`g@VtrE|enqdiC*9O%^=$&YsRs3Jyy$fr n%f2mu-v+Z^^}PcAAH4oIuVOtTnC6ri00000NkvXXu0mjfCdX*i literal 0 HcmV?d00001 diff --git a/lib/venus/examples/opml-top100.ini b/lib/venus/examples/opml-top100.ini new file mode 100644 index 0000000..5ba6771 --- /dev/null +++ b/lib/venus/examples/opml-top100.ini @@ -0,0 +1,53 @@ +# Planet configuration file + +# Every planet needs a [Planet] section +[Planet] +# name: Your planet's name +# link: Link to the main page +# owner_name: Your name +# owner_email: Your e-mail address +name = OPML Top 100 +link = http://planet.intertwingly.net/top100/ +owner_name = Sam Ruby +owner_email = rubys@intertwingly.net + +# cache_directory: Where cached feeds are stored +# log_level: One of DEBUG, INFO, WARNING, ERROR or CRITICAL +cache_directory = /home/rubys/planet/top100 +log_level = INFO + +# The following provide defaults for each template: +# output_theme: "theme" of the output +# output_dir: Directory to place output files +# items_per_page: How many items to put on each page +output_theme = mobile +output_dir = /home/rubys/public_html/top100 +items_per_page = 60 + +# If non-zero, all feeds which have not been updated in the indicated +# number of days will be marked as inactive +activity_threshold = 90 + +# filters to be run +filters = excerpt.py + +bill_of_materials: + .htaccess + favicon.ico + robots.txt + +# filter parameters +[excerpt.py] +omit = img p br +width = 500 + +# add memes to output +[index.html.xslt] +filters = mememe.plugin + +[mememe.plugin] +sidebar = //*[@id="footer"] + +# subscription list +[http://share.opml.org/opml/top100.opml] +content_type = opml diff --git a/lib/venus/examples/planet-schmanet.ini b/lib/venus/examples/planet-schmanet.ini new file mode 100644 index 0000000..5c620e2 --- /dev/null +++ b/lib/venus/examples/planet-schmanet.ini @@ -0,0 +1,78 @@ +# Planet configuration file based on the 'fancy' Planet 2.0 example. +# +# This illustrates some of Planet's fancier features with example. 
+ +# Every planet needs a [Planet] section +[Planet] +# name: Your planet's name +# link: Link to the main page +# owner_name: Your name +# owner_email: Your e-mail address +name = Planet Schmanet +link = http://planet.schmanet.janet/ +owner_name = Janet Weiss +owner_email = janet@slut.sex + +# cache_directory: Where cached feeds are stored +# log_level: One of DEBUG, INFO, WARNING, ERROR or CRITICAL +# feed_timeout: number of seconds to wait for any given feed +cache_directory = /home/rubys/planet/pscache +log_level = DEBUG +feed_timeout = 20 + +# output_theme: "theme" of the output +# output_dir: Directory to place output files +# items_per_page: How many items to put on each page +output_theme = classic_fancy +output_dir = /home/rubys/public_html/fancy +items_per_page = 60 + +# additional files to copy (note the wildcards!) +bill_of_materials: + images/#{face} + +# Options placed in the [DEFAULT] section provide defaults for the feed +# sections. Placing a default here means you only need to override the +# special cases later. +[DEFAULT] +# Hackergotchi default size. +# If we want to put a face alongside a feed, and it's this size, we +# can omit these variables. +facewidth = 65 +faceheight = 85 + + +# Any other section defines a feed to subscribe to. The section title +# (in the []s) is the URI of the feed itself. A section can also be +# have any of the following options: +# +# name: Name of the feed (defaults to the title found in the feed) +# +# Additionally any other option placed here will be available in +# the template (prefixed with channel_ for the Items loop). We use +# this trick to make the faces work -- this isn't something Planet +# "natively" knows about. Look at fancy-examples/index.html.tmpl +# for the flip-side of this. + +[http://www.netsplit.com/blog/index.rss] +name = Scott James Remnant +face = keybuk.png +# pick up the default facewidth and faceheight + +[http://www.gnome.org/~jdub/blog/?flav=rss] +name = Jeff Waugh +face = jdub.png +facewidth = 70 +faceheight = 74 + +[http://usefulinc.com/edd/blog/rss91] +name = Edd Dumbill +face = edd.png +facewidth = 62 +faceheight = 80 + +[http://blog.clearairturbulence.org/?flav=rss] +name = Thom May +face = thom.png +# pick up the default faceheight only +facewidth = 59 diff --git a/lib/venus/expunge.py b/lib/venus/expunge.py new file mode 100755 index 0000000..ff5017a --- /dev/null +++ b/lib/venus/expunge.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +""" +Main program to run just the expunge portion of planet +""" + +import os.path +import sys +from planet import expunge, config + +if __name__ == '__main__': + + if len(sys.argv) == 2 and os.path.isfile(sys.argv[1]): + config.load(sys.argv[1]) + expunge.expungeCache() + else: + print "Usage:" + print " python %s config.ini" % sys.argv[0] diff --git a/lib/venus/filters/addsearch.genshi b/lib/venus/filters/addsearch.genshi new file mode 100644 index 0000000..f6f36ce --- /dev/null +++ b/lib/venus/filters/addsearch.genshi @@ -0,0 +1,30 @@ +<html xmlns:py="http://genshi.edgewall.org/" py:strip=""> + + <!--! insert search form --> + <div py:match="div[@id='sidebar']" py:attrs="select('@*')"> + ${select('*')} + <h2>Search</h2> + <form><input name="q"/></form> + </div> + + <?python from urlparse import urljoin ?> + + <!--! 
insert opensearch autodiscovery link -->
+  <head py:match="head" py:attrs="select('@*')">
+    ${select('*')}
+    <link rel="search" type="application/opensearchdescription+xml"
+          href="${urljoin(str(select('link[@rel=\'alternate\']/@href')),
+                 'opensearchdescription.xml')}"
+          title="${select('link[@rel=\'alternate\']/@title')} search"/>
+  </head>
+
+  <!--! ensure that scripts don't use empty tag syntax -->
+  <script py:match="script" py:attrs="select('@*')">
+    ${select('*')}
+  </script>
+
+  <!--! Include the original stream, which will be processed by the rules
+        defined above -->
+  ${input}
+
+</html>
diff --git a/lib/venus/filters/addsearch.xslt b/lib/venus/filters/addsearch.xslt
new file mode 100644
index 0000000..f96db81
--- /dev/null
+++ b/lib/venus/filters/addsearch.xslt
@@ -0,0 +1,70 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
+                xmlns:xhtml="http://www.w3.org/1999/xhtml"
+                xmlns="http://www.w3.org/1999/xhtml">
+
+  <!-- insert search form -->
+  <xsl:template match="xhtml:div[@id='sidebar']">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+      <h2>Search</h2>
+      <form><input name="q"/></form>
+    </xsl:copy>
+  </xsl:template>
+
+  <!-- function to return baseuri of a given string -->
+  <xsl:template name="baseuri">
+    <xsl:param name="string" />
+    <xsl:if test="contains($string, '/')">
+      <xsl:value-of select="substring-before($string, '/')"/>
+      <xsl:text>/</xsl:text>
+      <xsl:call-template name="baseuri">
+        <xsl:with-param name="string">
+          <xsl:value-of select="substring-after($string, '/')"/>
+        </xsl:with-param>
+      </xsl:call-template>
+    </xsl:if>
+  </xsl:template>
+
+  <!-- insert opensearch autodiscovery link -->
+  <xsl:template match="xhtml:head">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+      <link rel="search" type="application/opensearchdescription+xml" title="{xhtml:link[@rel='alternate']/@title} search">
+        <xsl:attribute name="href">
+          <xsl:call-template name="baseuri">
+            <xsl:with-param name="string">
+              <xsl:value-of select="xhtml:link[@rel='alternate']/@href"/>
+            </xsl:with-param>
+          </xsl:call-template>
+          <xsl:text>opensearchdescription.xml</xsl:text>
+        </xsl:attribute>
+      </link>
+    </xsl:copy>
+  </xsl:template>
+
+  <!-- ensure that scripts don't use empty tag syntax -->
+  <xsl:template match="xhtml:script">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+      <xsl:if test="not(node())">
+        <xsl:comment><!--HTML Compatibility--></xsl:comment>
+      </xsl:if>
+    </xsl:copy>
+  </xsl:template>
+
+  <!-- add HTML5 doctype -->
+  <xsl:template match="/xhtml:html">
+    <xsl:text disable-output-escaping="yes">&lt;!DOCTYPE html&gt;</xsl:text>
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+    </xsl:copy>
+  </xsl:template>
+
+  <!-- pass through everything else -->
+  <xsl:template match="@*|node()">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+    </xsl:copy>
+  </xsl:template>
+
+</xsl:stylesheet>
diff --git a/lib/venus/filters/coral_cdn_filter.py b/lib/venus/filters/coral_cdn_filter.py
new file mode 100644
index 0000000..e0a8c1c
--- /dev/null
+++ b/lib/venus/filters/coral_cdn_filter.py
@@ -0,0 +1,18 @@
+"""
+Remap all images to take advantage of the Coral Content Distribution
+Network <http://www.coralcdn.org/>.
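+
+Illustration (hypothetical URLs, not from the original file): only http
+image sources are rewritten, so
+
+    http://example.com/pics/photo.png
+
+becomes
+
+    http://example.com.nyud.net:8080/pics/photo.png
+
+and a source with an explicit port, http://example.com:8080/photo.png,
+becomes http://example.com.8080.nyud.net:8080/photo.png.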
+""" + +import re, sys, urlparse, xml.dom.minidom + +entry = xml.dom.minidom.parse(sys.stdin).documentElement + +for node in entry.getElementsByTagName('img'): + if node.hasAttribute('src'): + component = list(urlparse.urlparse(node.getAttribute('src'))) + if component[0] == 'http': + component[1] = re.sub(r':(\d+)$', r'.\1', component[1]) + component[1] += '.nyud.net:8080' + node.setAttribute('src', urlparse.urlunparse(component)) + +print entry.toxml('utf-8') diff --git a/lib/venus/filters/delDupName/byline_author.xslt b/lib/venus/filters/delDupName/byline_author.xslt new file mode 100644 index 0000000..ad1fbec --- /dev/null +++ b/lib/venus/filters/delDupName/byline_author.xslt @@ -0,0 +1,29 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml"> + + <!-- Replace atom:author/atom:name with the byline author --> + <xsl:template match="atom:entry/atom:author[../atom:content/xhtml:div/xhtml:span[@class='byline-author' and substring(.,1,10)='Posted by ']]"> + <xsl:copy> + <atom:name> + <xsl:value-of select="substring(../atom:content/xhtml:div/xhtml:span[@class='byline-author'],11)"/> + </atom:name> + <xsl:apply-templates select="*[not(self::atom:name)]"/> + </xsl:copy> + </xsl:template> + + <!-- Remove byline author --> + <xsl:template match="xhtml:div/xhtml:span[@class='byline-author' and substring(.,1,10)='Posted by ']"/> + + <!-- Remove two line breaks following byline author --> + <xsl:template match="xhtml:br[preceding-sibling::*[1][@class='byline-author' and substring(.,1,10)='Posted by ']]"/> + <xsl:template match="xhtml:br[preceding-sibling::*[2][@class='byline-author' and substring(.,1,10)='Posted by ']]"/> + + <!-- pass through everything else --> + <xsl:template match="@*|node()"> + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + +</xsl:stylesheet> diff --git a/lib/venus/filters/delDupName/p_by_name.xslt b/lib/venus/filters/delDupName/p_by_name.xslt new file mode 100644 index 0000000..878904f --- /dev/null +++ b/lib/venus/filters/delDupName/p_by_name.xslt @@ -0,0 +1,17 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml"> + + <!-- If the first paragraph consists exclusively of "By author-name", + delete it --> + <xsl:template match="atom:content/xhtml:div/xhtml:p[1][. 
=
+      concat('By ', ../../../atom:author/atom:name)]"/>
+
+  <!-- pass through everything else -->
+  <xsl:template match="@*|node()">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+    </xsl:copy>
+  </xsl:template>
+
+</xsl:stylesheet>
diff --git a/lib/venus/filters/delDupName/p_from.xslt b/lib/venus/filters/delDupName/p_from.xslt
new file mode 100644
index 0000000..c551838
--- /dev/null
+++ b/lib/venus/filters/delDupName/p_from.xslt
@@ -0,0 +1,15 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
+                xmlns:atom="http://www.w3.org/2005/Atom"
+                xmlns:xhtml="http://www.w3.org/1999/xhtml">
+
+  <!-- If the first paragraph of a comment has @class="from", delete it -->
+  <xsl:template match="atom:content/xhtml:div/xhtml:div[@class='comment']/xhtml:p[1][@class='from']"/>
+
+  <!-- pass through everything else -->
+  <xsl:template match="@*|node()">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+    </xsl:copy>
+  </xsl:template>
+
+</xsl:stylesheet>
diff --git a/lib/venus/filters/detitle.xslt b/lib/venus/filters/detitle.xslt
new file mode 100644
index 0000000..367a6ee
--- /dev/null
+++ b/lib/venus/filters/detitle.xslt
@@ -0,0 +1,25 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
+                xmlns:atom="http://www.w3.org/2005/Atom"
+                xmlns="http://www.w3.org/1999/xhtml">
+
+  <!-- only retain titles that don't duplicate summary or content -->
+  <xsl:template match="atom:title">
+    <xsl:copy>
+      <xsl:if test="string-length(.) &lt; 30 or
+                    ( substring(.,1,string-length(.)-3) !=
+                      substring(../atom:content,1,string-length(.)-3) and
+                      substring(.,1,string-length(.)-3) !=
+                      substring(../atom:summary,1,string-length(.)-3) )">
+        <xsl:apply-templates select="@*|node()"/>
+      </xsl:if>
+    </xsl:copy>
+  </xsl:template>
+
+  <!-- pass through everything else -->
+  <xsl:template match="@*|node()">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+    </xsl:copy>
+  </xsl:template>
+
+</xsl:stylesheet>
diff --git a/lib/venus/filters/excerpt.py b/lib/venus/filters/excerpt.py
new file mode 100644
index 0000000..c1d4e9a
--- /dev/null
+++ b/lib/venus/filters/excerpt.py
@@ -0,0 +1,109 @@
+"""
+Generate an excerpt from either the summary or the content of an entry.
+
+Parameters:
+  width: maximum number of characters in the excerpt. Default: 500
+  omit: whitespace delimited list of html tags to remove. Default: none
+  target: name of element created. Default: planet:excerpt
+
+Notes:
+ * if 'img' is in the list of tags to be omitted <img> tags are replaced with
+   hypertext links associated with the value of the 'alt' attribute. If there
+   is no alt attribute value, <img> is used instead. If the parent element
+   of the img tag is already an <a> tag, no additional hypertext links are
+   added.
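+
+Illustrative standalone invocation (file names here are hypothetical; within
+planet the filter is normally configured in the ini file instead):
+
+    python filters/excerpt.py --width 200 --omit "img p br" < entry.xml
+
+This reads a normalized Atom entry on stdin and writes it back out with a
+planet:excerpt element, truncated to the requested width, appended.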
+""" + +import sys, xml.dom.minidom, textwrap +from xml.dom import Node, minidom + +atomNS = 'http://www.w3.org/2005/Atom' +planetNS = 'http://planet.intertwingly.net/' + +args = dict(zip([name.lstrip('-') for name in sys.argv[1::2]], sys.argv[2::2])) + +wrapper = textwrap.TextWrapper(width=int(args.get('width','500'))) +omit = args.get('omit', '').split() +target = args.get('target', 'planet:excerpt') + +class copy: + """ recursively copy a source to a target, up to a given width """ + + def __init__(self, dom, source, target): + self.dom = dom + self.full = False + self.text = [] + self.textlen = 0 + self.copyChildren(source, target) + + def copyChildren(self, source, target): + """ copy child nodes of a source to the target """ + for child in source.childNodes: + if child.nodeType == Node.ELEMENT_NODE: + self.copyElement(child, target) + elif child.nodeType == Node.TEXT_NODE: + self.copyText(child.data, target) + if self.full: break + + def copyElement(self, source, target): + """ copy source element to the target """ + + # check the omit list + if source.nodeName in omit: + if source.nodeName == 'img': + return self.elideImage(source, target) + return self.copyChildren(source, target) + + # copy element, attributes, and children + child = self.dom.createElementNS(source.namespaceURI, source.nodeName) + target.appendChild(child) + for i in range(0, source.attributes.length): + attr = source.attributes.item(i) + child.setAttributeNS(attr.namespaceURI, attr.name, attr.value) + self.copyChildren(source, child) + + def elideImage(self, source, target): + """ copy an elided form of the image element to the target """ + alt = source.getAttribute('alt') or '<img>' + src = source.getAttribute('src') + + if target.nodeName == 'a' or not src: + self.copyText(alt, target) + else: + child = self.dom.createElement('a') + child.setAttribute('href', src) + self.copyText(alt, child) + target.appendChild(child) + + def copyText(self, source, target): + """ copy text to the target, until the point where it would wrap """ + if not source.isspace() and source.strip(): + self.text.append(source.strip()) + lines = wrapper.wrap(' '.join(self.text)) + if len(lines) == 1: + target.appendChild(self.dom.createTextNode(source)) + self.textlen = len(lines[0]) + elif lines: + excerpt = source[:len(lines[0])-self.textlen] + u' \u2026' + target.appendChild(dom.createTextNode(excerpt)) + self.full = True + +# select summary or content element +dom = minidom.parse(sys.stdin) +source = dom.getElementsByTagNameNS(atomNS, 'summary') +if not source: + source = dom.getElementsByTagNameNS(atomNS, 'content') + +# if present, recursively copy it to a planet:excerpt element +if source: + if target.startswith('planet:'): + dom.documentElement.setAttribute('xmlns:planet', planetNS) + if target.startswith('atom:'): target = target.split(':',1)[1] + excerpt = dom.createElementNS(planetNS, target) + source[0].parentNode.appendChild(excerpt) + copy(dom, source[0], excerpt) + if source[0].nodeName == excerpt.nodeName: + source[0].parentNode.removeChild(source[0]) + +# print out results +print dom.toxml('utf-8') diff --git a/lib/venus/filters/h1title.xslt b/lib/venus/filters/h1title.xslt new file mode 100644 index 0000000..3f419dd --- /dev/null +++ b/lib/venus/filters/h1title.xslt @@ -0,0 +1,30 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml"> + + <!-- Replace title with value of h1, if present --> + <xsl:template 
match="atom:title"> + <xsl:apply-templates select="@*"/> + <xsl:copy> + <xsl:choose> + <xsl:when test="count(//xhtml:h1) = 1"> + <xsl:value-of select="normalize-space(//xhtml:h1)"/> + </xsl:when> + <xsl:otherwise> + <xsl:apply-templates select="node()"/> + </xsl:otherwise> + </xsl:choose> + </xsl:copy> + </xsl:template> + + <!-- Remove all h1s --> + <xsl:template match="xhtml:h1"/> + + <!-- pass through everything else --> + <xsl:template match="@*|node()"> + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + +</xsl:stylesheet> diff --git a/lib/venus/filters/html2xhtml.plugin b/lib/venus/filters/html2xhtml.plugin new file mode 100644 index 0000000..3ab7a8c --- /dev/null +++ b/lib/venus/filters/html2xhtml.plugin @@ -0,0 +1,6 @@ +import sys +import html5lib +tree=html5lib.treebuilders.dom.TreeBuilder +parser = html5lib.html5parser.HTMLParser(tree=tree) +document = parser.parse(sys.stdin) +sys.stdout.write(document.toxml("utf-8")) diff --git a/lib/venus/filters/mememe.plugin b/lib/venus/filters/mememe.plugin new file mode 100644 index 0000000..99cbd11 --- /dev/null +++ b/lib/venus/filters/mememe.plugin @@ -0,0 +1,496 @@ +# +# This Venus output filter will annotate an XHTML page with a list of +# "memes" (or most popular linked destinations, based on the last week +# of entries from the cache) and will update the subscription list with +# links to recent entries from each subscription. +# +# Templates that don't produce XHTML natively will need their output passed +# through html2xhtml.plugin first. +# +# Typical configuration (based on classic_fancy): +# +# [index.html.tmpl] +# filters: +# html2xhtml.plugin +# mememe.plugin +# +# [mememe.plugin] +# sidebar = @class='sidebar' +# + +import glob, libxml2, os, time, sys, sgmllib, urllib2, urlparse, re, md5 +from xml.sax.saxutils import escape +from htmlentitydefs import entitydefs + +import planet +from planet import config +from planet.spider import filename +import feedparser +log = planet.logger +options = config.filter_options(sys.argv[0]) +spam = options.get('spam', '').split() + +MEMES_ATOM = os.path.join(config.output_dir(),'memes.atom') + +now = time.time() +week = 7 * 86400 +week_ago = now - week + +cache = config.cache_directory() +meme_cache = os.path.join(cache, 'memes') +if not os.path.exists(meme_cache): os.makedirs(meme_cache) + +bom = config.bill_of_materials() +if not 'images/tcosm11.gif' in bom: + bom.append('images/tcosm11.gif') + config.parser.set('Planet', 'bill_of_materials', ' '.join(bom)) + +all_links = {} +feed_links = {} + +def check_cache(url): + try: + file = open(filename(meme_cache, url)) + headers = eval(file.read()) + file.close() + return headers or {} + except: + return {} + +def cache_meme(url, headers): + json = [] + for key,value in headers.items(): + json.append(' %s: %s' % (toj(key), toj(value))) + file = open(filename(meme_cache, url),'w') + file.write('{\n' + ',\n'.join(json) + '\n}\n') + file.close() + +urlmap = {} +revmap = {} +def canonicalize(url): + url = urlmap.get(url,url) + parts = list(urlparse.urlparse(url)) + + parts[0] = parts[0].lower() + parts[1] = parts[1].lower() + if parts[1].startswith('www.'): parts[1]=parts[1][4:] + if not parts[2]: parts[2] = '/' + parts[-1] = '' + + canonurl = urlparse.urlunparse(parts) + revmap[canonurl] = url + return canonurl + +def unique_votes(links): + voters = [] + for weight, entry, feed, title, author, mtime in links: + if feed not in voters: voters.append(feed) + return len(voters) + +log.debug("Loading cached data") 
+for name in glob.glob(os.path.join(cache, '*')): + # ensure that this is within the past week + if os.path.isdir(name): continue + mtime = os.stat(name).st_mtime + if mtime < week_ago: continue + + # parse the file + try: + doc = libxml2.parseFile(name) + except: + continue + xp = doc.xpathNewContext() + xp.xpathRegisterNs("atom", "http://www.w3.org/2005/Atom") + xp.xpathRegisterNs("planet", "http://planet.intertwingly.net/") + + # determine the entry + entry = xp.xpathEval("/atom:entry/atom:link[@rel='alternate']") + if not entry: continue + entry = canonicalize(entry[0].prop("href")) + + # determine the title + title = xp.xpathEval("/atom:entry/atom:title") + if title: + if title[0].prop('type') == 'html': + title = re.sub('<.*?>','',title[0].content) + else: + title = title[0].content + title = str(title or '') + + # determine the feed id + feed = xp.xpathEval("/atom:entry/atom:source/planet:memegroup") + if not feed: feed = xp.xpathEval("/atom:entry/atom:source/atom:id") + if not feed: continue + feed = feed[0].content + + # determine the author + author = xp.xpathEval("/atom:entry/atom:source/planet:name") + if author: + author = author[0].content + else: + author = '' + + # track the feed_links + if author: + if not feed_links.has_key(author): feed_links[author] = list() + feed_links[author].append([mtime, entry, title]) + + # identify the unique links + entry_links = [] + for node in doc.xpathEval("//*[@href and not(@rel='source') and not(@rel='license')]"): + parent = node.parent + while parent: + if parent.name == 'source': break + parent = parent.parent + else: + link = canonicalize(node.prop('href')) + if not link in entry_links: + entry_links.append(link) + if node.hasProp('title') and node.prop('title').startswith('http'): + link = canonicalize(node.prop('title')) + if not link in entry_links: + entry_links.append(link) + + # add the votes + weight = 1.0 - (now - mtime)**2 / week**2 + vote = [(weight, str(entry), str(feed), title, author, mtime)] + for link in entry_links: + all_links[link] = all_links.get(link,list()) + vote + + # free the entry + doc.freeDoc() + +# tally the votes +weighted_links = [] +for link, votes in all_links.items(): + site = {} + updated = 0 + for weight, entry, feed, title, author, mtime in votes: + site[feed] = max(site.get(feed,0), weight) + if mtime > updated: updated=mtime + weighted_links.append((sum(site.values()), link, updated)) +weighted_links.sort() +weighted_links.reverse() + +cp1252 = { + 128: 8364, # euro sign + 130: 8218, # single low-9 quotation mark + 131: 402, # latin small letter f with hook + 132: 8222, # double low-9 quotation mark + 133: 8230, # horizontal ellipsis + 134: 8224, # dagger + 135: 8225, # double dagger + 136: 710, # modifier letter circumflex accent + 137: 8240, # per mille sign + 138: 352, # latin capital letter s with caron + 139: 8249, # single left-pointing angle quotation mark + 140: 338, # latin capital ligature oe + 142: 381, # latin capital letter z with caron + 145: 8216, # left single quotation mark + 146: 8217, # right single quotation mark + 147: 8220, # left double quotation mark + 148: 8221, # right double quotation mark + 149: 8226, # bullet + 150: 8211, # en dash + 151: 8212, # em dash + 152: 732, # small tilde + 153: 8482, # trade mark sign + 154: 353, # latin small letter s with caron + 155: 8250, # single right-pointing angle quotation mark + 156: 339, # latin small ligature oe + 158: 382, # latin small letter z with caron + 159: 376} # latin capital letter y with diaeresis + +# determine the 
title for a given url +class html(sgmllib.SGMLParser): + def __init__(self, url): + sgmllib.SGMLParser.__init__(self) + self.title = "" + self.feedurl = "" + self.intitle = False + + headers = check_cache(url) + + try: + # fetch the page + request = urllib2.Request(url) + request.add_header('User-Agent', 'Venus/MeMeme') + if headers.has_key('etag'): + request.add_header('If-None-Match', headers['etag']) + if headers.has_key('last_modified'): + request.add_header('If-Modified-Since', headers['last-modified']) + response = urllib2.urlopen(request) + self.feed(response.read()) + + # ensure the data is in utf-8 + try: + self.title = self.title.decode('utf-8') + except: + self.title = ''.join([unichr(cp1252.get(ord(c),ord(c))) + for c in self.title.decode('iso-8859-1')]) + + # cache the results + headers = {} + if self.feedurl: headers['feedurl'] = self.feedurl + if self.title: headers['title'] = self.title + headers.update(response.headers) + cache_meme(url, headers) + except: + self.feedurl = headers.get('feedurl') + if headers.has_key('title'): + if isinstance(headers['title'],str): + self.title=eval('u'+repr(headers['title']).replace('\\\\','\\')) + else: + self.title=headers['title'] + + # if there is a feed, look for an entry that matches, and take that title + if self.feedurl and not self.title: + headers = check_cache(self.feedurl) + data = feedparser.parse(self.feedurl, etag=headers.get('etag'), + modified=headers.get('last-modified')) + + if data.has_key('headers') and data.has_key('status') and \ + data.status in [200, 301, 302]: + + titles = {} + for entry in data.entries: + if entry.has_key('title_detail') and entry.has_key('link'): + titles[entry.link] = entry.title_detail.value + if entry.title_detail.type == 'text/plain': + titles[entry.link] = escape(titles[entry.link]) + + if titles.has_key(url): self.title = titles[url] + + data.headers.update(titles) + cache_meme(self.feedurl, data.headers) + else: + if headers.has_key(url): + if isinstance(headers[url],str): + self.title=eval('u'+repr(headers[url]).replace('\\\\','\\')) + else: + self.title=headers[url] + + # fallback is the basename of the URI + if not self.title: + self.title = escape(url.rstrip('/').split('/')[-1].split('?')[0]) + + # parse out the first autodiscovery link + def start_link(self, attrs): + if self.feedurl: return + attrs = dict(map(lambda (k,v): (k.lower(),v), attrs)) + if not 'rel' in attrs: return + rels = attrs['rel'].split(' ') + if 'alternate' not in rels: return + if not 'type' in attrs or not attrs['type'].endswith('xml'): return + if 'href' in attrs: + self.feedurl = attrs['href'] + + # parse the page title + def start_title(self, attributes): + if not self.title: self.intitle = True + def end_title(self): + self.intitle = False + def handle_data(self, text): + if self.intitle: self.title += escape(text) + +# convert unicode string to a json string +def toj(value): + result = repr(value).replace(r'\x',r'\u00') + if result[:1] == 'u': result=result[1:] + if result.startswith("'"): + result = '"%s"' % result.replace('"',r'\"').replace(r"\'","'")[1:-1] + return result + +seenit = [] +count = 0 + +# construct an empty feed +feed_doc = libxml2.newDoc("1.0") +meme_feed = feed_doc.newChild(None, "feed", None) +meme_feed.newNs('http://www.w3.org/2005/Atom', None) +meme_feed.newTextChild(None, 'title', config.name() + ': Memes') +author = meme_feed.newChild(None, 'author', None) +author.newTextChild(None, 'name', config.owner_name()) +if config.owner_email: author.newTextChild(None, 'email', 
config.owner_email()) +meme_feed.newTextChild(None, 'id', os.path.join(config.link(), 'memes.atom')) +link = meme_feed.newChild(None, 'link', None) +link.setProp('href', os.path.join(config.link(), 'memes.atom')) +link.setProp('rel', 'self') +meme_feed.newTextChild(None, 'updated', + time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())) + +# parse the input +log.debug("Parse input") +doc=libxml2.readDoc(sys.stdin.read(), '', 'utf-8', libxml2.XML_PARSE_NONET) + +# find the sidebar/footer +sidebar = options.get('sidebar','//*[@class="sidebar"]') +footer = doc.xpathEval(sidebar) +if not hasattr(footer,'__len__') or len(footer) == 0: + raise Exception(sidebar + ' not found') +if len(footer) > 1: + log.info("%d occurrences of %s found, taking last" % (len(footer),sidebar)) + if '@id' in sidebar: + for element in footer[:-1]: + element.unsetProp('id') +footer = footer[-1] + +# add up to 10 entry links to each subscription +subs_ul = footer.children +while subs_ul.isText() or subs_ul.name != 'ul': subs_ul = subs_ul.next +child = subs_ul.children +while child: + if child.name == 'li': + if child.lastChild().name == 'ul': child.lastChild().unlinkNode() + link = child.lastChild() + while link.isText(): link=link.prev + author = link.getContent() + state = 'inactive' + if feed_links.has_key(author): + ul2 = child.newChild(None, 'ul', None) + feed_links[author].sort() + feed_links[author].reverse() + link_count = 0 + for mtime, entry, title in feed_links[author]: + if not title: continue + li2 = ul2.newChild(None, 'li', None) + a = li2.newTextChild(None, 'a', title) + a.setProp('href', revmap.get(entry,entry)) + link_count = link_count + 1 + if link_count >= 10: break + if link_count > 0: state = None + if state: + link.setProp('class',((link.prop('class') or '') + ' ' + state).strip()) + child=child.next + +# create a h2 and ul for the memes list +footer_top = footer.children +memes = footer_top.addPrevSibling(footer.newTextChild(None, 'h2', 'Memes ')) +memes_ul = footer_top.addPrevSibling(footer.newChild(None, 'ul', None)) + +# create a header for the memes list +a = memes.newChild(None, 'a', None) +a.setProp('href', 'memes.atom') +img = a.newChild(None, 'img', None) +img.setProp('src', 'images/feed-icon-10x10.png') + +# collect the results +log.debug("Fetch titles and collect the results") +from urllib import quote_plus +for i in range(0,len(weighted_links)): + weight, link, updated = weighted_links[i] + if link in spam: continue + + # ensure that somebody new points to this entry. This guards against + # groups of related links which several posts point to all. + novel = False + for weight, entry, feed, title, author, mtime in all_links[link]: + if entry not in seenit: + seenit.append(entry) + novel = True + if not novel: continue + + all_links[link].sort() + all_links[link].reverse() + cache_file = filename(cache, link) + title = None + + # when possible, take the title from the cache + if os.path.exists(cache_file): + entry = feedparser.parse(cache_file).entries[0] + if entry.has_key('title_detail'): + title = entry.title_detail.value + if entry.title_detail.type == 'text/plain': title = escape(title) + + # otherwise, parse the html + if not title: + title = html(revmap.get(link,link)).title + + # dehtmlize + title = re.sub('&(\w+);', + lambda n: entitydefs.get(n.group(1), '&'+n.group(1)+';'), title) + title = re.sub('&#(\d+);',lambda n: unichr(int(n.group(1))), title) + title = re.sub('&#x(\w+);',lambda n: unichr(int(n.group(1),16)), title) + + # title too long? 
Insert zero width spaces where appropriate + if max(map(len,title.split())) > 30: + title=re.sub('(\W+)',u'\\1\u200b',title) + + # save the entry title (it is used later) + entry_title = title.strip() + + # add to the memes list + memes_ul.addContent('\n') + li = memes_ul.newChild(None, 'li', None) + memes_ul.addContent('\n') + + # technorati link + a = li.newChild(None, 'a', None) + tlink = 'http://technorati.com/blogs/' + if link.startswith('http://'): + a.setProp('href',tlink + quote_plus(link[7:])) + else: + a.setProp('href',tlink + quote_plus(link)) + a.setProp('title','cosmos') + img = a.newChild(None, 'img', None) + img.setProp('src','images/tcosm11.gif') + + # main link + a = li.newTextChild(None, 'a', title.strip().encode('utf-8')) + a.setProp('href',revmap.get(link,link)) + if (((i==0) or (updated>=weighted_links[i-1][2])) and + (i+1==len(weighted_links) or (updated>=weighted_links[i+1][2]))): + rank = 0 + for j in range(0,len(weighted_links)): + if updated < weighted_links[j][2]: rank = rank + 1 + if rank < len(weighted_links)/2: + a.setProp('class','rising') + + # voters + ul2 = li.newChild(None, 'ul', None) + voters = [] + for weight, entry, feed, title, author, mtime in all_links[link]: + if entry in voters: continue + li2 = ul2.newChild(None, 'li', None) + a = li2.newTextChild(None, 'a' , author) + a.setProp('href',revmap.get(entry,entry)) + if title: a.setProp('title',title) + voters.append(entry) + + # add to the meme feed + if unique_votes(all_links[link]) > 2: + meme_feed.addContent('\n') + entry = meme_feed.newChild(None, 'entry', None) + meme_feed.addContent('\n') + + # entry + tagbase = config.link().split('/') + if not tagbase[-1]: tagbase = tagbase[:-1] + tagbase = 'tag:%s,2007:%smeme/%%s' % (tagbase[2],'/'.join(tagbase[3:])) + entry.newTextChild(None, 'id', tagbase % md5.new(link).hexdigest()) + entry.newTextChild(None, 'title', entry_title.encode('utf-8')) + meme_link = entry.newTextChild(None, 'link', None) + meme_link.setProp('href', link) + entry.newTextChild(None, 'updated', + time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(updated))) + + # voters + content = entry.newChild(None, 'content', None) + content.setProp('type', 'xhtml') + div = content.newTextChild(None, 'div', 'Spotted by:') + div.newNs('http://www.w3.org/1999/xhtml', None) + content_ul = div.newChild(None, 'ul', None) + for weight, entry, feed, title, author, mtime in all_links[link]: + li2 = content_ul.newTextChild(None, 'li', author + ": ") + a = li2.newTextChild(None, 'a' , title or 'untitled') + a.setProp('href',entry) + + count = count + 1 + if count >= 10: break + +log.info("Writing " + MEMES_ATOM) +output=open(MEMES_ATOM,'w') +output.write(feed_doc.serialize('utf-8')) +output.close() + +sys.stdout.write(doc.serialize('utf-8')) diff --git a/lib/venus/filters/minhead.py b/lib/venus/filters/minhead.py new file mode 100644 index 0000000..b9c225e --- /dev/null +++ b/lib/venus/filters/minhead.py @@ -0,0 +1,36 @@ +# +# Ensure that all headings are below a permissible maximum (like h3). +# If not, all heading levels will be changed to conform. +# Note: this may create "illegal" heading levels, like h7 and beyond. 
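+#
+# Illustrative example (added for clarity, not in the original file): when
+# run with "--min 3" on a document whose top-level heading is h1, every
+# heading is pushed down two levels, so h1 becomes h3, h2 becomes h4, and
+# so on.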
+#
+
+import sys
+from xml.dom import minidom, XHTML_NAMESPACE
+
+# determine permissible minimum heading
+if '--min' in sys.argv:
+  minhead = int(sys.argv[sys.argv.index('--min')+1])
+else:
+  minhead=3
+
+# parse input stream
+doc = minidom.parse(sys.stdin)
+
+# search for headings below the permissible minimum
+first=minhead
+for i in range(1,minhead):
+  if doc.getElementsByTagName('h%d' % i):
+    first=i
+    break
+
+# if found, bump all headings so that the top is the permissible minimum
+if first < minhead:
+  for i in range(6,0,-1):
+    for oldhead in doc.getElementsByTagName('h%d' % i):
+      newhead = doc.createElementNS(XHTML_NAMESPACE, 'h%d' % (i+minhead-first))
+      for child in oldhead.childNodes:
+        newhead.appendChild(child)
+      oldhead.parentNode.replaceChild(newhead, oldhead)
+
+# return (possibly modified) document
+print doc.toxml('utf-8')
diff --git a/lib/venus/filters/notweets.py b/lib/venus/filters/notweets.py
new file mode 100644
index 0000000..5be6dd5
--- /dev/null
+++ b/lib/venus/filters/notweets.py
@@ -0,0 +1,6 @@
+#remove all tweets
+import sys
+
+data = sys.stdin.read()
+if data.find('<id>tag:twitter.com,') < 0:
+  sys.stdout.write(data)
diff --git a/lib/venus/filters/regexp_sifter.py b/lib/venus/filters/regexp_sifter.py
new file mode 100644
index 0000000..a6f7c4f
--- /dev/null
+++ b/lib/venus/filters/regexp_sifter.py
@@ -0,0 +1,44 @@
+import sys, re
+
+# parse options
+options = dict(zip(sys.argv[1::2],sys.argv[2::2]))
+
+# read entry
+doc = data = sys.stdin.read()
+
+# Apply a sequence of patterns which turn a normalized Atom entry into
+# a stream of text, after removal of non-human metadata.
+for pattern,replacement in [
+  (re.compile('<id>.*?</id>'),' '),
+  (re.compile('<url>.*?</url>'),' '),
+  (re.compile('<source>.*?</source>'),' '),
+  (re.compile('<updated.*?</updated>'),' '),
+  (re.compile('<published.*?</published>'),' '),
+  (re.compile('<link .*?>'),' '),
+  (re.compile('''<[^>]* alt=['"]([^'"]*)['"].*?>'''),r' \1 '),
+  (re.compile('''<[^>]* title=['"]([^'"]*)['"].*?>'''),r' \1 '),
+  (re.compile('''<[^>]* label=['"]([^'"]*)['"].*?>'''),r' \1 '),
+  (re.compile('''<[^>]* term=['"]([^'"]*)['"].*?>'''),r' \1 '),
+  (re.compile('<.*?>'),' '),
+  (re.compile('\s+'),' '),
+  (re.compile('&gt;'),'>'),
+  (re.compile('&lt;'),'<'),
+  (re.compile('&apos;'),"'"),
+  (re.compile('&quot;'),'"'),
+  (re.compile('&amp;'),'&'),
+  (re.compile('\s+'),' ')
+]:
+  data=pattern.sub(replacement,data)
+
+# process requirements
+if options.has_key('--require'):
+  for regexp in options['--require'].split('\n'):
+    if regexp and not re.search(regexp,data): sys.exit(1)
+
+# process exclusions
+if options.has_key('--exclude'):
+  for regexp in options['--exclude'].split('\n'):
+    if regexp and re.search(regexp,data): sys.exit(1)
+
+# if we get this far, the feed is to be included
+print doc
diff --git a/lib/venus/filters/stripAd/feedburner.sed b/lib/venus/filters/stripAd/feedburner.sed
new file mode 100644
index 0000000..d203ccd
--- /dev/null
+++ b/lib/venus/filters/stripAd/feedburner.sed
@@ -0,0 +1 @@
+s|<p><a href="http://[a-zA-Z0-9\-\.]*/~a/[a-zA-Z0-9]*?a=[a-zA-Z0-9]*"><img border="0" src="http://[a-zA-Z0-9\.\-]*/~a/[a-zA-Z0-9/]*?i=[a-zA-Z0-9]*"/></a></p>||g
diff --git a/lib/venus/filters/stripAd/google_ad_map.sed b/lib/venus/filters/stripAd/google_ad_map.sed
new file mode 100644
index 0000000..b802a09
--- /dev/null
+++ b/lib/venus/filters/stripAd/google_ad_map.sed
@@ -0,0 +1 @@
+s|<p><map name="google_ad_map.*</p>||
diff --git a/lib/venus/filters/stripAd/yahoo.sed b/lib/venus/filters/stripAd/yahoo.sed
new file mode 100644
index 0000000..03cd9dd
--- /dev/null
+++ b/lib/venus/filters/stripAd/yahoo.sed
@@ -0,0 +1 @@
+s|<p><!-- begin(Yahoo ad) -->.*<!-- end(Yahoo ad) --></p>||
diff --git a/lib/venus/filters/xhtml2html.plugin b/lib/venus/filters/xhtml2html.plugin
new file mode 100644
index 0000000..b3aaff8
--- /dev/null
+++ b/lib/venus/filters/xhtml2html.plugin
@@ -0,0 +1,31 @@
+# Example usages:
+#
+# filters:
+#   xhtml2html.plugin?quote_attr_values=True&quote_char="'"
+#
+# -- or --
+#
+# [xhtml2html.plugin]
+#   quote_attr_values=True
+#   quote_char="'"
+
+import sys
+opts = {}
+for name,value in zip(sys.argv[1::2],sys.argv[2::2]):
+  name = name.lstrip('-')
+  try: opts[name] = eval(value)
+  except: opts[name] = value
+
+try:
+  from xml.dom import minidom
+  doc = minidom.parse(sys.stdin)
+except:
+  from html5lib import liberalxmlparser, treebuilders
+  parser = liberalxmlparser.XHTMLParser(tree=treebuilders.getTreeBuilder('dom'))
+  doc = parser.parse(sys.stdin, encoding='utf-8')
+
+from html5lib import treewalkers, serializer
+tokens = treewalkers.getTreeWalker('dom')(doc)
+serializer = serializer.HTMLSerializer(**dict(opts))
+for text in serializer.serialize(tokens, encoding='utf-8'):
+  sys.stdout.write(text)
diff --git a/lib/venus/filters/xpath_sifter.py b/lib/venus/filters/xpath_sifter.py
new file mode 100644
index 0000000..c7c14c4
--- /dev/null
+++ b/lib/venus/filters/xpath_sifter.py
@@ -0,0 +1,23 @@
+import sys, libxml2
+
+# parse options
+options = dict(zip(sys.argv[1::2],sys.argv[2::2]))
+
+# parse entry
+doc = libxml2.parseDoc(sys.stdin.read())
+ctxt = doc.xpathNewContext()
+ctxt.xpathRegisterNs('atom','http://www.w3.org/2005/Atom')
+ctxt.xpathRegisterNs('xhtml','http://www.w3.org/1999/xhtml')
+
+# process requirements
+if options.has_key('--require'):
+  for xpath in options['--require'].split('\n'):
+    if xpath and not ctxt.xpathEval(xpath): sys.exit(1)
+
+# process exclusions
+if options.has_key('--exclude'):
+  for xpath in options['--exclude'].split('\n'):
+    if xpath and ctxt.xpathEval(xpath): sys.exit(1)
+
+# if we get this far, the feed is to be included
+print doc
diff --git a/lib/venus/planet.py b/lib/venus/planet.py
new file mode 100755
index 0000000..c278c06
--- /dev/null
+++ b/lib/venus/planet.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+"""The Planet aggregator.
+
+A flexible and easy-to-use aggregator for generating websites.
+
+Visit http://www.planetplanet.org/ for more information and to download
+the latest version.
+
+Requires Python 2.1, recommends 2.3.
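+
+Typical invocations (illustrative; substitute your own configuration file):
+
+    python planet.py config.ini        # spider all subscribed feeds, then
+                                       # write the output templates
+    python planet.py -v -o config.ini  # regenerate output from the cache
+                                       # only, with DEBUG level logging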
+""" + +__authors__ = [ "Scott James Remnant <scott@netsplit.com>", + "Jeff Waugh <jdub@perkypants.org>" ] +__license__ = "Python" + + +import os, sys + +if __name__ == "__main__": + config_file = "config.ini" + offline = 0 + verbose = 0 + only_if_new = 0 + expunge = 0 + + for arg in sys.argv[1:]: + if arg == "-h" or arg == "--help": + print "Usage: planet [options] [CONFIGFILE]" + print + print "Options:" + print " -v, --verbose DEBUG level logging during update" + print " -o, --offline Update the Planet from the cache only" + print " -h, --help Display this help message and exit" + print " -n, --only-if-new Only spider new feeds" + print " -x, --expunge Expunge old entries from cache" + print + sys.exit(0) + elif arg == "-v" or arg == "--verbose": + verbose = 1 + elif arg == "-o" or arg == "--offline": + offline = 1 + elif arg == "-n" or arg == "--only-if-new": + only_if_new = 1 + elif arg == "-x" or arg == "--expunge": + expunge = 1 + elif arg.startswith("-"): + print >>sys.stderr, "Unknown option:", arg + sys.exit(1) + else: + config_file = arg + + from planet import config + config.load(config_file) + + if verbose: + import planet + planet.getLogger('DEBUG',config.log_format()) + + if not offline: + from planet import spider + try: + spider.spiderPlanet(only_if_new=only_if_new) + except Exception, e: + print e + + from planet import splice + doc = splice.splice() + splice.apply(doc.toxml('utf-8')) + + if expunge: + from planet import expunge + expunge.expungeCache diff --git a/lib/venus/planet/__init__.py b/lib/venus/planet/__init__.py new file mode 100644 index 0000000..f90dfe9 --- /dev/null +++ b/lib/venus/planet/__init__.py @@ -0,0 +1,40 @@ +xmlns = 'http://planet.intertwingly.net/' + +logger = None +loggerParms = None + +import os, sys, re +import config +config.__init__() + +from ConfigParser import ConfigParser +from urlparse import urljoin + +def getLogger(level, format): + """ get a logger with the specified log level """ + global logger, loggerParms + if logger and loggerParms == (level,format): return logger + + try: + import logging + logging.basicConfig(format=format) + except: + import compat_logging as logging + logging.basicConfig(format=format) + + logger = logging.getLogger("planet.runner") + logger.setLevel(logging.getLevelName(level)) + try: + logger.warning + except: + logger.warning = logger.warn + + loggerParms = (level,format) + return logger + +sys.path.insert(1, os.path.join(os.path.dirname(__file__),'vendor')) + +# Configure feed parser +import feedparser +feedparser.SANITIZE_HTML=0 +feedparser.RESOLVE_RELATIVE_URIS=0 diff --git a/lib/venus/planet/config.py b/lib/venus/planet/config.py new file mode 100644 index 0000000..e1325d1 --- /dev/null +++ b/lib/venus/planet/config.py @@ -0,0 +1,400 @@ +""" +Planet Configuration + +This module encapsulates all planet configuration. This is not a generic +configuration parser, it knows everything about configuring a planet - from +the structure of the ini file, to knowledge of data types, even down to +what are the defaults. 
+ +Usage: + import config + config.load('config.ini') + + # administrative / structural information + print config.template_files() + print config.subscriptions() + + # planet wide configuration + print config.name() + print config.link() + + # per template configuration + print config.days_per_page('atom.xml.tmpl') + print config.encoding('index.html.tmpl') + +Todo: + * error handling (example: no planet section) +""" + +import os, sys, re, urllib +from ConfigParser import ConfigParser +from urlparse import urljoin + +parser = ConfigParser() + +planet_predefined_options = ['filters'] + +def __init__(): + """define the struture of an ini file""" + import config + + # get an option from a section + def get(section, option, default): + if section and parser.has_option(section, option): + return parser.get(section, option) + elif parser.has_option('Planet', option): + if option == 'log_format': + return parser.get('Planet', option, raw=True) + return parser.get('Planet', option) + else: + return default + + # expand %(var) in lists + def expand(list): + output = [] + wild = re.compile('^(.*)#{(\w+)}(.*)$') + for file in list.split(): + match = wild.match(file) + if match: + pre,var,post = match.groups() + for sub in subscriptions(): + value = feed_options(sub).get(var,None) + if value: + output.append(pre+value+post) + else: + output.append(file) + return output + + # define a string planet-level variable + def define_planet(name, default): + setattr(config, name, lambda default=default: get(None,name,default)) + planet_predefined_options.append(name) + + # define a list planet-level variable + def define_planet_int(name, default=0): + setattr(config, name, lambda : int(get(None,name,default))) + planet_predefined_options.append(name) + + # define a list planet-level variable + def define_planet_list(name, default=''): + setattr(config, name, lambda : expand(get(None,name,default))) + planet_predefined_options.append(name) + + # define a string template-level variable + def define_tmpl(name, default): + setattr(config, name, lambda section, default=default: + get(section,name,default)) + + # define an int template-level variable + def define_tmpl_int(name, default): + setattr(config, name, lambda section, default=default: + int(get(section,name,default))) + + # planet wide options + define_planet('name', "Unconfigured Planet") + define_planet('link', '') + define_planet('cache_directory', "cache") + define_planet('log_level', "WARNING") + define_planet('log_format', "%(levelname)s:%(name)s:%(message)s") + define_planet('date_format', "%B %d, %Y %I:%M %p") + define_planet('new_date_format', "%B %d, %Y") + define_planet('generator', 'Venus') + define_planet('generator_uri', 'http://intertwingly.net/code/venus/') + define_planet('owner_name', 'Anonymous Coward') + define_planet('owner_email', '') + define_planet('output_theme', '') + define_planet('output_dir', 'output') + define_planet('spider_threads', 0) + + define_planet_int('new_feed_items', 0) + define_planet_int('feed_timeout', 20) + define_planet_int('cache_keep_entries', 10) + + define_planet_list('template_files') + define_planet_list('bill_of_materials') + define_planet_list('template_directories', '.') + define_planet_list('filter_directories') + + # template options + define_tmpl_int('days_per_page', 0) + define_tmpl_int('items_per_page', 60) + define_tmpl_int('activity_threshold', 0) + define_tmpl('encoding', 'utf-8') + define_tmpl('content_type', 'utf-8') + define_tmpl('ignore_in_feed', '') + define_tmpl('name_type', '') + 
define_tmpl('title_type', '') + define_tmpl('summary_type', '') + define_tmpl('content_type', '') + define_tmpl('future_dates', 'keep') + define_tmpl('xml_base', '') + define_tmpl('filter', None) + define_tmpl('exclude', None) + +def load(config_file): + """ initialize and load a configuration""" + global parser + parser = ConfigParser() + parser.read(config_file) + + import config, planet + from planet import opml, foaf, csv_config + log = planet.logger + if not log: + log = planet.getLogger(config.log_level(),config.log_format()) + + # Theme support + theme = config.output_theme() + if theme: + for path in ("", os.path.join(sys.path[0],'themes')): + theme_dir = os.path.join(path,theme) + theme_file = os.path.join(theme_dir,'config.ini') + if os.path.exists(theme_file): + # initial search list for theme directories + dirs = config.template_directories() + if theme_dir not in dirs: + dirs.append(theme_dir) + if os.path.dirname(config_file) not in dirs: + dirs.append(os.path.dirname(config_file)) + + # read in the theme + parser = ConfigParser() + parser.read(theme_file) + bom = config.bill_of_materials() + + # complete search list for theme directories + dirs += [os.path.join(theme_dir,dir) for dir in + config.template_directories() if dir not in dirs] + + # merge configurations, allowing current one to override theme + template_files = config.template_files() + parser.set('Planet','template_files','') + parser.read(config_file) + for file in config.bill_of_materials(): + if not file in bom: bom.append(file) + parser.set('Planet', 'bill_of_materials', ' '.join(bom)) + parser.set('Planet', 'template_directories', ' '.join(dirs)) + parser.set('Planet', 'template_files', + ' '.join(template_files + config.template_files())) + break + else: + log.error('Unable to find theme %s', theme) + + # Filter support + dirs = config.filter_directories() + filter_dir = os.path.join(sys.path[0],'filters') + if filter_dir not in dirs and os.path.exists(filter_dir): + parser.set('Planet', 'filter_directories', ' '.join(dirs+[filter_dir])) + + # Reading list support + reading_lists = config.reading_lists() + if reading_lists: + if not os.path.exists(config.cache_lists_directory()): + os.makedirs(config.cache_lists_directory()) + + def data2config(data, cached_config): + if content_type(list).find('opml')>=0: + opml.opml2config(data, cached_config) + elif content_type(list).find('foaf')>=0: + foaf.foaf2config(data, cached_config) + elif content_type(list).find('csv')>=0: + csv_config.csv2config(data, cached_config) + elif content_type(list).find('config')>=0: + cached_config.readfp(data) + else: + from planet import shell + import StringIO + cached_config.readfp(StringIO.StringIO(shell.run( + content_type(list), data.getvalue(), mode="filter"))) + + if cached_config.sections() in [[], [list]]: + raise Exception + + for list in reading_lists: + downloadReadingList(list, parser, data2config) + +def downloadReadingList(list, orig_config, callback, use_cache=True, re_read=True): + from planet import logger + import config + try: + + import urllib2, StringIO + from planet.spider import filename + + # list cache file name + cache_filename = filename(config.cache_lists_directory(), list) + + # retrieve list options (e.g., etag, last-modified) from cache + options = {} + + # add original options + for key in orig_config.options(list): + options[key] = orig_config.get(list, key) + + try: + if use_cache: + cached_config = ConfigParser() + cached_config.read(cache_filename) + for option in cached_config.options(list): + 
options[option] = cached_config.get(list,option) + except: + pass + + cached_config = ConfigParser() + cached_config.add_section(list) + for key, value in options.items(): + cached_config.set(list, key, value) + + # read list + curdir=getattr(os.path, 'curdir', '.') + if sys.platform.find('win') < 0: + base = urljoin('file:', os.path.abspath(curdir)) + else: + path = os.path.abspath(os.path.curdir) + base = urljoin('file:///', path.replace(':','|').replace('\\','/')) + + request = urllib2.Request(urljoin(base + '/', list)) + if options.has_key("etag"): + request.add_header('If-None-Match', options['etag']) + if options.has_key("last-modified"): + request.add_header('If-Modified-Since', + options['last-modified']) + response = urllib2.urlopen(request) + if response.headers.has_key('etag'): + cached_config.set(list, 'etag', response.headers['etag']) + if response.headers.has_key('last-modified'): + cached_config.set(list, 'last-modified', + response.headers['last-modified']) + + # convert to config.ini + data = StringIO.StringIO(response.read()) + + if callback: callback(data, cached_config) + + # write to cache + if use_cache: + cache = open(cache_filename, 'w') + cached_config.write(cache) + cache.close() + + # re-parse and proceed + logger.debug("Using %s readinglist", list) + if re_read: + if use_cache: + orig_config.read(cache_filename) + else: + cdata = StringIO.StringIO() + cached_config.write(cdata) + cdata.seek(0) + orig_config.readfp(cdata) + except: + try: + if re_read: + if use_cache: + if not orig_config.read(cache_filename): raise Exception() + else: + cdata = StringIO.StringIO() + cached_config.write(cdata) + cdata.seek(0) + orig_config.readfp(cdata) + logger.info("Using cached %s readinglist", list) + except: + logger.exception("Unable to read %s readinglist", list) + +def http_cache_directory(): + if parser.has_option('Planet', 'http_cache_directory'): + os.path.join(cache_directory(), + parser.get('Planet', 'http_cache_directory')) + else: + return os.path.join(cache_directory(), "cache") + +def cache_sources_directory(): + if parser.has_option('Planet', 'cache_sources_directory'): + return os.path.join(cache_directory(), + parser.get('Planet', 'cache_sources_directory')) + else: + return os.path.join(cache_directory(), 'sources') + +def cache_lists_directory(): + if parser.has_option('Planet', 'cache_lists_directory'): + parser.get('Planet', 'cache_lists_directory') + else: + return os.path.join(cache_directory(), 'lists') + +def feed(): + if parser.has_option('Planet', 'feed'): + return parser.get('Planet', 'feed') + elif link(): + for template_file in template_files(): + name = os.path.splitext(os.path.basename(template_file))[0] + if name.find('atom')>=0 or name.find('rss')>=0: + return urljoin(link(), name) + +def feedtype(): + if parser.has_option('Planet', 'feedtype'): + parser.get('Planet', 'feedtype') + elif feed() and feed().find('atom')>=0: + return 'atom' + elif feed() and feed().find('rss')>=0: + return 'rss' + +def subscriptions(): + """ list the feed subscriptions """ + return __builtins__['filter'](lambda feed: feed!='Planet' and + feed not in template_files()+filters()+reading_lists(), + parser.sections()) + +def reading_lists(): + """ list of lists of feed subscriptions """ + result = [] + for section in parser.sections(): + if parser.has_option(section, 'content_type'): + type = parser.get(section, 'content_type') + if type.find('opml')>=0 or type.find('foaf')>=0 or \ + type.find('csv')>=0 or type.find('config')>=0 or \ + type.find('.')>=0: + 
result.append(section) + return result + +def filters(section=None): + filters = [] + if parser.has_option('Planet', 'filters'): + filters += parser.get('Planet', 'filters').split() + if filter(section): + filters.append('regexp_sifter.py?require=' + + urllib.quote(filter(section))) + if exclude(section): + filters.append('regexp_sifter.py?exclude=' + + urllib.quote(exclude(section))) + for section in section and [section] or template_files(): + if parser.has_option(section, 'filters'): + filters += parser.get(section, 'filters').split() + return filters + +def planet_options(): + """ dictionary of planet wide options""" + return dict(map(lambda opt: (opt, + parser.get('Planet', opt, raw=(opt=="log_format"))), + parser.options('Planet'))) + +def feed_options(section): + """ dictionary of feed specific options""" + import config + options = dict([(key,value) for key,value in planet_options().items() + if key not in planet_predefined_options]) + if parser.has_section(section): + options.update(dict(map(lambda opt: (opt, parser.get(section,opt)), + parser.options(section)))) + return options + +def template_options(section): + """ dictionary of template specific options""" + return feed_options(section) + +def filter_options(section): + """ dictionary of filter specific options""" + return feed_options(section) + +def write(file=sys.stdout): + """ write out an updated template """ + print parser.write(file) diff --git a/lib/venus/planet/csv_config.py b/lib/venus/planet/csv_config.py new file mode 100755 index 0000000..ba3be61 --- /dev/null +++ b/lib/venus/planet/csv_config.py @@ -0,0 +1,29 @@ +from ConfigParser import ConfigParser +import csv + +# input = csv, output = ConfigParser +def csv2config(input, config=None): + + if not hasattr(input, 'read'): + input = csv.StringIO(input) + + if not config: + config = ConfigParser() + + reader = csv.DictReader(input) + for row in reader: + section = row[reader.fieldnames[0]] + config.add_section(section) + for name, value in row.items(): + if value and name != reader.fieldnames[0]: + config.set(section, name, value) + + return config + +if __name__ == "__main__": + # small main program which converts CSV into config.ini format + import sys, urllib + config = ConfigParser() + for input in sys.argv[1:]: + csv2config(urllib.urlopen(input), config) + config.write(sys.stdout) diff --git a/lib/venus/planet/expunge.py b/lib/venus/planet/expunge.py new file mode 100644 index 0000000..de7e511 --- /dev/null +++ b/lib/venus/planet/expunge.py @@ -0,0 +1,67 @@ +""" Expunge old entries from a cache of entries """ +import glob, os, planet, config, feedparser +from xml.dom import minidom +from spider import filename + +def expungeCache(): + """ Expunge old entries from a cache of entries """ + log = planet.logger + + log.info("Determining feed subscriptions") + entry_count = {} + sources = config.cache_sources_directory() + for sub in config.subscriptions(): + data=feedparser.parse(filename(sources,sub)) + if not data.feed.has_key('id'): continue + if config.feed_options(sub).has_key('cache_keep_entries'): + entry_count[data.feed.id] = int(config.feed_options(sub)['cache_keep_entries']) + else: + entry_count[data.feed.id] = config.cache_keep_entries() + + log.info("Listing cached entries") + cache = config.cache_directory() + dir=[(os.stat(file).st_mtime,file) for file in glob.glob(cache+"/*") + if not os.path.isdir(file)] + dir.sort() + dir.reverse() + + for mtime,file in dir: + + try: + entry=minidom.parse(file) + # determine source of entry + entry.normalize() + 
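# A hedged usage sketch (not part of the patch) for planet/csv_config.py added
# above, in Python 2 to match the codebase: the CSV's first column becomes each
# [section] name, and every other non-empty cell becomes an option in that
# section. The feed URL and names below are hypothetical.
from StringIO import StringIO
from planet.csv_config import csv2config

csv_text = "url,name,filters\nhttp://example.com/feed.xml,Example Blog,\n"
config = csv2config(StringIO(csv_text))   # any file-like object will do
print config.sections()                   # ['http://example.com/feed.xml']
print config.get('http://example.com/feed.xml', 'name')   # Example Blog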
sources = entry.getElementsByTagName('source') + if not sources: + # no source determined, do not delete + log.debug("No source found for %s", file) + continue + ids = sources[0].getElementsByTagName('id') + if not ids: + # feed id not found, do not delete + log.debug("No source feed id found for %s", file) + continue + if ids[0].childNodes[0].nodeValue in entry_count: + # subscribed to feed, update entry count + entry_count[ids[0].childNodes[0].nodeValue] = entry_count[ + ids[0].childNodes[0].nodeValue] - 1 + if entry_count[ids[0].childNodes[0].nodeValue] >= 0: + # maximum not reached, do not delete + log.debug("Maximum not reached for %s from %s", + file, ids[0].childNodes[0].nodeValue) + continue + else: + # maximum reached + log.debug("Removing %s, maximum reached for %s", + file, ids[0].childNodes[0].nodeValue) + else: + # not subscribed + log.debug("Removing %s, not subscribed to %s", + file, ids[0].childNodes[0].nodeValue) + # remove old entry + os.unlink(file) + + except: + log.error("Error parsing %s", file) + +# end of expungeCache() diff --git a/lib/venus/planet/foaf.py b/lib/venus/planet/foaf.py new file mode 100644 index 0000000..6149c1f --- /dev/null +++ b/lib/venus/planet/foaf.py @@ -0,0 +1,197 @@ +from ConfigParser import ConfigParser + +inheritable_options = [ 'online_accounts' ] + +def load_accounts(config, section): + accounts = {} + if(config.has_option(section, 'online_accounts')): + values = config.get(section, 'online_accounts') + for account_map in values.split('\n'): + try: + homepage, map = account_map.split('|') + accounts[homepage] = map + except: + pass + + return accounts + +def load_model(rdf, base_uri): + + if hasattr(rdf, 'find_statements'): + return rdf + + if hasattr(rdf, 'read'): + rdf = rdf.read() + + def handler(code, level, facility, message, line, column, byte, file, uri): + pass + + from RDF import Model, Parser + + model = Model() + + Parser().parse_string_into_model(model,rdf,base_uri,handler) + + return model + +# input = foaf, output = ConfigParser +def foaf2config(rdf, config, subject=None, section=None): + + if not config or not config.sections(): + return + + # there should be only be 1 section + if not section: section = config.sections().pop() + + try: + from RDF import Model, NS, Parser, Statement + except: + return + + # account mappings, none by default + # form: accounts = {url to service homepage (as found in FOAF)}|{URI template}\n* + # example: http://del.icio.us/|http://del.icio.us/rss/{foaf:accountName} + accounts = load_accounts(config, section) + + depth = 0 + + if(config.has_option(section, 'depth')): + depth = config.getint(section, 'depth') + + model = load_model(rdf, section) + + dc = NS('http://purl.org/dc/elements/1.1/') + foaf = NS('http://xmlns.com/foaf/0.1/') + rdfs = NS('http://www.w3.org/2000/01/rdf-schema#') + rdf = NS('http://www.w3.org/1999/02/22-rdf-syntax-ns#') + rss = NS('http://purl.org/rss/1.0/') + + for statement in model.find_statements(Statement(subject,foaf.weblog,None)): + + # feed owner + person = statement.subject + + # title is required (at the moment) + title = model.get_target(person,foaf.name) + if not title: title = model.get_target(statement.object,dc.title) + if not title: + continue + + # blog is optional + feed = model.get_target(statement.object,rdfs.seeAlso) + if feed and rss.channel == model.get_target(feed, rdf.type): + feed = str(feed.uri) + if not config.has_section(feed): + config.add_section(feed) + config.set(feed, 'name', str(title)) + + # now look for OnlineAccounts for the same 
person + if accounts.keys(): + for statement in model.find_statements(Statement(person,foaf.holdsAccount,None)): + rdfaccthome = model.get_target(statement.object,foaf.accountServiceHomepage) + rdfacctname = model.get_target(statement.object,foaf.accountName) + + if not rdfaccthome or not rdfacctname: continue + + if not rdfaccthome.is_resource() or not accounts.has_key(str(rdfaccthome.uri)): continue + + if not rdfacctname.is_literal(): continue + + rdfacctname = rdfacctname.literal_value['string'] + rdfaccthome = str(rdfaccthome.uri) + + # shorten feed title a bit + try: + servicetitle = rdfaccthome.replace('http://','').split('/')[0] + except: + servicetitle = rdfaccthome + + feed = accounts[rdfaccthome].replace("{foaf:accountName}", rdfacctname) + if not config.has_section(feed): + config.add_section(feed) + config.set(feed, 'name', "%s (%s)" % (title, servicetitle)) + + if depth > 0: + + # now the fun part, let's go after more friends + for statement in model.find_statements(Statement(person,foaf.knows,None)): + friend = statement.object + + # let's be safe + if friend.is_literal(): continue + + seeAlso = model.get_target(friend,rdfs.seeAlso) + + # nothing to see + if not seeAlso or not seeAlso.is_resource(): continue + + seeAlso = str(seeAlso.uri) + + if not config.has_section(seeAlso): + config.add_section(seeAlso) + copy_options(config, section, seeAlso, + { 'content_type' : 'foaf', + 'depth' : str(depth - 1) }) + try: + from planet.config import downloadReadingList + downloadReadingList(seeAlso, config, + lambda data, subconfig : friend2config(model, friend, seeAlso, subconfig, data), + False) + except: + pass + + return + +def copy_options(config, parent_section, child_section, overrides = {}): + global inheritable_options + for option in [x for x in config.options(parent_section) if x in inheritable_options]: + if not overrides.has_key(option): + config.set(child_section, option, config.get(parent_section, option)) + + for option, value in overrides.items(): + config.set(child_section, option, value) + + +def friend2config(friend_model, friend, seeAlso, subconfig, data): + + try: + from RDF import Model, NS, Parser, Statement + except: + return + + dc = NS('http://purl.org/dc/elements/1.1/') + foaf = NS('http://xmlns.com/foaf/0.1/') + rdf = NS('http://www.w3.org/1999/02/22-rdf-syntax-ns#') + rdfs = NS('http://www.w3.org/2000/01/rdf-schema#') + + # FOAF InverseFunctionalProperties + ifps = [foaf.mbox, foaf.mbox_sha1sum, foaf.jabberID, foaf.aimChatID, + foaf.icqChatID, foaf.yahooChatID, foaf.msnChatID, foaf.homepage, foaf.weblog] + + model = load_model(data, seeAlso) + + for statement in model.find_statements(Statement(None,rdf.type,foaf.Person)): + + samefriend = statement.subject + + # maybe they have the same uri + if friend.is_resource() and samefriend.is_resource() and friend == samefriend: + foaf2config(model, subconfig, samefriend) + return + + for ifp in ifps: + object = model.get_target(samefriend,ifp) + if object and object == friend_model.get_target(friend, ifp): + foaf2config(model, subconfig, samefriend) + return + +if __name__ == "__main__": + import sys, urllib + config = ConfigParser() + + for uri in sys.argv[1:]: + config.add_section(uri) + foaf2config(urllib.urlopen(uri), config, section=uri) + config.remove_section(uri) + + config.write(sys.stdout) diff --git a/lib/venus/planet/idindex.py b/lib/venus/planet/idindex.py new file mode 100644 index 0000000..87daa0f --- /dev/null +++ b/lib/venus/planet/idindex.py @@ -0,0 +1,99 @@ +from glob import glob +import os, 
sys + +if __name__ == '__main__': + rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + sys.path.insert(0, rootdir) + +from planet.spider import filename +from planet import config + +def open(): + try: + cache = config.cache_directory() + index=os.path.join(cache,'index') + if not os.path.exists(index): return None + import dbhash + return dbhash.open(filename(index, 'id'),'w') + except Exception, e: + if e.__class__.__name__ == 'DBError': e = e.args[-1] + from planet import logger as log + log.error(str(e)) + +def destroy(): + from planet import logger as log + cache = config.cache_directory() + index=os.path.join(cache,'index') + if not os.path.exists(index): return None + idindex = filename(index, 'id') + if os.path.exists(idindex): os.unlink(idindex) + os.removedirs(index) + log.info(idindex + " deleted") + +def create(): + from planet import logger as log + cache = config.cache_directory() + index=os.path.join(cache,'index') + if not os.path.exists(index): os.makedirs(index) + import dbhash + index = dbhash.open(filename(index, 'id'),'c') + + try: + import libxml2 + except: + libxml2 = False + from xml.dom import minidom + + for file in glob(cache+"/*"): + if os.path.isdir(file): + continue + elif libxml2: + try: + doc = libxml2.parseFile(file) + ctxt = doc.xpathNewContext() + ctxt.xpathRegisterNs('atom','http://www.w3.org/2005/Atom') + entry = ctxt.xpathEval('/atom:entry/atom:id') + source = ctxt.xpathEval('/atom:entry/atom:source/atom:id') + if entry and source: + index[filename('',entry[0].content)] = source[0].content + doc.freeDoc() + except: + log.error(file) + else: + try: + doc = minidom.parse(file) + doc.normalize() + ids = doc.getElementsByTagName('id') + entry = [e for e in ids if e.parentNode.nodeName == 'entry'] + source = [e for e in ids if e.parentNode.nodeName == 'source'] + if entry and source: + index[filename('',entry[0].childNodes[0].nodeValue)] = \ + source[0].childNodes[0].nodeValue + doc.freeDoc() + except: + log.error(file) + + log.info(str(len(index.keys())) + " entries indexed") + index.close() + + return open() + +if __name__ == '__main__': + if len(sys.argv) < 2: + print 'Usage: %s [-c|-d]' % sys.argv[0] + sys.exit(1) + + config.load(sys.argv[1]) + + if len(sys.argv) > 2 and sys.argv[2] == '-c': + create() + elif len(sys.argv) > 2 and sys.argv[2] == '-d': + destroy() + else: + from planet import logger as log + index = open() + if index: + log.info(str(len(index.keys())) + " entries indexed") + index.close() + else: + log.info("no entries indexed") diff --git a/lib/venus/planet/opml.py b/lib/venus/planet/opml.py new file mode 100755 index 0000000..6e0b02a --- /dev/null +++ b/lib/venus/planet/opml.py @@ -0,0 +1,154 @@ +from xml.sax import ContentHandler, make_parser, SAXParseException +from xml.sax.xmlreader import InputSource +from sgmllib import SGMLParser +from cStringIO import StringIO +from ConfigParser import ConfigParser +from htmlentitydefs import entitydefs +import re + +# input = opml, output = ConfigParser +def opml2config(opml, config=None): + + if hasattr(opml, 'read'): + opml = opml.read() + + if not config: + config = ConfigParser() + + opmlParser = OpmlParser(config) + + try: + # try SAX + source = InputSource() + source.setByteStream(StringIO(opml)) + parser = make_parser() + parser.setContentHandler(opmlParser) + parser.parse(source) + except SAXParseException: + # try as SGML + opmlParser.feed(opml) + + return config + +# Parse OPML via either SAX or SGML +class OpmlParser(ContentHandler,SGMLParser): + entities = 
re.compile('&(#?\w+);') + + def __init__(self, config): + ContentHandler.__init__(self) + SGMLParser.__init__(self) + self.config = config + + def startElement(self, name, attrs): + + # we are only looking for data in 'outline' nodes. + if name != 'outline': return + + # A type of 'rss' is meant to be used generically to indicate that + # this is an entry in a subscription list, but some leave this + # attribute off, and others have placed 'atom' in here + if attrs.has_key('type'): + if attrs['type'] == 'link' and not attrs.has_key('url'): + # Auto-correct WordPress link manager OPML files + attrs = dict(attrs.items()) + attrs['type'] = 'rss' + if attrs['type'].lower() not in['rss','atom']: return + + # The feed itself is supposed to be in an attribute named 'xmlUrl' + # (note the camel casing), but this has proven to be problematic, + # with the most common misspelling being in all lower-case + if not attrs.has_key('xmlUrl') or not attrs['xmlUrl'].strip(): + for attribute in attrs.keys(): + if attribute.lower() == 'xmlurl' and attrs[attribute].strip(): + attrs = dict(attrs.items()) + attrs['xmlUrl'] = attrs[attribute] + break + else: + return + + # the text attribute is nominally required in OPML, but this + # data is often found in a title attribute instead + if not attrs.has_key('text') or not attrs['text'].strip(): + if not attrs.has_key('title') or not attrs['title'].strip(): return + attrs = dict(attrs.items()) + attrs['text'] = attrs['title'] + + # if we get this far, we either have a valid subscription list entry, + # or one with a correctable error. Add it to the configuration, if + # it is not already there. + xmlUrl = attrs['xmlUrl'] + if not self.config.has_section(xmlUrl): + self.config.add_section(xmlUrl) + self.config.set(xmlUrl, 'name', self.unescape(attrs['text'])) + + def unescape(self, text): + parsed = self.entities.split(text) + + for i in range(1,len(parsed),2): + + if parsed[i] in entitydefs.keys(): + # named entities + codepoint=entitydefs[parsed[i]] + match=self.entities.match(codepoint) + if match: + parsed[i]=match.group(1) + else: + parsed[i]=unichr(ord(codepoint)) + + # numeric entities + if parsed[i].startswith('#'): + if parsed[i].startswith('#x'): + parsed[i]=unichr(int(parsed[i][2:],16)) + else: + parsed[i]=unichr(int(parsed[i][1:])) + + return u''.join(parsed).encode('utf-8') + # SGML => SAX + def unknown_starttag(self, name, attrs): + attrs = dict(attrs) + for attribute in attrs: + try: + attrs[attribute] = attrs[attribute].decode('utf-8') + except: + work = attrs[attribute].decode('iso-8859-1') + work = u''.join([c in cp1252 and cp1252[c] or c for c in work]) + attrs[attribute] = work + self.startElement(name, attrs) + +# http://www.intertwingly.net/stories/2004/04/14/i18n.html#CleaningWindows +cp1252 = { + unichr(128): unichr(8364), # euro sign + unichr(130): unichr(8218), # single low-9 quotation mark + unichr(131): unichr( 402), # latin small letter f with hook + unichr(132): unichr(8222), # double low-9 quotation mark + unichr(133): unichr(8230), # horizontal ellipsis + unichr(134): unichr(8224), # dagger + unichr(135): unichr(8225), # double dagger + unichr(136): unichr( 710), # modifier letter circumflex accent + unichr(137): unichr(8240), # per mille sign + unichr(138): unichr( 352), # latin capital letter s with caron + unichr(139): unichr(8249), # single left-pointing angle quotation mark + unichr(140): unichr( 338), # latin capital ligature oe + unichr(142): unichr( 381), # latin capital letter z with caron + unichr(145): unichr(8216), # left 
single quotation mark + unichr(146): unichr(8217), # right single quotation mark + unichr(147): unichr(8220), # left double quotation mark + unichr(148): unichr(8221), # right double quotation mark + unichr(149): unichr(8226), # bullet + unichr(150): unichr(8211), # en dash + unichr(151): unichr(8212), # em dash + unichr(152): unichr( 732), # small tilde + unichr(153): unichr(8482), # trade mark sign + unichr(154): unichr( 353), # latin small letter s with caron + unichr(155): unichr(8250), # single right-pointing angle quotation mark + unichr(156): unichr( 339), # latin small ligature oe + unichr(158): unichr( 382), # latin small letter z with caron + unichr(159): unichr( 376)} # latin capital letter y with diaeresis + +if __name__ == "__main__": + # small main program which converts OPML into config.ini format + import sys, urllib + config = ConfigParser() + for opml in sys.argv[1:]: + opml2config(urllib.urlopen(opml), config) + config.write(sys.stdout) diff --git a/lib/venus/planet/reconstitute.py b/lib/venus/planet/reconstitute.py new file mode 100644 index 0000000..760af50 --- /dev/null +++ b/lib/venus/planet/reconstitute.py @@ -0,0 +1,334 @@ +""" +Reconstitute an entry document from the output of the Universal Feed Parser. + +The main entry point is called 'reconstitute'. Input parameters are: + + results: this is the entire hash table return by the UFP + entry: this is the entry in the hash that you want reconstituted + +The value returned is an XML DOM. Every effort is made to convert +everything to unicode, and text fields into either plain text or +well formed XHTML. + +Todo: + * extension elements +""" +import re, time, md5, sgmllib +from xml.sax.saxutils import escape +from xml.dom import minidom, Node +from html5lib import liberalxmlparser +from html5lib.treebuilders import dom +import planet, config + +illegal_xml_chars = re.compile("[\x01-\x08\x0B\x0C\x0E-\x1F]") + +def createTextElement(parent, name, value): + """ utility function to create a child element with the specified text""" + if not value: return + if isinstance(value,str): + try: + value=value.decode('utf-8') + except: + value=value.decode('iso-8859-1') + xdoc = parent.ownerDocument + xelement = xdoc.createElement(name) + xelement.appendChild(xdoc.createTextNode(value)) + parent.appendChild(xelement) + return xelement + +def invalidate(c): + """ replace invalid characters """ + return '<acronym title="U+%s">\xef\xbf\xbd</acronym>' % \ + ('000' + hex(ord(c.group(0)))[2:])[-4:] + +def ncr2c(value): + """ convert numeric character references to characters """ + value=value.group(1) + if value.startswith('x'): + value=unichr(int(value[1:],16)) + else: + value=unichr(int(value)) + return value + +nonalpha=re.compile('\W+',re.UNICODE) +def cssid(name): + """ generate a css id from a name """ + try: + name = nonalpha.sub('-',name.decode('utf-8')).lower().encode('utf-8') + except: + name = nonalpha.sub('-',name).lower() + return name.strip('-') + +def id(xentry, entry): + """ copy or compute an id for the entry """ + + if entry.has_key("id") and entry.id: + entry_id = entry.id + elif entry.has_key("link") and entry.link: + entry_id = entry.link + elif entry.has_key("title") and entry.title: + entry_id = (entry.title_detail.base + "/" + + md5.new(entry.title).hexdigest()) + elif entry.has_key("summary") and entry.summary: + entry_id = (entry.summary_detail.base + "/" + + md5.new(entry.summary).hexdigest()) + elif entry.has_key("content") and entry.content: + + entry_id = (entry.content[0].base + "/" + + 
md5.new(entry.content[0].value).hexdigest()) + else: + return + + if xentry: createTextElement(xentry, 'id', entry_id) + return entry_id + +def links(xentry, entry): + """ copy links to the entry """ + if not entry.has_key('links'): + entry['links'] = [] + if entry.has_key('link'): + entry['links'].append({'rel':'alternate', 'href':entry.link}) + xdoc = xentry.ownerDocument + for link in entry['links']: + if not 'href' in link.keys(): continue + xlink = xdoc.createElement('link') + xlink.setAttribute('href', link.get('href')) + if link.has_key('type'): + xlink.setAttribute('type', link.get('type')) + if link.has_key('rel'): + xlink.setAttribute('rel', link.get('rel',None)) + if link.has_key('length'): + xlink.setAttribute('length', link.get('length')) + xentry.appendChild(xlink) + +def date(xentry, name, parsed): + """ insert a date-formated element into the entry """ + if not parsed: return + formatted = time.strftime("%Y-%m-%dT%H:%M:%SZ", parsed) + xdate = createTextElement(xentry, name, formatted) + formatted = time.strftime(config.date_format(), parsed) + xdate.setAttribute('planet:format', formatted.decode('utf-8')) + +def category(xentry, tag): + xtag = xentry.ownerDocument.createElement('category') + if not tag.has_key('term') or not tag.term: return + xtag.setAttribute('term', tag.get('term')) + if tag.has_key('scheme') and tag.scheme: + xtag.setAttribute('scheme', tag.get('scheme')) + if tag.has_key('label') and tag.label: + xtag.setAttribute('label', tag.get('label')) + xentry.appendChild(xtag) + +def author(xentry, name, detail): + """ insert an author-like element into the entry """ + if not detail: return + xdoc = xentry.ownerDocument + xauthor = xdoc.createElement(name) + + if detail.get('name', None): + createTextElement(xauthor, 'name', detail.get('name')) + else: + xauthor.appendChild(xdoc.createElement('name')) + + createTextElement(xauthor, 'email', detail.get('email', None)) + createTextElement(xauthor, 'uri', detail.get('href', None)) + + xentry.appendChild(xauthor) + +def content(xentry, name, detail, bozo): + """ insert a content-like element into the entry """ + if not detail or not detail.value: return + + data = None + xdiv = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>' + xdoc = xentry.ownerDocument + xcontent = xdoc.createElement(name) + + if isinstance(detail.value,unicode): + detail.value=detail.value.encode('utf-8') + + if not detail.has_key('type') or detail.type.lower().find('html')<0: + detail['value'] = escape(detail.value) + detail['type'] = 'text/html' + + if detail.type.find('xhtml')>=0 and not bozo: + try: + data = minidom.parseString(xdiv % detail.value).documentElement + xcontent.setAttribute('type', 'xhtml') + except: + bozo=1 + + if detail.type.find('xhtml')<0 or bozo: + parser = liberalxmlparser.XHTMLParser(tree=dom.TreeBuilder) + html = parser.parse(xdiv % detail.value, encoding="utf-8") + for body in html.documentElement.childNodes: + if body.nodeType != Node.ELEMENT_NODE: continue + if body.nodeName != 'body': continue + for div in body.childNodes: + if div.nodeType != Node.ELEMENT_NODE: continue + if div.nodeName != 'div': continue + try: + div.normalize() + if len(div.childNodes) == 1 and \ + div.firstChild.nodeType == Node.TEXT_NODE: + data = div.firstChild + else: + data = div + xcontent.setAttribute('type', 'xhtml') + break + except: + # in extremely nested cases, the Python runtime decides + # that normalize() must be in an infinite loop; mark + # the content as escaped html and proceed on... 
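# A hedged sketch (not part of the patch) of the <div> wrapping that content()
# above relies on: well-formed XHTML values are parsed inside a namespaced div
# with minidom, plain text is escape()d to text/html first, and anything else
# falls back to the vendored html5lib parser. The sample markup is hypothetical.
from xml.dom import minidom

xdiv = '<div xmlns="http://www.w3.org/1999/xhtml">%s</div>'
value = '<p>Hello, <em>Venus</em></p>'
data = minidom.parseString(xdiv % value).documentElement
print data.toxml()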
+ xcontent.setAttribute('type', 'html') + data = xdoc.createTextNode(detail.value.decode('utf-8')) + + if data: xcontent.appendChild(data) + + if detail.get("language"): + xcontent.setAttribute('xml:lang', detail.language) + + xentry.appendChild(xcontent) + +def location(xentry, long, lat): + """ insert geo location into the entry """ + if not lat or not long: return + + xlat = createTextElement(xentry, '%s:%s' % ('geo','lat'), '%f' % lat) + xlat.setAttribute('xmlns:%s' % 'geo', 'http://www.w3.org/2003/01/geo/wgs84_pos#') + xlong = createTextElement(xentry, '%s:%s' % ('geo','long'), '%f' % long) + xlong.setAttribute('xmlns:%s' % 'geo', 'http://www.w3.org/2003/01/geo/wgs84_pos#') + + xentry.appendChild(xlat) + xentry.appendChild(xlong) + +def source(xsource, source, bozo, format): + """ copy source information to the entry """ + xdoc = xsource.ownerDocument + + createTextElement(xsource, 'id', source.get('id', source.get('link',None))) + createTextElement(xsource, 'icon', source.get('icon', None)) + createTextElement(xsource, 'logo', source.get('logo', None)) + + if not source.has_key('logo') and source.has_key('image'): + createTextElement(xsource, 'logo', source.image.get('href',None)) + + for tag in source.get('tags',[]): + category(xsource, tag) + + author(xsource, 'author', source.get('author_detail',{})) + for contributor in source.get('contributors',[]): + author(xsource, 'contributor', contributor) + + links(xsource, source) + + content(xsource, 'rights', source.get('rights_detail',None), bozo) + content(xsource, 'subtitle', source.get('subtitle_detail',None), bozo) + content(xsource, 'title', source.get('title_detail',None), bozo) + + date(xsource, 'updated', source.get('updated_parsed',time.gmtime())) + + if format: source['planet_format'] = format + if not bozo == None: source['planet_bozo'] = bozo and 'true' or 'false' + + # propagate planet inserted information + if source.has_key('planet_name') and not source.has_key('planet_css-id'): + source['planet_css-id'] = cssid(source['planet_name']) + for key, value in source.items(): + if key.startswith('planet_'): + createTextElement(xsource, key.replace('_',':',1), value) + +def reconstitute(feed, entry): + """ create an entry document from a parsed feed """ + xdoc=minidom.parseString('<entry xmlns="http://www.w3.org/2005/Atom"/>\n') + xentry=xdoc.documentElement + xentry.setAttribute('xmlns:planet',planet.xmlns) + + if entry.has_key('language'): + xentry.setAttribute('xml:lang', entry.language) + elif feed.feed.has_key('language'): + xentry.setAttribute('xml:lang', feed.feed.language) + + id(xentry, entry) + links(xentry, entry) + + bozo = feed.bozo + if not entry.has_key('title') or not entry.title: + xentry.appendChild(xdoc.createElement('title')) + + content(xentry, 'title', entry.get('title_detail',None), bozo) + content(xentry, 'summary', entry.get('summary_detail',None), bozo) + content(xentry, 'content', entry.get('content',[None])[0], bozo) + content(xentry, 'rights', entry.get('rights_detail',None), bozo) + + date(xentry, 'updated', entry_updated(feed.feed, entry, time.gmtime())) + date(xentry, 'published', entry.get('published_parsed',None)) + + for tag in entry.get('tags',[]): + category(xentry, tag) + + # known, simple text extensions + for ns,name in [('feedburner','origLink')]: + if entry.has_key('%s_%s' % (ns,name.lower())) and \ + feed.namespaces.has_key(ns): + xoriglink = createTextElement(xentry, '%s:%s' % (ns,name), + entry['%s_%s' % (ns,name.lower())]) + xoriglink.setAttribute('xmlns:%s' % ns, 
feed.namespaces[ns]) + + # geo location + if entry.has_key('where') and \ + entry.get('where',[]).has_key('type') and \ + entry.get('where',[]).has_key('coordinates'): + where = entry.get('where',[]) + type = where.get('type',None) + coordinates = where.get('coordinates',None) + if type == 'Point': + location(xentry, coordinates[0], coordinates[1]) + elif type == 'Box' or type == 'LineString' or type == 'Polygon': + location(xentry, coordinates[0][0], coordinates[0][1]) + if entry.has_key('geo_lat') and \ + entry.has_key('geo_long'): + location(xentry, (float)(entry.get('geo_long',None)), (float)(entry.get('geo_lat',None))) + + # author / contributor + author_detail = entry.get('author_detail',{}) + if author_detail and not author_detail.has_key('name') and \ + feed.feed.has_key('planet_name'): + author_detail['name'] = feed.feed['planet_name'] + author(xentry, 'author', author_detail) + for contributor in entry.get('contributors',[]): + author(xentry, 'contributor', contributor) + + # merge in planet:* from feed (or simply use the feed if no source) + src = entry.get('source') + if src: + for name,value in feed.feed.items(): + if name.startswith('planet_'): src[name]=value + if feed.feed.has_key('id'): + src['planet_id'] = feed.feed.id + else: + src = feed.feed + + # source:author + src_author = src.get('author_detail',{}) + if (not author_detail or not author_detail.has_key('name')) and \ + not src_author.has_key('name') and feed.feed.has_key('planet_name'): + if src_author: src_author = src_author.__class__(src_author.copy()) + src['author_detail'] = src_author + src_author['name'] = feed.feed['planet_name'] + + # source + xsource = xdoc.createElement('source') + source(xsource, src, bozo, feed.version) + xentry.appendChild(xsource) + + return xdoc + +def entry_updated(feed, entry, default = None): + chks = ((entry, 'updated_parsed'), + (entry, 'published_parsed'), + (feed, 'updated_parsed'),) + for node, field in chks: + if node.has_key(field) and node[field]: + return node[field] + return default diff --git a/lib/venus/planet/scrub.py b/lib/venus/planet/scrub.py new file mode 100644 index 0000000..9d48753 --- /dev/null +++ b/lib/venus/planet/scrub.py @@ -0,0 +1,132 @@ +""" +Process a set of configuration defined sanitations on a given feed. 
+""" + +# Standard library modules +import time +# Planet modules +import planet, config, shell +from planet import feedparser + +type_map = {'text': 'text/plain', 'html': 'text/html', + 'xhtml': 'application/xhtml+xml'} + +def scrub(feed_uri, data): + + # some data is not trustworthy + for tag in config.ignore_in_feed(feed_uri).split(): + if tag.find('lang')>=0: tag='language' + if data.feed.has_key(tag): del data.feed[tag] + for entry in data.entries: + if entry.has_key(tag): del entry[tag] + if entry.has_key(tag + "_detail"): del entry[tag + "_detail"] + if entry.has_key(tag + "_parsed"): del entry[tag + "_parsed"] + for key in entry.keys(): + if not key.endswith('_detail'): continue + for detail in entry[key].copy(): + if detail == tag: del entry[key][detail] + + # adjust title types + if config.title_type(feed_uri): + title_type = config.title_type(feed_uri) + title_type = type_map.get(title_type, title_type) + for entry in data.entries: + if entry.has_key('title_detail'): + entry.title_detail['type'] = title_type + + # adjust summary types + if config.summary_type(feed_uri): + summary_type = config.summary_type(feed_uri) + summary_type = type_map.get(summary_type, summary_type) + for entry in data.entries: + if entry.has_key('summary_detail'): + entry.summary_detail['type'] = summary_type + + # adjust content types + if config.content_type(feed_uri): + content_type = config.content_type(feed_uri) + content_type = type_map.get(content_type, content_type) + for entry in data.entries: + if entry.has_key('content'): + entry.content[0]['type'] = content_type + + # some people put html in author names + if config.name_type(feed_uri).find('html')>=0: + from shell.tmpl import stripHtml + if data.feed.has_key('author_detail') and \ + data.feed.author_detail.has_key('name'): + data.feed.author_detail['name'] = \ + str(stripHtml(data.feed.author_detail.name)) + for entry in data.entries: + if entry.has_key('author_detail') and \ + entry.author_detail.has_key('name'): + entry.author_detail['name'] = \ + str(stripHtml(entry.author_detail.name)) + if entry.has_key('source'): + source = entry.source + if source.has_key('author_detail') and \ + source.author_detail.has_key('name'): + source.author_detail['name'] = \ + str(stripHtml(source.author_detail.name)) + + # handle dates in the future + future_dates = config.future_dates(feed_uri).lower() + if future_dates == 'ignore_date': + now = time.gmtime() + if data.feed.has_key('updated_parsed') and data.feed['updated_parsed']: + if data.feed['updated_parsed'] > now: del data.feed['updated_parsed'] + for entry in data.entries: + if entry.has_key('published_parsed') and entry['published_parsed']: + if entry['published_parsed'] > now: + del entry['published_parsed'] + del entry['published'] + if entry.has_key('updated_parsed') and entry['updated_parsed']: + if entry['updated_parsed'] > now: + del entry['updated_parsed'] + del entry['updated'] + elif future_dates == 'ignore_entry': + now = time.time() + if data.feed.has_key('updated_parsed') and data.feed['updated_parsed']: + if data.feed['updated_parsed'] > now: del data.feed['updated_parsed'] + data.entries = [entry for entry in data.entries if + (not entry.has_key('published_parsed') or not entry['published_parsed'] + or entry['published_parsed'] <= now) and + (not entry.has_key('updated_parsed') or not entry['updated_parsed'] + or entry['updated_parsed'] <= now)] + + scrub_xmlbase = config.xml_base(feed_uri) + + # resolve relative URIs and sanitize + for entry in data.entries + [data.feed]: + for key 
in entry.keys(): + if key == 'content'and not entry.has_key('content_detail'): + node = entry.content[0] + elif key.endswith('_detail'): + node = entry[key] + else: + continue + + if not node.has_key('type'): continue + if not 'html' in node['type']: continue + if not node.has_key('value'): continue + + if node.has_key('base'): + if scrub_xmlbase: + if scrub_xmlbase == 'feed_alternate': + if entry.has_key('source') and \ + entry.source.has_key('link'): + node['base'] = entry.source.link + elif data.feed.has_key('link'): + node['base'] = data.feed.link + elif scrub_xmlbase == 'entry_alternate': + if entry.has_key('link'): + node['base'] = entry.link + else: + node['base'] = feedparser._urljoin( + node['base'], scrub_xmlbase) + + node['value'] = feedparser._resolveRelativeURIs( + node.value, node.base, 'utf-8', node.type) + + node['value'] = feedparser._sanitizeHTML( + node.value, 'utf-8', node.type) diff --git a/lib/venus/planet/shell/__init__.py b/lib/venus/planet/shell/__init__.py new file mode 100644 index 0000000..49b8557 --- /dev/null +++ b/lib/venus/planet/shell/__init__.py @@ -0,0 +1,67 @@ +import planet +import os +import sys + +logged_modes = [] + +def run(template_file, doc, mode='template'): + """ select a template module based on file extension and execute it """ + log = planet.logger + + if mode == 'template': + dirs = planet.config.template_directories() + else: + dirs = planet.config.filter_directories() + + # parse out "extra" options + if template_file.find('?') < 0: + extra_options = {} + else: + import cgi + template_file, extra_options = template_file.split('?',1) + extra_options = dict(cgi.parse_qsl(extra_options)) + + # see if the template can be located + for template_dir in dirs: + template_resolved = os.path.join(template_dir, template_file) + if os.path.exists(template_resolved): break + else: + log.error("Unable to locate %s %s", mode, template_file) + if not mode in logged_modes: + log.info("%s search path:", mode) + for template_dir in dirs: + log.info(" %s", os.path.realpath(template_dir)) + logged_modes.append(mode) + return + template_resolved = os.path.realpath(template_resolved) + + # Add shell directory to the path, if not already there + shellpath = os.path.join(sys.path[0],'planet','shell') + if shellpath not in sys.path: + sys.path.append(shellpath) + + # Try loading module for processing this template, based on the extension + base,ext = os.path.splitext(os.path.basename(template_resolved)) + module_name = ext[1:] + try: + try: + module = __import__("_" + module_name) + except: + module = __import__(module_name) + except Exception, inst: + return log.error("Skipping %s '%s' after failing to load '%s': %s", + mode, template_resolved, module_name, inst) + + # Execute the shell module + options = planet.config.template_options(template_file) + if module_name == 'plugin': options['__file__'] = template_file + options.update(extra_options) + log.debug("Processing %s %s using %s", mode, + os.path.realpath(template_resolved), module_name) + if mode == 'filter': + return module.run(template_resolved, doc, None, options) + else: + output_dir = planet.config.output_dir() + output_file = os.path.join(output_dir, base) + module.run(template_resolved, doc, output_file, options) + return output_file diff --git a/lib/venus/planet/shell/_genshi.py b/lib/venus/planet/shell/_genshi.py new file mode 100644 index 0000000..5dffab2 --- /dev/null +++ b/lib/venus/planet/shell/_genshi.py @@ -0,0 +1,143 @@ +from StringIO import StringIO +from xml.sax.saxutils import escape + 
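# A hedged sketch (not part of the patch) of how planet.shell.run() above
# resolves a template or filter: anything after '?' is parsed into
# per-invocation options, and the file extension names the processor module
# (py, sed, tmpl, xslt, dj, plugin, genshi). The filter spec is hypothetical.
import cgi, os

spec = 'regexp_sifter.py?require=python'
template_file, query = spec.split('?', 1)
extra_options = dict(cgi.parse_qsl(query))            # {'require': 'python'}
module_name = os.path.splitext(template_file)[1][1:]  # 'py'
print module_name, extra_options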
+from genshi.input import HTMLParser, XMLParser +from genshi.template import Context, MarkupTemplate + +subscriptions = [] +feed_types = [ + 'application/atom+xml', + 'application/rss+xml', + 'application/rdf+xml' +] + +def norm(value): + """ Convert to Unicode """ + if hasattr(value,'items'): + return dict([(norm(n),norm(v)) for n,v in value.items()]) + + try: + return value.decode('utf-8') + except: + return value.decode('iso-8859-1') + +def find_config(config, feed): + # match based on self link + for link in feed.links: + if link.has_key('rel') and link.rel=='self': + if link.has_key('type') and link.type in feed_types: + if link.has_key('href') and link.href in subscriptions: + return norm(dict(config.parser.items(link.href))) + + # match based on name + for sub in subscriptions: + if config.parser.has_option(sub, 'name') and \ + norm(config.parser.get(sub, 'name')) == feed.planet_name: + return norm(dict(config.parser.items(sub))) + + return {} + +class XHTMLParser(object): + """ parse an XHTML fragment """ + def __init__(self, text): + self.parser = XMLParser(StringIO("<div>%s</div>" % text)) + self.depth = 0 + def __iter__(self): + self.iter = self.parser.__iter__() + return self + def next(self): + object = self.iter.next() + if object[0] == 'END': self.depth = self.depth - 1 + predepth = self.depth + if object[0] == 'START': self.depth = self.depth + 1 + if predepth: return object + return self.next() + +def streamify(text,bozo): + """ add a .stream to a _detail textConstruct """ + if text.type == 'text/plain': + text.stream = HTMLParser(StringIO(escape(text.value))) + elif text.type == 'text/html' or bozo != 'false': + text.stream = HTMLParser(StringIO(text.value)) + else: + text.stream = XHTMLParser(text.value) + +def run(script, doc, output_file=None, options={}): + """ process an Genshi template """ + + context = Context(**options) + + tmpl_fileobj = open(script) + tmpl = MarkupTemplate(tmpl_fileobj, script) + tmpl_fileobj.close() + + if not output_file: + # filter + context.push({'input':XMLParser(StringIO(doc))}) + else: + # template + import time + from planet import config,feedparser + from planet.spider import filename + + # gather a list of subscriptions, feeds + global subscriptions + feeds = [] + sources = config.cache_sources_directory() + for sub in config.subscriptions(): + data=feedparser.parse(filename(sources,sub)) + data.feed.config = norm(dict(config.parser.items(sub))) + if data.feed.has_key('link'): + feeds.append((data.feed.config.get('name',''),data.feed)) + subscriptions.append(norm(sub)) + feeds.sort() + + # annotate each entry + new_date_format = config.new_date_format() + vars = feedparser.parse(StringIO(doc)) + vars.feeds = [value for name,value in feeds] + last_feed = None + last_date = None + for entry in vars.entries: + entry.source.config = find_config(config, entry.source) + + # add new_feed and new_date fields + entry.new_feed = entry.source.id + entry.new_date = date = None + if entry.has_key('published_parsed'): date=entry.published_parsed + if entry.has_key('updated_parsed'): date=entry.updated_parsed + if date: entry.new_date = time.strftime(new_date_format, date) + + # remove new_feed and new_date fields if not "new" + if entry.new_date == last_date: + entry.new_date = None + if entry.new_feed == last_feed: + entry.new_feed = None + else: + last_feed = entry.new_feed + elif entry.new_date: + last_date = entry.new_date + last_feed = None + + # add streams for all text constructs + for key in entry.keys(): + if key.endswith("_detail") and 
entry[key].has_key('type') and \ + entry[key].has_key('value'): + streamify(entry[key],entry.source.planet_bozo) + if entry.has_key('content'): + for content in entry.content: + streamify(content,entry.source.planet_bozo) + + # add cumulative feed information to the Genshi context + vars.feed.config = dict(config.parser.items('Planet',True)) + context.push(vars) + + # apply template + output=tmpl.generate(context).render('xml') + + if output_file: + out_file = open(output_file,'w') + out_file.write(output) + out_file.close() + else: + return output diff --git a/lib/venus/planet/shell/dj.py b/lib/venus/planet/shell/dj.py new file mode 100644 index 0000000..05baa62 --- /dev/null +++ b/lib/venus/planet/shell/dj.py @@ -0,0 +1,50 @@ +import os.path +import urlparse +import datetime + +import tmpl +from planet import config + +def DjangoPlanetDate(value): + return datetime.datetime(*value[:6]) + +# remap PlanetDate to be a datetime, so Django template authors can use +# the "date" filter on these values +tmpl.PlanetDate = DjangoPlanetDate + +def run(script, doc, output_file=None, options={}): + """process a Django template file""" + + # this is needed to use the Django template system as standalone + # I need to re-import the settings at every call because I have to + # set the TEMPLATE_DIRS variable programmatically + from django.conf import settings + try: + settings.configure( + DEBUG=True, TEMPLATE_DEBUG=True, + TEMPLATE_DIRS=(os.path.dirname(script),) + ) + except EnvironmentError: + pass + from django.template import Context + from django.template.loader import get_template + + # set up the Django context by using the default htmltmpl + # datatype converters + context = Context() + context.update(tmpl.template_info(doc)) + context['Config'] = config.planet_options() + t = get_template(script) + + if output_file: + reluri = os.path.splitext(os.path.basename(output_file))[0] + context['url'] = urlparse.urljoin(config.link(),reluri) + f = open(output_file, 'w') + ss = t.render(context) + if isinstance(ss,unicode): ss=ss.encode('utf-8') + f.write(ss) + f.close() + else: + # @@this is useful for testing purposes, but does it + # belong here? 
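# A hedged sketch (not part of the patch) of the standalone-Django idiom that
# dj.py above depends on: configure settings once, point TEMPLATE_DIRS at the
# template's directory, then render a plain Context. Paths and context values
# are hypothetical, and this targets the old TEMPLATE_DIRS-based Django API
# the plugin was written against.
import os
from django.conf import settings

script = '/path/to/theme/index.html.dj'
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
                   TEMPLATE_DIRS=(os.path.dirname(script),))

from django.template import Context
from django.template.loader import get_template

template = get_template(os.path.basename(script))
print template.render(Context({'name': 'An Example Planet'}))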
+ return t.render(context) diff --git a/lib/venus/planet/shell/plugin.py b/lib/venus/planet/shell/plugin.py new file mode 100644 index 0000000..dd94380 --- /dev/null +++ b/lib/venus/planet/shell/plugin.py @@ -0,0 +1,64 @@ +import os, sys, imp +from StringIO import StringIO + +def run(script, doc, output_file=None, options={}): + """ process an Python script using imp """ + save_sys = (sys.stdin, sys.stdout, sys.stderr, sys.argv) + plugin_stdout = StringIO() + plugin_stderr = StringIO() + + try: + # redirect stdin + sys.stdin = StringIO(doc) + + # redirect stdout + if output_file: + sys.stdout = open(output_file, 'w') + else: + sys.stdout = plugin_stdout + + # redirect stderr + sys.stderr = plugin_stderr + + # determine __file__ value + if options.has_key("__file__"): + plugin_file = options["__file__"] + del options["__file__"] + else: + plugin_file = script + + # set sys.argv + options = sum([['--'+key, value] for key,value in options.items()], []) + sys.argv = [plugin_file] + options + + # import script + handle = open(script, 'r') + cwd = os.getcwd() + try: + try: + try: + description=('.plugin', 'rb', imp.PY_SOURCE) + imp.load_module('__main__',handle,plugin_file,description) + except SystemExit,e: + if e.code: log.error('%s exit rc=%d',(plugin_file,e.code)) + except Exception, e: + import traceback + type, value, tb = sys.exc_info() + plugin_stderr.write(''.join( + traceback.format_exception_only(type,value) + + traceback.format_tb(tb))) + finally: + handle.close() + if cwd != os.getcwd(): os.chdir(cwd) + + finally: + # restore system state + sys.stdin, sys.stdout, sys.stderr, sys.argv = save_sys + + # log anything sent to stderr + if plugin_stderr.getvalue(): + import planet + planet.logger.error(plugin_stderr.getvalue()) + + # return stdout + return plugin_stdout.getvalue() diff --git a/lib/venus/planet/shell/py.py b/lib/venus/planet/shell/py.py new file mode 100644 index 0000000..8f365f6 --- /dev/null +++ b/lib/venus/planet/shell/py.py @@ -0,0 +1,22 @@ +from subprocess import Popen, PIPE +import sys + +def run(script, doc, output_file=None, options={}): + """ process an Python script """ + + if output_file: + out = open(output_file, 'w') + else: + out = PIPE + + options = sum([['--'+key, value] for key,value in options.items()], []) + + proc = Popen([sys.executable, script] + options, + stdin=PIPE, stdout=out, stderr=PIPE) + + stdout, stderr = proc.communicate(doc) + if stderr: + import planet + planet.logger.error(stderr) + + return stdout diff --git a/lib/venus/planet/shell/sed.py b/lib/venus/planet/shell/sed.py new file mode 100644 index 0000000..06082a7 --- /dev/null +++ b/lib/venus/planet/shell/sed.py @@ -0,0 +1,19 @@ +from subprocess import Popen, PIPE + +def run(script, doc, output_file=None, options={}): + """ process an Python script """ + + if output_file: + out = open(output_file, 'w') + else: + out = PIPE + + proc = Popen(['sed', '-f', script], + stdin=PIPE, stdout=out, stderr=PIPE) + + stdout, stderr = proc.communicate(doc) + if stderr: + import planet + planet.logger.error(stderr) + + return stdout diff --git a/lib/venus/planet/shell/tmpl.py b/lib/venus/planet/shell/tmpl.py new file mode 100644 index 0000000..dfde6fe --- /dev/null +++ b/lib/venus/planet/shell/tmpl.py @@ -0,0 +1,272 @@ +from xml.sax.saxutils import escape +import sgmllib, time, os, sys, new, urlparse, re +from planet import config, feedparser +import htmltmpl + +voids=feedparser._BaseHTMLProcessor.elements_no_end_tag +empty=re.compile(r"<((%s)[^>]*)></\2>" % '|'.join(voids)) + +class 
stripHtml(sgmllib.SGMLParser): + "remove all tags from the data" + def __init__(self, data): + sgmllib.SGMLParser.__init__(self) + self.result='' + if isinstance(data, str): + try: + self.feed(data.decode('utf-8')) + except: + self.feed(data) + else: + self.feed(data) + self.close() + def __str__(self): + if isinstance(self.result, unicode): + return self.result.encode('utf-8') + return self.result + def handle_entityref(self, ref): + import htmlentitydefs + if ref in htmlentitydefs.entitydefs: + ref=htmlentitydefs.entitydefs[ref] + if len(ref)==1: + self.result+=unichr(ord(ref)) + elif ref.startswith('&#') and ref.endswith(';'): + self.handle_charref(ref[2:-1]) + else: + self.result+='&%s;' % ref + else: + self.result+='&%s;' % ref + def handle_charref(self, ref): + try: + if ref.startswith('x'): + self.result+=unichr(int(ref[1:],16)) + else: + self.result+=unichr(int(ref)) + except: + self.result+='&#%s;' % ref + def handle_data(self, data): + if data: self.result+=data + +# Data format mappers + +def String(value): + if isinstance(value, unicode): return value.encode('utf-8') + return value + +def Plain(value): + return str(stripHtml(value)) + +def PlanetDate(value): + return time.strftime(config.date_format(), value) + +def NewDate(value): + return time.strftime(config.new_date_format(), value) + +def Rfc822(value): + return time.strftime("%a, %d %b %Y %H:%M:%S +0000", value) + +def Rfc3399(value): + return time.strftime("%Y-%m-%dT%H:%M:%S+00:00", value) + +# Map from FeedParser path to Planet tmpl names +Base = [ + ['author', String, 'author'], + ['author_name', String, 'author_detail', 'name'], + ['generator', String, 'generator'], + ['id', String, 'id'], + ['icon', String, 'icon'], + ['last_updated_822', Rfc822, 'updated_parsed'], + ['last_updated_iso', Rfc3399, 'updated_parsed'], + ['last_updated', PlanetDate, 'updated_parsed'], + ['link', String, 'link'], + ['logo', String, 'logo'], + ['rights', String, 'rights_detail', 'value'], + ['subtitle', String, 'subtitle_detail', 'value'], + ['title', String, 'title_detail', 'value'], + ['title_plain', Plain, 'title_detail', 'value'], + ['url', String, 'links', {'rel':'self'}, 'href'], + ['url', String, 'planet_http_location'], +] + +Items = [ + ['author', String, 'author'], + ['author_email', String, 'author_detail', 'email'], + ['author_name', String, 'author_detail', 'name'], + ['author_uri', String, 'author_detail', 'href'], + ['content_language', String, 'content', 0, 'language'], + ['content', String, 'summary_detail', 'value'], + ['content', String, 'content', 0, 'value'], + ['date', PlanetDate, 'published_parsed'], + ['date', PlanetDate, 'updated_parsed'], + ['date_822', Rfc822, 'published_parsed'], + ['date_822', Rfc822, 'updated_parsed'], + ['date_iso', Rfc3399, 'published_parsed'], + ['date_iso', Rfc3399, 'updated_parsed'], + ['enclosure_href', String, 'links', {'rel': 'enclosure'}, 'href'], + ['enclosure_length', String, 'links', {'rel': 'enclosure'}, 'length'], + ['enclosure_type', String, 'links', {'rel': 'enclosure'}, 'type'], + ['id', String, 'id'], + ['link', String, 'links', {'rel': 'alternate'}, 'href'], + ['new_channel', String, 'source', 'id'], + ['new_date', NewDate, 'published_parsed'], + ['new_date', NewDate, 'updated_parsed'], + ['rights', String, 'rights_detail', 'value'], + ['title_language', String, 'title_detail', 'language'], + ['title_plain', Plain, 'title_detail', 'value'], + ['title', String, 'title_detail', 'value'], + ['summary_language', String, 'summary_detail', 'language'], + ['updated', PlanetDate, 
'updated_parsed'], + ['updated_822', Rfc822, 'updated_parsed'], + ['updated_iso', Rfc3399, 'updated_parsed'], + ['published', PlanetDate, 'published_parsed'], + ['published_822', Rfc822, 'published_parsed'], + ['published_iso', Rfc3399, 'published_parsed'], +] + +# Add additional rules for source information +for rule in Base: + Items.append(['channel_'+rule[0], rule[1], 'source'] + rule[2:]) + +def tmpl_mapper(source, rules): + "Apply specified rules to the source, and return a template dictionary" + output = {} + + for rule in rules: + node = source + for path in rule[2:]: + if isinstance(path, str) and path in node: + if path == 'value': + if node.get('type','')=='text/plain': + node['value'] = escape(node['value']) + node['type'] = 'text/html' + elif node.get('type','')=='application/xhtml+xml': + node['value'] = empty.sub(r"<\1 />", node['value']) + node = node[path] + elif isinstance(path, int): + node = node[path] + elif isinstance(path, dict): + for test in node: + for key, value in path.items(): + if test.get(key,None) != value: break + else: + node = test + break + else: + break + else: + break + else: + if node: output[rule[0]] = rule[1](node) + + # copy over all planet namespaced elements from parent source + for name,value in source.items(): + if name.startswith('planet_'): + output[name[7:]] = String(value) + if not output.get('name') and source.has_key('title_detail'): + output['name'] = Plain(source.title_detail.value) + + # copy over all planet namespaced elements from child source element + if 'source' in source: + for name,value in source.source.items(): + if name.startswith('planet_'): + output['channel_' + name[7:]] = String(value) + if not output.get('channel_name') and \ + source.source.has_key('title_detail'): + output['channel_name'] = Plain(source.source.title_detail.value) + + return output + +def _end_planet_source(self): + self._end_source() + context = self._getContext() + if not context.has_key('sources'): context['sources'] = [] + context.sources.append(context.source) + del context['source'] + +def template_info(source): + """ get template information from a feedparser output """ + + # wire in support for planet:source, call feedparser, unplug planet:source + mixin=feedparser._FeedParserMixin + mixin._start_planet_source = mixin._start_source + mixin._end_planet_source = \ + new.instancemethod(_end_planet_source, None, mixin) + data=feedparser.parse(source) + del mixin._start_planet_source + del mixin._end_planet_source + + # apply rules to convert feed parser output to htmltmpl input + output = {'Channels': [], 'Items': []} + output.update(tmpl_mapper(data.feed, Base)) + sources = [(source.get('planet_name',None),source) + for source in data.feed.get('sources',[])] + sources.sort() + for name, feed in sources: + output['Channels'].append(tmpl_mapper(feed, Base)) + for entry in data.entries: + output['Items'].append(tmpl_mapper(entry, Items)) + + # synthesize isPermaLink attribute + for item in output['Items']: + if item.get('id') == item.get('link'): + item['guid_isPermaLink']='true' + else: + item['guid_isPermaLink']='false' + + # feed level information + output['generator'] = config.generator_uri() + output['name'] = config.name() + output['link'] = config.link() + output['owner_name'] = config.owner_name() + output['owner_email'] = config.owner_email() + if config.feed(): + output['feed'] = config.feed() + output['feedtype'] = config.feed().find('rss')>=0 and 'rss' or 'atom' + + # date/time information + date = time.gmtime() + output['date'] = 
PlanetDate(date) + output['date_iso'] = Rfc3399(date) + output['date_822'] = Rfc822(date) + + # remove new_dates and new_channels that aren't "new" + date = channel = None + for item in output['Items']: + if item.has_key('new_date'): + if item['new_date'] == date: + del item['new_date'] + else: + date = item['new_date'] + + if item.has_key('new_channel'): + if item['new_channel'] == channel and not item.has_key('new_date'): + del item['new_channel'] + else: + channel = item['new_channel'] + + return output + +def run(script, doc, output_file=None, options={}): + """ process an HTMLTMPL file """ + manager = htmltmpl.TemplateManager() + template = manager.prepare(script) + tp = htmltmpl.TemplateProcessor(html_escape=0) + for key,value in template_info(doc).items(): + tp.set(key, value) + + if output_file: + reluri = os.path.splitext(os.path.basename(output_file))[0] + tp.set('url', urlparse.urljoin(config.link(),reluri)) + + output = open(output_file, "w") + output.write(tp.process(template)) + output.close() + else: + return tp.process(template) + +if __name__ == '__main__': + sys.path.insert(0, os.path.split(sys.path[0])[0]) + + for test in sys.argv[1:]: + from pprint import pprint + pprint(template_info('/home/rubys/bzr/venus/tests/data/filter/tmpl/'+test)) + diff --git a/lib/venus/planet/shell/xslt.py b/lib/venus/planet/shell/xslt.py new file mode 100644 index 0000000..0b6579f --- /dev/null +++ b/lib/venus/planet/shell/xslt.py @@ -0,0 +1,78 @@ +import os + +def quote(string, apos): + """ quote a string so that it can be passed as a parameter """ + if type(string) == unicode: + string=string.encode('utf-8') + if apos.startswith("\\"): string.replace('\\','\\\\') + + if string.find("'")<0: + return "'" + string + "'" + elif string.find("'")<0: + return '"' + string + '"' + else: + # unclear how to quote strings with both types of quotes for libxslt + return "'" + string.replace("'",apos) + "'" + +def run(script, doc, output_file=None, options={}): + """ process an XSLT stylesheet """ + + try: + # if available, use the python interface to libxslt + import libxml2 + import libxslt + dom = libxml2.parseDoc(doc) + docfile = None + except: + # otherwise, use the command line interface + dom = None + + # do it + result = None + if dom: + styledoc = libxml2.parseFile(script) + style = libxslt.parseStylesheetDoc(styledoc) + for key in options.keys(): + options[key] = quote(options[key], apos="\xe2\x80\x99") + output = style.applyStylesheet(dom, options) + if output_file: + style.saveResultToFilename(output_file, output, 0) + else: + result = output.serialize('utf-8') + style.freeStylesheet() + output.freeDoc() + elif output_file: + import warnings + if hasattr(warnings, 'simplefilter'): + warnings.simplefilter('ignore', RuntimeWarning) + docfile = os.tmpnam() + file = open(docfile,'w') + file.write(doc) + file.close() + + cmdopts = [] + for key,value in options.items(): + if value.find("'")>=0 and value.find('"')>=0: continue + cmdopts += ['--stringparam', key, quote(value, apos=r"\'")] + + os.system('xsltproc %s %s %s > %s' % + (' '.join(cmdopts), script, docfile, output_file)) + os.unlink(docfile) + else: + import sys + from subprocess import Popen, PIPE + + options = sum([['--stringparam', key, value] + for key,value in options.items()], []) + + proc = Popen(['xsltproc'] + options + [script, '-'], + stdin=PIPE, stdout=PIPE, stderr=PIPE) + + result, stderr = proc.communicate(doc) + if stderr: + import planet + planet.logger.error(stderr) + + if dom: dom.freeDoc() + + return result diff --git 
a/lib/venus/planet/spider.py b/lib/venus/planet/spider.py new file mode 100644 index 0000000..7e72343 --- /dev/null +++ b/lib/venus/planet/spider.py @@ -0,0 +1,482 @@ +""" +Fetch either a single feed, or a set of feeds, normalize to Atom and XHTML, +and write each as a set of entries in a cache directory. +""" + +# Standard library modules +import time, calendar, re, os, urlparse +from xml.dom import minidom +# Planet modules +import planet, config, feedparser, reconstitute, shell, socket, scrub +from StringIO import StringIO + +# Regular expressions to sanitise cache filenames +re_url_scheme = re.compile(r'^\w+:/*(\w+:|www\.)?') +re_slash = re.compile(r'[?/:|]+') +re_initial_cruft = re.compile(r'^[,.]*') +re_final_cruft = re.compile(r'[,.]*$') + +index = True + +def filename(directory, filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + try: + if re_url_scheme.match(filename): + if isinstance(filename,str): + filename=filename.decode('utf-8').encode('idna') + else: + filename=filename.encode('idna') + except: + pass + if isinstance(filename,unicode): + filename=filename.encode('utf-8') + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + filename = re_initial_cruft.sub("", filename) + filename = re_final_cruft.sub("", filename) + + # limit length of filename + if len(filename)>250: + parts=filename.split(',') + for i in range(len(parts),0,-1): + if len(','.join(parts[:i])) < 220: + import md5 + filename = ','.join(parts[:i]) + ',' + \ + md5.new(','.join(parts[i:])).hexdigest() + break + + return os.path.join(directory, filename) + +def write(xdoc, out): + """ write the document out to disk """ + file = open(out,'w') + file.write(xdoc) + file.close() + +def _is_http_uri(uri): + parsed = urlparse.urlparse(uri) + return parsed[0] in ['http', 'https'] + +def writeCache(feed_uri, feed_info, data): + log = planet.logger + sources = config.cache_sources_directory() + + # capture http status + if not data.has_key("status"): + if data.has_key("entries") and len(data.entries)>0: + data.status = 200 + elif data.bozo and \ + data.bozo_exception.__class__.__name__.lower()=='timeout': + data.status = 408 + else: + data.status = 500 + + activity_horizon = \ + time.gmtime(time.time()-86400*config.activity_threshold(feed_uri)) + + # process based on the HTTP status code + if data.status == 200 and data.has_key("url"): + feed_info.feed['planet_http_location'] = data.url + if data.has_key("entries") and len(data.entries) == 0: + log.warning("No data %s", feed_uri) + feed_info.feed['planet_message'] = 'no data' + elif feed_uri == data.url: + log.info("Updating feed %s", feed_uri) + else: + log.info("Updating feed %s @ %s", feed_uri, data.url) + elif data.status == 301 and data.has_key("entries") and len(data.entries)>0: + log.warning("Feed has moved from <%s> to <%s>", feed_uri, data.url) + data.feed['planet_http_location'] = data.url + elif data.status == 304 and data.has_key("url"): + feed_info.feed['planet_http_location'] = data.url + if feed_uri == data.url: + log.info("Feed %s unchanged", feed_uri) + else: + log.info("Feed %s unchanged @ %s", feed_uri, data.url) + + if not feed_info.feed.has_key('planet_message'): + if feed_info.feed.has_key('planet_updated'): + updated = feed_info.feed.planet_updated + if feedparser._parse_date_iso8601(updated) >= activity_horizon: + return + else: + if feed_info.feed.planet_message.startswith("no activity in"): + return + 
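A small, hypothetical sketch of what the filename() sanitisation above is meant to produce; the feed URL and cache directory are made up, and the exact result follows from the regular expressions defined at the top of spider.py:

from planet.spider import filename   # assumes lib/venus is on sys.path

# scheme stripped, path separators collapsed to commas, giving roughly
# 'cache/planet.intertwingly.net,atom.xml' on a POSIX system
print(filename('cache', 'http://planet.intertwingly.net/atom.xml'))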
if not feed_info.feed.planet_message.startswith("duplicate") and \ + not feed_info.feed.planet_message.startswith("no data"): + del feed_info.feed['planet_message'] + + elif data.status == 410: + log.info("Feed %s gone", feed_uri) + elif data.status == 408: + log.warning("Feed %s timed out", feed_uri) + elif data.status >= 400: + log.error("Error %d while updating feed %s", data.status, feed_uri) + else: + log.info("Updating feed %s", feed_uri) + + # if read failed, retain cached information + if not data.version and feed_info.version: + data.feed = feed_info.feed + data.bozo = feed_info.feed.get('planet_bozo','true') == 'true' + data.version = feed_info.feed.get('planet_format') + data.feed['planet_http_status'] = str(data.status) + + # capture etag and last-modified information + if data.has_key('headers'): + if data.has_key('etag') and data.etag: + data.feed['planet_http_etag'] = data.etag + elif data.headers.has_key('etag') and data.headers['etag']: + data.feed['planet_http_etag'] = data.headers['etag'] + + if data.headers.has_key('last-modified'): + data.feed['planet_http_last_modified']=data.headers['last-modified'] + elif data.has_key('modified') and data.modified: + data.feed['planet_http_last_modified'] = time.asctime(data.modified) + + if data.headers.has_key('-content-hash'): + data.feed['planet_content_hash'] = data.headers['-content-hash'] + + # capture feed and data from the planet configuration file + if data.version: + if not data.feed.has_key('links'): data.feed['links'] = list() + feedtype = 'application/atom+xml' + if data.version.startswith('rss'): feedtype = 'application/rss+xml' + if data.version in ['rss090','rss10']: feedtype = 'application/rdf+xml' + for link in data.feed.links: + if link.rel == 'self': + link['type'] = feedtype + break + else: + data.feed.links.append(feedparser.FeedParserDict( + {'rel':'self', 'type':feedtype, 'href':feed_uri})) + for name, value in config.feed_options(feed_uri).items(): + data.feed['planet_'+name] = value + + # perform user configured scrub operations on the data + scrub.scrub(feed_uri, data) + + from planet import idindex + global index + if index != None: index = idindex.open() + + # select latest entry for each unique id + ids = {} + for entry in data.entries: + # generate an id, if none is present + if not entry.has_key('id') or not entry.id: + entry['id'] = reconstitute.id(None, entry) + if not entry['id']: continue + + # determine updated date for purposes of selection + updated = '' + if entry.has_key('published'): updated=entry.published + if entry.has_key('updated'): updated=entry.updated + + # if not seen or newer than last seen, select it + if updated >= ids.get(entry.id,('',))[0]: + ids[entry.id] = (updated, entry) + + # write each entry to the cache + cache = config.cache_directory() + for updated, entry in ids.values(): + + # compute cache file name based on the id + cache_file = filename(cache, entry.id) + + # get updated-date either from the entry or the cache (default to now) + mtime = None + if not entry.has_key('updated_parsed') or not entry['updated_parsed']: + entry['updated_parsed'] = entry.get('published_parsed',None) + if entry.has_key('updated_parsed'): + try: + mtime = calendar.timegm(entry.updated_parsed) + except: + pass + if not mtime: + try: + mtime = os.stat(cache_file).st_mtime + except: + if data.feed.has_key('updated_parsed'): + try: + mtime = calendar.timegm(data.feed.updated_parsed) + except: + pass + if not mtime: mtime = time.time() + entry['updated_parsed'] = time.gmtime(mtime) + + # apply 
any filters + xdoc = reconstitute.reconstitute(data, entry) + output = xdoc.toxml().encode('utf-8') + xdoc.unlink() + for filter in config.filters(feed_uri): + output = shell.run(filter, output, mode="filter") + if not output: break + if not output: + if os.path.exists(cache_file): os.remove(cache_file) + continue + + # write out and timestamp the results + write(output, cache_file) + os.utime(cache_file, (mtime, mtime)) + + # optionally index + if index != None: + feedid = data.feed.get('id', data.feed.get('link',None)) + if feedid: + if type(feedid) == unicode: feedid = feedid.encode('utf-8') + index[filename('', entry.id)] = feedid + + if index: index.close() + + # identify inactive feeds + if config.activity_threshold(feed_uri): + updated = [entry.updated_parsed for entry in data.entries + if entry.has_key('updated_parsed')] + updated.sort() + + if updated: + data.feed['planet_updated'] = \ + time.strftime("%Y-%m-%dT%H:%M:%SZ", updated[-1]) + elif data.feed.has_key('planet_updated'): + updated = [feedparser._parse_date_iso8601(data.feed.planet_updated)] + + if not updated or updated[-1] < activity_horizon: + msg = "no activity in %d days" % config.activity_threshold(feed_uri) + log.info(msg) + data.feed['planet_message'] = msg + + # report channel level errors + if data.status == 226: + if data.feed.has_key('planet_message'): del data.feed['planet_message'] + if feed_info.feed.has_key('planet_updated'): + data.feed['planet_updated'] = feed_info.feed['planet_updated'] + elif data.status == 403: + data.feed['planet_message'] = "403: forbidden" + elif data.status == 404: + data.feed['planet_message'] = "404: not found" + elif data.status == 408: + data.feed['planet_message'] = "408: request timeout" + elif data.status == 410: + data.feed['planet_message'] = "410: gone" + elif data.status == 500: + data.feed['planet_message'] = "internal server error" + elif data.status >= 400: + data.feed['planet_message'] = "http status %s" % data.status + + # write the feed info to the cache + if not os.path.exists(sources): os.makedirs(sources) + xdoc=minidom.parseString('''<feed xmlns:planet="%s" + xmlns="http://www.w3.org/2005/Atom"/>\n''' % planet.xmlns) + reconstitute.source(xdoc.documentElement,data.feed,data.bozo,data.version) + write(xdoc.toxml().encode('utf-8'), filename(sources, feed_uri)) + xdoc.unlink() + +def httpThread(thread_index, input_queue, output_queue, log): + import httplib2, md5 + from httplib import BadStatusLine + + h = httplib2.Http(config.http_cache_directory()) + uri, feed_info = input_queue.get(block=True) + while uri: + log.info("Fetching %s via %d", uri, thread_index) + feed = StringIO('') + setattr(feed, 'url', uri) + setattr(feed, 'headers', + feedparser.FeedParserDict({'status':'500'})) + try: + # map IRI => URI + try: + if isinstance(uri,unicode): + idna = uri.encode('idna') + else: + idna = uri.decode('utf-8').encode('idna') + if idna != uri: log.info("IRI %s mapped to %s", uri, idna) + except: + log.info("unable to map %s to a URI", uri) + idna = uri + + # cache control headers + headers = {} + if feed_info.feed.has_key('planet_http_etag'): + headers['If-None-Match'] = feed_info.feed['planet_http_etag'] + if feed_info.feed.has_key('planet_http_last_modified'): + headers['If-Modified-Since'] = \ + feed_info.feed['planet_http_last_modified'] + + # issue request + (resp, content) = h.request(idna, 'GET', headers=headers) + + # unchanged detection + resp['-content-hash'] = md5.new(content or '').hexdigest() + if resp.status == 200: + if resp.fromcache: + resp.status = 
304 + elif feed_info.feed.has_key('planet_content_hash') and \ + feed_info.feed['planet_content_hash'] == \ + resp['-content-hash']: + resp.status = 304 + + # build a file-like object + feed = StringIO(content) + setattr(feed, 'url', resp.get('content-location', uri)) + if resp.has_key('content-encoding'): + del resp['content-encoding'] + setattr(feed, 'headers', resp) + except BadStatusLine: + log.error("Bad Status Line received for %s via %d", + uri, thread_index) + except httplib2.HttpLib2Error, e: + log.error("HttpLib2Error: %s via %d", str(e), thread_index) + except socket.error, e: + if e.__class__.__name__.lower()=='timeout': + feed.headers['status'] = '408' + log.warn("Timeout in thread-%d", thread_index) + else: + log.error("HTTP Error: %s in thread-%d", str(e), thread_index) + except Exception, e: + import sys, traceback + type, value, tb = sys.exc_info() + log.error('Error processing %s', uri) + for line in (traceback.format_exception_only(type, value) + + traceback.format_tb(tb)): + log.error(line.rstrip()) + + output_queue.put(block=True, item=(uri, feed_info, feed)) + uri, feed_info = input_queue.get(block=True) + +def spiderPlanet(only_if_new = False): + """ Spider (fetch) an entire planet """ + log = planet.logger + + global index + index = True + + timeout = config.feed_timeout() + try: + socket.setdefaulttimeout(float(timeout)) + log.info("Socket timeout set to %d seconds", timeout) + except: + try: + import timeoutsocket + timeoutsocket.setDefaultSocketTimeout(float(timeout)) + log.info("Socket timeout set to %d seconds", timeout) + except: + log.warning("Timeout set to invalid value '%s', skipping", timeout) + + from Queue import Queue + from threading import Thread + + fetch_queue = Queue() + parse_queue = Queue() + + threads = {} + http_cache = config.http_cache_directory() + # Should this be done in config? 
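The conditional-fetch logic above is easier to see in isolation. A minimal sketch of the same pattern, with a hypothetical feed URL, etag and content hash standing in for the values the spider caches as planet_http_etag and planet_content_hash:

import httplib2, md5

h = httplib2.Http('.cache')                       # on-disk HTTP cache, like the one httpThread builds from config.http_cache_directory()
cached_etag = '"abc123"'                          # hypothetical planet_http_etag
cached_hash = 'd41d8cd98f00b204e9800998ecf8427e'  # hypothetical planet_content_hash

resp, content = h.request('http://planet.example.org/atom.xml', 'GET',
                          headers={'If-None-Match': cached_etag})
if resp.status == 200 and (resp.fromcache or
        md5.new(content or '').hexdigest() == cached_hash):
    resp.status = 304                             # unchanged; the spider skips reprocessing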
+ if http_cache and not os.path.exists(http_cache): + os.makedirs(http_cache) + + + if int(config.spider_threads()): + # Start all the worker threads + for i in range(int(config.spider_threads())): + threads[i] = Thread(target=httpThread, + args=(i,fetch_queue, parse_queue, log)) + threads[i].start() + else: + log.info("Building work queue") + + # Load the fetch and parse work queues + for uri in config.subscriptions(): + # read cached feed info + sources = config.cache_sources_directory() + feed_source = filename(sources, uri) + feed_info = feedparser.parse(feed_source) + + if feed_info.feed and only_if_new: + log.info("Feed %s already in cache", uri) + continue + if feed_info.feed.get('planet_http_status',None) == '410': + log.info("Feed %s gone", uri) + continue + + if threads and _is_http_uri(uri): + fetch_queue.put(item=(uri, feed_info)) + else: + parse_queue.put(item=(uri, feed_info, uri)) + + # Mark the end of the fetch queue + for thread in threads.keys(): + fetch_queue.put(item=(None, None)) + + # Process the results as they arrive + feeds_seen = {} + while fetch_queue.qsize() or parse_queue.qsize() or threads: + while parse_queue.qsize() == 0 and threads: + time.sleep(0.1) + while parse_queue.qsize(): + (uri, feed_info, feed) = parse_queue.get(False) + try: + + if not hasattr(feed,'headers') or int(feed.headers.status)<300: + options = {} + if hasattr(feed_info,'feed'): + options['etag'] = \ + feed_info.feed.get('planet_http_etag',None) + try: + modified=time.strptime( + feed_info.feed.get('planet_http_last_modified', + None)) + except: + pass + + data = feedparser.parse(feed, **options) + else: + data = feedparser.FeedParserDict({'version': None, + 'headers': feed.headers, 'entries': [], 'feed': {}, + 'href': feed.url, 'bozo': 0, + 'status': int(feed.headers.status)}) + + # duplicate feed? 
+ id = data.feed.get('id', None) + if not id: id = feed_info.feed.get('id', None) + + href=uri + if data.has_key('href'): href=data.href + + duplicate = None + if id and id in feeds_seen: + duplicate = id + elif href and href in feeds_seen: + duplicate = href + + if duplicate: + feed_info.feed['planet_message'] = \ + 'duplicate subscription: ' + feeds_seen[duplicate] + log.warn('Duplicate subscription: %s and %s' % + (uri, feeds_seen[duplicate])) + if href: feed_info.feed['planet_http_location'] = href + + if id: feeds_seen[id] = uri + if href: feeds_seen[href] = uri + + # complete processing for the feed + writeCache(uri, feed_info, data) + + except Exception, e: + import sys, traceback + type, value, tb = sys.exc_info() + log.error('Error processing %s', uri) + for line in (traceback.format_exception_only(type, value) + + traceback.format_tb(tb)): + log.error(line.rstrip()) + + for index in threads.keys(): + if not threads[index].isAlive(): + del threads[index] + if not threads: + log.info("Finished threaded part of processing.") diff --git a/lib/venus/planet/splice.py b/lib/venus/planet/splice.py new file mode 100644 index 0000000..f751975 --- /dev/null +++ b/lib/venus/planet/splice.py @@ -0,0 +1,167 @@ +""" Splice together a planet from a cache of feed entries """ +import glob, os, time, shutil +from xml.dom import minidom +import planet, config, feedparser, reconstitute, shell +from reconstitute import createTextElement, date +from spider import filename +from planet import idindex + +def splice(): + """ Splice together a planet from a cache of entries """ + import planet + log = planet.logger + + log.info("Loading cached data") + cache = config.cache_directory() + dir=[(os.stat(file).st_mtime,file) for file in glob.glob(cache+"/*") + if not os.path.isdir(file)] + dir.sort() + dir.reverse() + + max_items=max([config.items_per_page(templ) + for templ in config.template_files() or ['Planet']]) + + doc = minidom.parseString('<feed xmlns="http://www.w3.org/2005/Atom"/>') + feed = doc.documentElement + + # insert feed information + createTextElement(feed, 'title', config.name()) + date(feed, 'updated', time.gmtime()) + gen = createTextElement(feed, 'generator', config.generator()) + gen.setAttribute('uri', config.generator_uri()) + + author = doc.createElement('author') + createTextElement(author, 'name', config.owner_name()) + createTextElement(author, 'email', config.owner_email()) + feed.appendChild(author) + + if config.feed(): + createTextElement(feed, 'id', config.feed()) + link = doc.createElement('link') + link.setAttribute('rel', 'self') + link.setAttribute('href', config.feed()) + if config.feedtype(): + link.setAttribute('type', "application/%s+xml" % config.feedtype()) + feed.appendChild(link) + + if config.link(): + link = doc.createElement('link') + link.setAttribute('rel', 'alternate') + link.setAttribute('href', config.link()) + feed.appendChild(link) + + # insert subscription information + sub_ids = [] + feed.setAttribute('xmlns:planet',planet.xmlns) + sources = config.cache_sources_directory() + for sub in config.subscriptions(): + data=feedparser.parse(filename(sources,sub)) + if data.feed.has_key('id'): sub_ids.append(data.feed.id) + if not data.feed: continue + xdoc=minidom.parseString('''<planet:source xmlns:planet="%s" + xmlns="http://www.w3.org/2005/Atom"/>\n''' % planet.xmlns) + reconstitute.source(xdoc.documentElement, data.feed, None, None) + feed.appendChild(xdoc.documentElement) + + index = idindex.open() + + # insert entry information + items = 0 + count = {} 
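For readers unfamiliar with the minidom idiom that splice() leans on, this standalone sketch shows roughly what a helper such as reconstitute.createTextElement boils down to (it is not the actual helper):

from xml.dom import minidom

doc = minidom.parseString('<feed xmlns="http://www.w3.org/2005/Atom"/>')
feed = doc.documentElement

title = doc.createElement('title')
title.appendChild(doc.createTextNode('Planet Example'))
feed.appendChild(title)

print(doc.toxml())
# <?xml version="1.0" ?><feed xmlns="http://www.w3.org/2005/Atom"><title>Planet Example</title></feed>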
+ new_feed_items = config.new_feed_items() + for mtime,file in dir: + if index != None: + base = os.path.basename(file) + if index.has_key(base) and index[base] not in sub_ids: continue + + try: + entry=minidom.parse(file) + + # verify that this entry is currently subscribed to and that the + # number of entries contributed by this feed does not exceed + # config.new_feed_items + entry.normalize() + sources = entry.getElementsByTagName('source') + if sources: + ids = sources[0].getElementsByTagName('id') + if ids: + id = ids[0].childNodes[0].nodeValue + count[id] = count.get(id,0) + 1 + if new_feed_items and count[id] > new_feed_items: continue + + if id not in sub_ids: + ids = sources[0].getElementsByTagName('planet:id') + if not ids: continue + id = ids[0].childNodes[0].nodeValue + if id not in sub_ids: continue + + # add entry to feed + feed.appendChild(entry.documentElement) + items = items + 1 + if items >= max_items: break + except: + log.error("Error parsing %s", file) + + if index: index.close() + + return doc + +def apply(doc): + output_dir = config.output_dir() + if not os.path.exists(output_dir): os.makedirs(output_dir) + log = planet.logger + + planet_filters = config.filters('Planet') + + # Go-go-gadget-template + for template_file in config.template_files(): + output_file = shell.run(template_file, doc) + + # run any template specific filters + if config.filters(template_file) != planet_filters: + output = open(output_file).read() + for filter in config.filters(template_file): + if filter in planet_filters: continue + if filter.find('>')>0: + # tee'd output + filter,dest = filter.split('>',1) + tee = shell.run(filter.strip(), output, mode="filter") + if tee: + output_dir = planet.config.output_dir() + dest_file = os.path.join(output_dir, dest.strip()) + dest_file = open(dest_file,'w') + dest_file.write(tee) + dest_file.close() + else: + # pipe'd output + output = shell.run(filter, output, mode="filter") + if not output: + os.unlink(output_file) + break + else: + handle = open(output_file,'w') + handle.write(output) + handle.close() + + # Process bill of materials + for copy_file in config.bill_of_materials(): + dest = os.path.join(output_dir, copy_file) + for template_dir in config.template_directories(): + source = os.path.join(template_dir, copy_file) + if os.path.exists(source): break + else: + log.error('Unable to locate %s', copy_file) + log.info("Template search path:") + for template_dir in config.template_directories(): + log.info(" %s", os.path.realpath(template_dir)) + continue + + mtime = os.stat(source).st_mtime + if not os.path.exists(dest) or os.stat(dest).st_mtime < mtime: + dest_dir = os.path.split(dest)[0] + if not os.path.exists(dest_dir): os.makedirs(dest_dir) + + log.info("Copying %s to %s", source, dest) + if os.path.exists(dest): os.chmod(dest, 0644) + shutil.copyfile(source, dest) + shutil.copystat(source, dest) diff --git a/lib/venus/planet/vendor/compat_logging/__init__.py b/lib/venus/planet/vendor/compat_logging/__init__.py new file mode 100644 index 0000000..3b83493 --- /dev/null +++ b/lib/venus/planet/vendor/compat_logging/__init__.py @@ -0,0 +1,1196 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. 
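Taken together, splice() and apply() above are meant to be driven from the top-level planet.py script in this patch; with lib/venus on the path the call sequence is roughly:

from planet import splice

doc = splice.splice()              # merge cached entries into one Atom DOM
splice.apply(doc.toxml('utf-8'))   # render templates, run per-template filters, copy the bill of materials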
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'sys._getframe()' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, os, types, time, string, cStringIO + +try: + import thread + import threading +except ImportError: + thread = None + +__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" +__status__ = "beta" +__version__ = "0.4.8.1" +__date__ = "26 June 2003" + +#--------------------------------------------------------------------------- +# Miscellaneous module data +#--------------------------------------------------------------------------- + +# +#_srcfile is used when walking the stack to check when we've got the first +# caller stack frame. +# +if string.lower(__file__[-4:]) in ['.pyc', '.pyo']: + _srcfile = __file__[:-4] + '.py' +else: + _srcfile = __file__ +_srcfile = os.path.normcase(_srcfile) + +# _srcfile is only used in conjunction with sys._getframe(). +# To provide compatibility with older versions of Python, set _srcfile +# to None if _getframe() is not available; this value will prevent +# findCaller() from being called. +if not hasattr(sys, "_getframe"): + _srcfile = None + +# +#_startTime is used as the base when calculating the relative time of events +# +_startTime = time.time() + +# +#raiseExceptions is used to see if exceptions during handling should be +#propagated +# +raiseExceptions = 1 + +#--------------------------------------------------------------------------- +# Level related stuff +#--------------------------------------------------------------------------- +# +# Default levels and level names, these can be replaced with any positive set +# of values having corresponding names. There is a pseudo-level, NOTSET, which +# is only really there as a lower limit for user-defined levels. Handlers and +# loggers are initialized with NOTSET so that they will log all messages, even +# at user-defined levels. 
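As context for the numeric levels defined just below, the usual calling pattern (the same for this compatibility copy and the standard logging module) looks like:

import logging

logging.basicConfig()
log = logging.getLogger('planet.spider')
log.setLevel(logging.INFO)

log.debug('dropped: DEBUG (10) is below the effective level INFO (20)')
log.info('emitted: INFO meets the threshold')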
+# +CRITICAL = 50 +FATAL = CRITICAL +ERROR = 40 +WARNING = 30 +WARN = WARNING +INFO = 20 +DEBUG = 10 +NOTSET = 0 + +_levelNames = { + CRITICAL : 'CRITICAL', + ERROR : 'ERROR', + WARNING : 'WARNING', + INFO : 'INFO', + DEBUG : 'DEBUG', + NOTSET : 'NOTSET', + 'CRITICAL' : CRITICAL, + 'ERROR' : ERROR, + 'WARN' : WARNING, + 'WARNING' : WARNING, + 'INFO' : INFO, + 'DEBUG' : DEBUG, + 'NOTSET' : NOTSET, +} + +def getLevelName(level): + """ + Return the textual representation of logging level 'level'. + + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. Otherwise, the string + "Level %s" % level is returned. + """ + return _levelNames.get(level, ("Level %s" % level)) + +def addLevelName(level, levelName): + """ + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. + """ + _acquireLock() + try: #unlikely to cause an exception, but you never know... + _levelNames[level] = levelName + _levelNames[levelName] = level + finally: + _releaseLock() + +#--------------------------------------------------------------------------- +# Thread-related stuff +#--------------------------------------------------------------------------- + +# +#_lock is used to serialize access to shared data structures in this module. +#This needs to be an RLock because fileConfig() creates Handlers and so +#might arbitrary user threads. Since Handler.__init__() updates the shared +#dictionary _handlers, it needs to acquire the lock. But if configuring, +#the lock would already have been acquired - so we need an RLock. +#The same argument applies to Loggers and Manager.loggerDict. +# +_lock = None + +def _acquireLock(): + """ + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + """ + global _lock + if (not _lock) and thread: + _lock = threading.RLock() + if _lock: + _lock.acquire() + +def _releaseLock(): + """ + Release the module-level lock acquired by calling _acquireLock(). + """ + if _lock: + _lock.release() + +#--------------------------------------------------------------------------- +# The logging record +#--------------------------------------------------------------------------- + +class LogRecord: + """ + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + """ + def __init__(self, name, level, pathname, lineno, msg, args, exc_info): + """ + Initialize a logging record with interesting information. 
+ """ + ct = time.time() + self.name = name + self.msg = msg + self.args = args + self.levelname = getLevelName(level) + self.levelno = level + self.pathname = pathname + try: + self.filename = os.path.basename(pathname) + self.module = os.path.splitext(self.filename)[0] + except: + self.filename = pathname + self.module = "Unknown module" + self.exc_info = exc_info + self.lineno = lineno + self.created = ct + self.msecs = (ct - long(ct)) * 1000 + self.relativeCreated = (self.created - _startTime) * 1000 + if thread: + self.thread = thread.get_ident() + else: + self.thread = None + if hasattr(os, 'getpid'): + self.process = os.getpid() + else: + self.process = None + + def __str__(self): + return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, + self.pathname, self.lineno, self.msg) + + def getMessage(self): + """ + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message. + """ + if not hasattr(types, "UnicodeType"): #if no unicode support... + msg = str(self.msg) + else: + try: + msg = str(self.msg) + except UnicodeError: + msg = self.msg #Defer encoding till later + if self.args: + msg = msg % self.args + return msg + +def makeLogRecord(dict): + """ + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + """ + rv = LogRecord(None, None, "", 0, "", (), None) + rv.__dict__.update(dict) + return rv + +#--------------------------------------------------------------------------- +# Formatter classes and functions +#--------------------------------------------------------------------------- + +class Formatter: + """ + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + default value of "%s(message)\\n" is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. 
Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + """ + + converter = time.localtime + + def __init__(self, fmt=None, datefmt=None): + """ + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument (if omitted, you get the ISO8601 format). + """ + if fmt: + self._fmt = fmt + else: + self._fmt = "%(message)s" + self.datefmt = datefmt + + def formatTime(self, record, datefmt=None): + """ + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, the ISO8601 format is used. The resulting + string is returned. This function uses a user-configurable function + to convert the creation time to a tuple. By default, time.localtime() + is used; to change this for a particular formatter instance, set the + 'converter' attribute to a function with the same signature as + time.localtime() or time.gmtime(). To change it for all formatters, + for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + """ + ct = self.converter(record.created) + if datefmt: + s = time.strftime(datefmt, ct) + else: + t = time.strftime("%Y-%m-%d %H:%M:%S", ct) + s = "%s,%03d" % (t, record.msecs) + return s + + def formatException(self, ei): + """ + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + """ + import traceback + sio = cStringIO.StringIO() + traceback.print_exception(ei[0], ei[1], ei[2], None, sio) + s = sio.getvalue() + sio.close() + if s[-1] == "\n": + s = s[:-1] + return s + + def format(self, record): + """ + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). 
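A short illustration of the format-string mechanism described above; the URL is hypothetical and the output line is only indicative:

import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '%(asctime)s %(levelname)s %(name)s: %(message)s'))
logging.getLogger().addHandler(handler)
logging.getLogger('planet.spider').warning('Feed %s timed out', 'http://example.org/atom.xml')
# 2008-11-15 16:08:37,042 WARNING planet.spider: Feed http://example.org/atom.xml timed out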
If the formatting string contains + "%(asctime)", formatTime() is called to format the event time. + If there is exception information, it is formatted using + formatException() and appended to the message. + """ + record.message = record.getMessage() + if string.find(self._fmt,"%(asctime)") >= 0: + record.asctime = self.formatTime(record, self.datefmt) + s = self._fmt % record.__dict__ + if record.exc_info: + if s[-1] != "\n": + s = s + "\n" + s = s + self.formatException(record.exc_info) + return s + +# +# The default formatter to use when no other is specified +# +_defaultFormatter = Formatter() + +class BufferingFormatter: + """ + A formatter suitable for formatting a number of records. + """ + def __init__(self, linefmt=None): + """ + Optionally specify a formatter which will be used to format each + individual record. + """ + if linefmt: + self.linefmt = linefmt + else: + self.linefmt = _defaultFormatter + + def formatHeader(self, records): + """ + Return the header string for the specified records. + """ + return "" + + def formatFooter(self, records): + """ + Return the footer string for the specified records. + """ + return "" + + def format(self, records): + """ + Format the specified records and return the result as a string. + """ + rv = "" + if len(records) > 0: + rv = rv + self.formatHeader(records) + for record in records: + rv = rv + self.linefmt.format(record) + rv = rv + self.formatFooter(records) + return rv + +#--------------------------------------------------------------------------- +# Filter classes and functions +#--------------------------------------------------------------------------- + +class Filter: + """ + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + """ + def __init__(self, name=''): + """ + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + """ + self.name = name + self.nlen = len(name) + + def filter(self, record): + """ + Determine if the specified record is to be logged. + + Is the specified record to be logged? Returns 0 for no, nonzero for + yes. If deemed appropriate, the record may be modified in-place. + """ + if self.nlen == 0: + return 1 + elif self.name == record.name: + return 1 + elif string.find(record.name, self.name, 0, self.nlen) != 0: + return 0 + return (record.name[self.nlen] == ".") + +class Filterer: + """ + A base class for loggers and handlers which allows them to share + common code. + """ + def __init__(self): + """ + Initialize the list of filters to be an empty list. + """ + self.filters = [] + + def addFilter(self, filter): + """ + Add the specified filter to this handler. + """ + if not (filter in self.filters): + self.filters.append(filter) + + def removeFilter(self, filter): + """ + Remove the specified filter from this handler. + """ + if filter in self.filters: + self.filters.remove(filter) + + def filter(self, record): + """ + Determine if a record is loggable by consulting all the filters. 
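The prefix rule in the Filter docstring can be checked directly; the record arguments follow the LogRecord signature defined above (name, level, pathname, lineno, msg, args, exc_info):

import logging

f = logging.Filter('planet.shell')
keep = logging.LogRecord('planet.shell.tmpl', logging.INFO, 'example.py', 1, 'kept', (), None)
drop = logging.LogRecord('planet.spider', logging.INFO, 'example.py', 1, 'dropped', (), None)

assert f.filter(keep)        # 'planet.shell.tmpl' sits below 'planet.shell'
assert not f.filter(drop)    # 'planet.spider' is outside that subtree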
+ + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + """ + rv = 1 + for f in self.filters: + if not f.filter(record): + rv = 0 + break + return rv + +#--------------------------------------------------------------------------- +# Handler classes and functions +#--------------------------------------------------------------------------- + +_handlers = {} #repository of handlers (for flushing when shutdown called) + +class Handler(Filterer): + """ + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + """ + def __init__(self, level=NOTSET): + """ + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + """ + Filterer.__init__(self) + self.level = level + self.formatter = None + #get the module data lock, as we're updating a shared structure. + _acquireLock() + try: #unlikely to raise an exception, but you never know... + _handlers[self] = 1 + finally: + _releaseLock() + self.createLock() + + def createLock(self): + """ + Acquire a thread lock for serializing access to the underlying I/O. + """ + if thread: + self.lock = thread.allocate_lock() + else: + self.lock = None + + def acquire(self): + """ + Acquire the I/O thread lock. + """ + if self.lock: + self.lock.acquire() + + def release(self): + """ + Release the I/O thread lock. + """ + if self.lock: + self.lock.release() + + def setLevel(self, level): + """ + Set the logging level of this handler. + """ + self.level = level + + def format(self, record): + """ + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. + """ + if self.formatter: + fmt = self.formatter + else: + fmt = _defaultFormatter + return fmt.format(record) + + def emit(self, record): + """ + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + """ + raise NotImplementedError, 'emit must be implemented '\ + 'by Handler subclasses' + + def handle(self, record): + """ + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + """ + rv = self.filter(record) + if rv: + self.acquire() + try: + self.emit(record) + finally: + self.release() + return rv + + def setFormatter(self, fmt): + """ + Set the formatter for this handler. + """ + self.formatter = fmt + + def flush(self): + """ + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def close(self): + """ + Tidy up any resources used by the handler. + + This version does nothing and is intended to be implemented by + subclasses. + """ + pass + + def handleError(self, record): + """ + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. 
If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + """ + if raiseExceptions: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + +class StreamHandler(Handler): + """ + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + """ + def __init__(self, strm=None): + """ + Initialize the handler. + + If strm is not specified, sys.stderr is used. + """ + Handler.__init__(self) + if not strm: + strm = sys.stderr + self.stream = strm + self.formatter = None + + def flush(self): + """ + Flushes the stream. + """ + self.stream.flush() + + def emit(self, record): + """ + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline + [N.B. this may be removed depending on feedback]. If exception + information is present, it is formatted using + traceback.print_exception and appended to the stream. + """ + try: + msg = self.format(record) + if not hasattr(types, "UnicodeType"): #if no unicode support... + self.stream.write("%s\n" % msg) + else: + try: + self.stream.write("%s\n" % msg) + except UnicodeError: + self.stream.write("%s\n" % msg.encode("UTF-8")) + self.flush() + except: + self.handleError(record) + +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. + """ + def __init__(self, filename, mode="a"): + """ + Open the specified file and use it as the stream for logging. + """ + StreamHandler.__init__(self, open(filename, mode)) + self.baseFilename = filename + self.mode = mode + + def close(self): + """ + Closes the stream. + """ + self.stream.close() + +#--------------------------------------------------------------------------- +# Manager classes and functions +#--------------------------------------------------------------------------- + +class PlaceHolder: + """ + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined [FIXME add + example]. + """ + def __init__(self, alogger): + """ + Initialize with the specified logger being a child of this placeholder. + """ + self.loggers = [alogger] + + def append(self, alogger): + """ + Add the specified logger as a child of this placeholder. + """ + if alogger not in self.loggers: + self.loggers.append(alogger) + +# +# Determine which class to use when instantiating loggers. +# +_loggerClass = None + +def setLoggerClass(klass): + """ + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + """ + if klass != Logger: + if not issubclass(klass, Logger): + raise TypeError, "logger not derived from logging.Logger: " + \ + klass.__name__ + global _loggerClass + _loggerClass = klass + +class Manager: + """ + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. 
+ """ + def __init__(self, rootnode): + """ + Initialize the manager with the root node of the logger hierarchy. + """ + self.root = rootnode + self.disable = 0 + self.emittedNoHandlerWarning = 0 + self.loggerDict = {} + + def getLogger(self, name): + """ + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + """ + rv = None + _acquireLock() + try: + if self.loggerDict.has_key(name): + rv = self.loggerDict[name] + if isinstance(rv, PlaceHolder): + ph = rv + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupChildren(ph, rv) + self._fixupParents(rv) + else: + rv = _loggerClass(name) + rv.manager = self + self.loggerDict[name] = rv + self._fixupParents(rv) + finally: + _releaseLock() + return rv + + def _fixupParents(self, alogger): + """ + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + """ + name = alogger.name + i = string.rfind(name, ".") + rv = None + while (i > 0) and not rv: + substr = name[:i] + if not self.loggerDict.has_key(substr): + self.loggerDict[substr] = PlaceHolder(alogger) + else: + obj = self.loggerDict[substr] + if isinstance(obj, Logger): + rv = obj + else: + assert isinstance(obj, PlaceHolder) + obj.append(alogger) + i = string.rfind(name, ".", 0, i - 1) + if not rv: + rv = self.root + alogger.parent = rv + + def _fixupChildren(self, ph, alogger): + """ + Ensure that children of the placeholder ph are connected to the + specified logger. + """ + for c in ph.loggers: + if string.find(c.parent.name, alogger.name) <> 0: + alogger.parent = c.parent + c.parent = alogger + +#--------------------------------------------------------------------------- +# Logger classes and functions +#--------------------------------------------------------------------------- + +class Logger(Filterer): + """ + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + """ + def __init__(self, name, level=NOTSET): + """ + Initialize the logger with a name and an optional level. + """ + Filterer.__init__(self) + self.name = name + self.level = level + self.parent = None + self.propagate = 1 + self.handlers = [] + self.disabled = 0 + + def setLevel(self, level): + """ + Set the logging level of this logger. + """ + self.level = level + +# def getRoot(self): +# """ +# Get the root of the logger hierarchy. 
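The dotted-name hierarchy that the Manager maintains is what makes handler propagation work; a small sketch:

import logging

logging.getLogger('planet').addHandler(logging.StreamHandler())

# Created on demand; its parent resolves to the 'planet' logger, so the record
# propagates upward and is emitted by the handler attached there.
logging.getLogger('planet.shell.xslt').error('stylesheet failed')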
+# """ +# return Logger.root + + def debug(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + """ + if self.manager.disable >= DEBUG: + return + if DEBUG >= self.getEffectiveLevel(): + apply(self._log, (DEBUG, msg, args), kwargs) + + def info(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + """ + if self.manager.disable >= INFO: + return + if INFO >= self.getEffectiveLevel(): + apply(self._log, (INFO, msg, args), kwargs) + + def warning(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + """ + if self.manager.disable >= WARNING: + return + if self.isEnabledFor(WARNING): + apply(self._log, (WARNING, msg, args), kwargs) + + warn = warning + + def error(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + """ + if self.manager.disable >= ERROR: + return + if self.isEnabledFor(ERROR): + apply(self._log, (ERROR, msg, args), kwargs) + + def exception(self, msg, *args): + """ + Convenience method for logging an ERROR with exception information. + """ + apply(self.error, (msg,) + args, {'exc_info': 1}) + + def critical(self, msg, *args, **kwargs): + """ + Log 'msg % args' with severity 'CRITICAL'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + """ + if self.manager.disable >= CRITICAL: + return + if CRITICAL >= self.getEffectiveLevel(): + apply(self._log, (CRITICAL, msg, args), kwargs) + + fatal = critical + + def log(self, level, msg, *args, **kwargs): + """ + Log 'msg % args' with the severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + """ + if self.manager.disable >= level: + return + if self.isEnabledFor(level): + apply(self._log, (level, msg, args), kwargs) + + def findCaller(self): + """ + Find the stack frame of the caller so that we can note the source + file name and line number. + """ + f = sys._getframe(1) + while 1: + co = f.f_code + filename = os.path.normcase(co.co_filename) + if filename == _srcfile: + f = f.f_back + continue + return filename, f.f_lineno + + def makeRecord(self, name, level, fn, lno, msg, args, exc_info): + """ + A factory method which can be overridden in subclasses to create + specialized LogRecords. + """ + return LogRecord(name, level, fn, lno, msg, args, exc_info) + + def _log(self, level, msg, args, exc_info=None): + """ + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. 
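The exc_info convention mentioned in the docstrings above, and the exception() shorthand, in use (the URL is hypothetical):

import logging

logging.basicConfig()
log = logging.getLogger('planet.spider')
try:
    1 / 0
except ZeroDivisionError:
    log.error('Error processing %s', 'http://example.org/atom.xml', exc_info=1)
    log.exception('Error processing %s', 'http://example.org/atom.xml')  # same, without the explicit flag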
+ """ + if _srcfile: + fn, lno = self.findCaller() + else: + fn, lno = "<unknown file>", 0 + if exc_info: + exc_info = sys.exc_info() + record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info) + self.handle(record) + + def handle(self, record): + """ + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + """ + if (not self.disabled) and self.filter(record): + self.callHandlers(record) + + def addHandler(self, hdlr): + """ + Add the specified handler to this logger. + """ + if not (hdlr in self.handlers): + self.handlers.append(hdlr) + + def removeHandler(self, hdlr): + """ + Remove the specified handler from this logger. + """ + if hdlr in self.handlers: + #hdlr.close() + self.handlers.remove(hdlr) + + def callHandlers(self, record): + """ + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + """ + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None #break out + else: + c = c.parent + if (found == 0) and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = 1 + + def getEffectiveLevel(self): + """ + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + """ + logger = self + while logger: + if logger.level: + return logger.level + logger = logger.parent + return NOTSET + + def isEnabledFor(self, level): + """ + Is this logger enabled for level 'level'? + """ + if self.manager.disable >= level: + return 0 + return level >= self.getEffectiveLevel() + +class RootLogger(Logger): + """ + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + """ + def __init__(self, level): + """ + Initialize the logger with the name "root". + """ + Logger.__init__(self, "root", level) + +_loggerClass = Logger + +root = RootLogger(WARNING) +Logger.root = root +Logger.manager = Manager(Logger.root) + +#--------------------------------------------------------------------------- +# Configuration classes and functions +#--------------------------------------------------------------------------- + +BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" + +def basicConfig(format=BASIC_FORMAT): + """ + Do basic configuration for the logging system by creating a + StreamHandler with a default Formatter and adding it to the + root logger. + """ + if len(root.handlers) == 0: + hdlr = StreamHandler() + fmt = Formatter(format) + hdlr.setFormatter(fmt) + root.addHandler(hdlr) + +#--------------------------------------------------------------------------- +# Utility functions at module level. +# Basically delegate everything to the root logger. 
+#--------------------------------------------------------------------------- + +def getLogger(name=None): + """ + Return a logger with the specified name, creating it if necessary. + + If no name is specified, return the root logger. + """ + if name: + return Logger.manager.getLogger(name) + else: + return root + +#def getRootLogger(): +# """ +# Return the root logger. +# +# Note that getLogger('') now does the same thing, so this function is +# deprecated and may disappear in the future. +# """ +# return root + +def critical(msg, *args, **kwargs): + """ + Log a message with severity 'CRITICAL' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.critical, (msg,)+args, kwargs) + +fatal = critical + +def error(msg, *args, **kwargs): + """ + Log a message with severity 'ERROR' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.error, (msg,)+args, kwargs) + +def exception(msg, *args): + """ + Log a message with severity 'ERROR' on the root logger, + with exception information. + """ + apply(error, (msg,)+args, {'exc_info': 1}) + +def warning(msg, *args, **kwargs): + """ + Log a message with severity 'WARNING' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.warning, (msg,)+args, kwargs) + +warn = warning + +def info(msg, *args, **kwargs): + """ + Log a message with severity 'INFO' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.info, (msg,)+args, kwargs) + +def debug(msg, *args, **kwargs): + """ + Log a message with severity 'DEBUG' on the root logger. + """ + if len(root.handlers) == 0: + basicConfig() + apply(root.debug, (msg,)+args, kwargs) + +def disable(level): + """ + Disable all logging calls less severe than 'level'. + """ + root.manager.disable = level + +def shutdown(): + """ + Perform any cleanup actions in the logging system (e.g. flushing + buffers). + + Should be called at application exit. + """ + for h in _handlers.keys(): + h.flush() + h.close() diff --git a/lib/venus/planet/vendor/compat_logging/config.py b/lib/venus/planet/vendor/compat_logging/config.py new file mode 100644 index 0000000..d4d08f0 --- /dev/null +++ b/lib/venus/planet/vendor/compat_logging/config.py @@ -0,0 +1,299 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. 
+ +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, logging.handlers, string, thread, threading, socket, struct, os + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 +if sys.platform == "win32": + RESET_ERROR = 10054 #WSAECONNRESET +else: + RESET_ERROR = 104 #ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + In versions of ConfigParser which have the readfp method [typically + shipped in 2.x versions of Python], you can pass in a file-like object + rather than a filename, in which case the file-like object will be read + using readfp. + """ + import ConfigParser + + cp = ConfigParser.ConfigParser(defaults) + if hasattr(cp, 'readfp') and hasattr(fname, 'readline'): + cp.readfp(fname) + else: + cp.read(fname) + #first, do the formatters... + flist = cp.get("formatters", "keys") + if len(flist): + flist = string.split(flist, ",") + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + opts = cp.options(sectname) + if "format" in opts: + fs = cp.get(sectname, "format", 1) + else: + fs = None + if "datefmt" in opts: + dfs = cp.get(sectname, "datefmt", 1) + else: + dfs = None + f = logging.Formatter(fs, dfs) + formatters[form] = f + #next, do the handlers... + #critical section... + logging._acquireLock() + try: + try: + #first, lose the existing handlers... + logging._handlers.clear() + #now set up the new ones... + hlist = cp.get("handlers", "keys") + if len(hlist): + hlist = string.split(hlist, ",") + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + sectname = "handler_%s" % hand + klass = cp.get(sectname, "class") + opts = cp.options(sectname) + if "formatter" in opts: + fmt = cp.get(sectname, "formatter") + else: + fmt = "" + klass = eval(klass, vars(logging)) + args = cp.get(sectname, "args") + args = eval(args, vars(logging)) + h = apply(klass, args) + if "level" in opts: + level = cp.get(sectname, "level") + h.setLevel(logging._levelNames[level]) + if len(fmt): + h.setFormatter(formatters[fmt]) + #temporary hack for FileHandler and MemoryHandler. + if klass == logging.handlers.MemoryHandler: + if "target" in opts: + target = cp.get(sectname,"target") + else: + target = "" + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... + for fixup in fixups: + h = fixup[0] + t = fixup[1] + h.setTarget(handlers[t]) + #at last, the loggers...first the root... 
+ llist = cp.get("loggers", "keys") + llist = string.split(llist, ",") + llist.remove("root") + sectname = "logger_root" + root = logging.root + log = root + opts = cp.options(sectname) + if "level" in opts: + level = cp.get(sectname, "level") + log.setLevel(logging._levelNames[level]) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + log.addHandler(handlers[hand]) + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + existing = root.manager.loggerDict.keys() + #now set up the new ones... + for log in llist: + sectname = "logger_%s" % log + qn = cp.get(sectname, "qualname") + opts = cp.options(sectname) + if "propagate" in opts: + propagate = cp.getint(sectname, "propagate") + else: + propagate = 1 + logger = logging.getLogger(qn) + if qn in existing: + existing.remove(qn) + if "level" in opts: + level = cp.get(sectname, "level") + logger.setLevel(logging._levelNames[level]) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = cp.get(sectname, "handlers") + if len(hlist): + hlist = string.split(hlist, ",") + for hand in hlist: + logger.addHandler(handlers[hand]) + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + for log in existing: + root.manager.loggerDict[log].disabled = 1 + except: + import traceback + ei = sys.exc_info() + traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) + del ei + finally: + logging._releaseLock() + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). + Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + """ + if not thread: + raise NotImplementedError, "listen() needs threading to work" + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, + followed by the config file. Uses fileConfig() to do the + grunt work. + """ + import tempfile + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + #Apply new configuration. We'd like to be able to + #create a StringIO and pass that in, but unfortunately + #1.5.2 ConfigParser does not support reading file + #objects, only actual files. So we create a temporary + #file and remove it later. 
+ file = tempfile.mktemp(".ini") + f = open(file, "w") + f.write(chunk) + f.close() + fileConfig(file) + os.remove(file) + except socket.error, e: + if type(e.args) != types.TupleType: + raise + else: + errcode = e.args[0] + if errcode != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. + """ + + allow_reuse_address = 1 + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + logging._acquireLock() + self.abort = 0 + logging._releaseLock() + self.timeout = 1 + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + logging._acquireLock() + abort = self.abort + logging._releaseLock() + + def serve(rcvr, hdlr, port): + server = rcvr(port=port, handler=hdlr) + global _listener + logging._acquireLock() + _listener = server + logging._releaseLock() + server.serve_until_stopped() + + return threading.Thread(target=serve, + args=(ConfigSocketReceiver, + ConfigStreamHandler, port)) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + if _listener: + logging._acquireLock() + _listener.abort = 1 + _listener = None + logging._releaseLock() diff --git a/lib/venus/planet/vendor/compat_logging/handlers.py b/lib/venus/planet/vendor/compat_logging/handlers.py new file mode 100644 index 0000000..26ca8ad --- /dev/null +++ b/lib/venus/planet/vendor/compat_logging/handlers.py @@ -0,0 +1,728 @@ +# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python, and influenced by Apache's log4j system. + +Should work under Python versions >= 1.5.2, except that source line +information is not available unless 'inspect' is. + +Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import sys, logging, socket, types, os, string, cPickle, struct, time + +from SocketServer import ThreadingTCPServer, StreamRequestHandler + +# +# Some constants... 
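To make the expected input concrete, a hypothetical minimal ConfigParser-format file for the fileConfig() function above, written out and loaded from Python; the file name and the "console"/"plain" section names are made up. Each name listed under a "keys" option gets its own formatter_*/handler_*/logger_* section, a "root" logger entry is required, and the handler's "class" and "args" options are evaluated in the logging module's namespace (which is why StreamHandler and sys.stderr resolve):

ini = """\
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=plain

[logger_root]
level=DEBUG
handlers=console

[handler_console]
class=StreamHandler
level=DEBUG
formatter=plain
args=(sys.stderr,)

[formatter_plain]
format=%(levelname)s:%(name)s:%(message)s
"""

open("logging.ini", "w").write(ini)       # made-up file name

import logging.config                     # assumes the package is installed as `logging`
logging.config.fileConfig("logging.ini")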
+# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 + + +class RotatingFileHandler(logging.FileHandler): + def __init__(self, filename, mode="a", maxBytes=0, backupCount=0): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + logging.FileHandler.__init__(self, filename, mode) + self.maxBytes = maxBytes + self.backupCount = backupCount + if maxBytes > 0: + self.mode = "a" + + def doRollover(self): + """ + Do a rollover, as described in __init__(). + """ + + self.stream.close() + if self.backupCount > 0: + for i in range(self.backupCount - 1, 0, -1): + sfn = "%s.%d" % (self.baseFilename, i) + dfn = "%s.%d" % (self.baseFilename, i + 1) + if os.path.exists(sfn): + #print "%s -> %s" % (sfn, dfn) + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.baseFilename + ".1" + if os.path.exists(dfn): + os.remove(dfn) + os.rename(self.baseFilename, dfn) + #print "%s -> %s" % (self.baseFilename, dfn) + self.stream = open(self.baseFilename, "w") + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + if self.maxBytes > 0: # are we rolling over? + msg = "%s\n" % self.format(record) + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() + len(msg) >= self.maxBytes: + self.doRollover() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + The attribute 'closeOnError' is set to 1 - which means that if + a socket error occurs, the socket is silently closed and then + reopened on the next logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + self.sock = None + self.closeOnError = 0 + + def makeSocket(self): + """ + A factory method which allows subclasses to define the precise + type of socket they want. 
+ """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((self.host, self.port)) + return s + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if hasattr(self.sock, "sendall"): + self.sock.sendall(s) + else: + sentsofar = 0 + left = len(s) + while left > 0: + sent = self.sock.send(s[sentsofar:]) + sentsofar = sentsofar + sent + left = left - sent + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + s = cPickle.dumps(record.__dict__, 1) + #n = len(s) + #slen = "%c%c" % ((n >> 8) & 0xFF, n & 0xFF) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + if not self.sock: + self.sock = self.makeSocket() + self.send(s) + except: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + if self.sock: + self.sock.close() + self.sock = None + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + """ + SocketHandler.__init__(self, host, port) + self.closeOnError = 0 + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + """ + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + self.sock.sendto(s, (self.host, self.port)) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from <linux/sys/syslog.h>: + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. 
This + # mapping is included in this file. + # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "security": LOG_AUTH, # DEPRECATED + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): + """ + Initialize a handler. + + If address is specified as a string, UNIX socket is used. + If facility is not specified, LOG_USER is used. + """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + if type(address) == types.StringType: + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + # syslog may require either DGRAM or STREAM sockets + try: + self.socket.connect(address) + except socket.error: + self.socket.close() + self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.socket.connect(address) + self.unixsocket = 1 + else: + self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.unixsocket = 0 + + self.formatter = None + + # curious: when talking to the unix-domain '/dev/log' socket, a + # zero-terminator seems to be required. this string is placed + # into a class variable so that it can be overridden if + # necessary. + log_format_string = '<%d>%s\000' + + def encodePriority (self, facility, priority): + """ + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. 
+ """ + if type(facility) == types.StringType: + facility = self.facility_names[facility] + if type(priority) == types.StringType: + priority = self.priority_names[priority] + return (facility << 3) | priority + + def close (self): + """ + Closes the socket. + """ + if self.unixsocket: + self.socket.close() + + def emit(self, record): + """ + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + """ + msg = self.format(record) + """ + We need to convert record level to lowercase, maybe this will + change in the future. + """ + msg = self.log_format_string % ( + self.encodePriority(self.facility, + string.lower(record.levelname)), + msg) + try: + if self.unixsocket: + self.socket.send(msg) + else: + self.socket.sendto(msg, self.address) + except: + self.handleError(record) + +class SMTPHandler(logging.Handler): + """ + A handler class which sends an SMTP email for each logging event. + """ + def __init__(self, mailhost, fromaddr, toaddrs, subject): + """ + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. + """ + logging.Handler.__init__(self) + if type(mailhost) == types.TupleType: + host, port = mailhost + self.mailhost = host + self.mailport = port + else: + self.mailhost = mailhost + self.mailport = None + self.fromaddr = fromaddr + if type(toaddrs) == types.StringType: + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + + monthname = [None, + 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', + 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + + def date_time(self): + """Return the current date and time formatted for a MIME header.""" + year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time()) + s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( + self.weekdayname[wd], + day, self.monthname[month], year, + hh, mm, ss) + return s + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. + """ + try: + import smtplib + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + string.join(self.toaddrs, ","), + self.getSubject(record), + self.date_time(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. 
+ """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + self._welu.AddSourceToRegistry(appname, dllname, logtype) + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print "The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available." + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except: + self.handleError(record) + + def close(self): + """ + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + pass + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a Web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET"): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = string.upper(method) + if method not in ["GET", "POST"]: + raise ValueError, "method must be GET or POST" + self.host = host + self.url = url + self.method = method + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is send as the CGI data. 
Overwrite in your class. + Contributed by Franz Glasner. + """ + return record.__dict__ + + def emit(self, record): + """ + Emit a record. + + Send the record to the Web server as an URL-encoded dictionary + """ + try: + import httplib, urllib + h = httplib.HTTP(self.host) + url = self.url + data = urllib.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (string.find(url, '?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + if self.method == "POST": + h.putheader("Content-length", str(len(data))) + h.endheaders() + if self.method == "POST": + h.send(data) + h.getreply() #can't do anything with the result + except: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + self.buffer = [] + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! + """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. + """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + self.target = target + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + """ + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer = [] + + def close(self): + """ + Flush, set the target to None and lose the buffer. 
+ """ + self.flush() + self.target = None + self.buffer = [] diff --git a/lib/venus/planet/vendor/feedparser.py b/lib/venus/planet/vendor/feedparser.py new file mode 100755 index 0000000..3208684 --- /dev/null +++ b/lib/venus/planet/vendor/feedparser.py @@ -0,0 +1,3612 @@ +#!/usr/bin/env python +"""Universal feed parser + +Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds + +Visit http://feedparser.org/ for the latest version +Visit http://feedparser.org/docs/ for the latest documentation + +Required: Python 2.1 or later +Recommended: Python 2.3 or later +Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> +""" + +__version__ = "4.2-pre-" + "$Revision: 291 $"[11:14] + "-svn" +__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim <http://diveintomark.org/>" +__contributors__ = ["Jason Diamond <http://injektilo.org/>", + "John Beimler <http://john.beimler.org/>", + "Fazal Majid <http://www.majid.info/mylos/weblog/>", + "Aaron Swartz <http://aaronsw.com/>", + "Kevin Marks <http://epeus.blogspot.com/>", + "Sam Ruby <http://intertwingly.net/>"] +_debug = 0 + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. +ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" + +# List of preferred XML parsers, by SAX driver name. These will be tried first, +# but if they're not installed, Python will keep searching through its own list +# of pre-installed parsers until it finds one that supports everything we need. +PREFERRED_XML_PARSERS = ["drv_libxml2"] + +# If you want feedparser to automatically run HTML markup through HTML Tidy, set +# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> +# or utidylib <http://utidylib.berlios.de/>. +TIDY_MARKUP = 0 + +# List of Python interfaces for HTML Tidy, in order of preference. 
Only useful +# if TIDY_MARKUP = 1 +PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] + +# If you want feedparser to automatically resolve all relative URIs, set this +# to 1. +RESOLVE_RELATIVE_URIS = 1 + +# If you want feedparser to automatically sanitize all potentially unsafe +# HTML content, set this to 1. +SANITIZE_HTML = 1 + +# ---------- required modules (should come with any Python distribution) ---------- +import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2 +try: + from cStringIO import StringIO as _StringIO +except: + from StringIO import StringIO as _StringIO + +# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- + +# gzip is included with most Python distributions, but may not be available if you compiled your own +try: + import gzip +except: + gzip = None +try: + import zlib +except: + zlib = None + +# If a real XML parser is available, feedparser will attempt to use it. feedparser has +# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the +# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some +# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. +try: + import xml.sax + xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers + from xml.sax.saxutils import escape as _xmlescape + _XML_AVAILABLE = 1 +except: + _XML_AVAILABLE = 0 + def _xmlescape(data,entities={}): + data = data.replace('&', '&') + data = data.replace('>', '>') + data = data.replace('<', '<') + for char, entity in entities: + data = data.replace(char, entity) + return data + +# base64 support for Atom feeds that contain embedded binary data +try: + import base64, binascii +except: + base64 = binascii = None + +# cjkcodecs and iconv_codec provide support for more character encodings. +# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except: + pass +try: + import iconv_codec +except: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet + if _debug: + import chardet.constants + chardet.constants._debug = 1 +except: + chardet = None + +# reversable htmlentitydefs mappings for Python 2.2 +try: + from htmlentitydefs import name2codepoint, codepoint2name +except: + import htmlentitydefs + name2codepoint={} + codepoint2name={} + for (name,codepoint) in htmlentitydefs.entitydefs.iteritems(): + if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1])) + name2codepoint[name]=ord(codepoint) + codepoint2name[ord(codepoint)]=name + +# BeautifulSoup parser used for parsing microformats from embedded HTML content +# http://www.crummy.com/software/BeautifulSoup/ +# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the +# older 2.x series. If it doesn't, and you can figure out why, I'll accept a +# patch and modify the compatibility statement accordingly. 
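These module-level switches are meant to be adjusted by the embedding application before parsing, as the USER_AGENT comment above suggests. A rough sketch, assuming the vendored copy is importable as `feedparser`; the agent string and feed URL are placeholders:

import feedparser

feedparser.USER_AGENT = "ExamplePlanet/1.0 +http://planet.example.org/"  # placeholder application name/URL
feedparser.RESOLVE_RELATIVE_URIS = 1   # rewrite relative links against the document base
feedparser.SANITIZE_HTML = 1           # scrub potentially unsafe markup from content

d = feedparser.parse("http://example.org/feed.atom")   # also accepts a filename or a string
print d.get("version", ""), d.feed.get("title", ""), len(d.entries)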
+try: + import BeautifulSoup +except: + BeautifulSoup = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +sgmllib.special = re.compile('<!') +sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);') + +if sgmllib.endbracket.search(' <').start(0): + class EndBracketMatch: + endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') + def search(self,string,index=0): + self.match = self.endbracket.match(string,index) + if self.match: return self + def start(self,n): + return self.match.end(n) + sgmllib.endbracket = EndBracketMatch() + +SUPPORTED_VERSIONS = {'': 'unknown', + 'rss090': 'RSS 0.90', + 'rss091n': 'RSS 0.91 (Netscape)', + 'rss091u': 'RSS 0.91 (Userland)', + 'rss092': 'RSS 0.92', + 'rss093': 'RSS 0.93', + 'rss094': 'RSS 0.94', + 'rss20': 'RSS 2.0', + 'rss10': 'RSS 1.0', + 'rss': 'RSS (unknown version)', + 'atom01': 'Atom 0.1', + 'atom02': 'Atom 0.2', + 'atom03': 'Atom 0.3', + 'atom10': 'Atom 1.0', + 'atom': 'Atom (unknown version)', + 'cdf': 'CDF', + 'hotrss': 'Hot RSS' + } + +try: + UserDict = dict +except NameError: + # Python 2.1 does not have dict + from UserDict import UserDict + def dict(aList): + rc = {} + for k, v in aList: + rc[k] = v + return rc + +class FeedParserDict(UserDict): + keymap = {'channel': 'feed', + 'items': 'entries', + 'guid': 'id', + 'date': 'updated', + 'date_parsed': 'updated_parsed', + 'description': ['subtitle', 'summary'], + 'url': ['href'], + 'modified': 'updated', + 'modified_parsed': 'updated_parsed', + 'issued': 'published', + 'issued_parsed': 'published_parsed', + 'copyright': 'rights', + 'copyright_detail': 'rights_detail', + 'tagline': 'subtitle', + 'tagline_detail': 'subtitle_detail'} + def __getitem__(self, key): + if key == 'category': + return UserDict.__getitem__(self, 'tags')[0]['term'] + if key == 'enclosures': + norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel']) + return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure'] + if key == 'license': + for link in UserDict.__getitem__(self, 'links'): + if link['rel']=='license' and link.has_key('href'): + return link['href'] + if key == 'categories': + return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')] + realkey = self.keymap.get(key, key) + if type(realkey) == types.ListType: + for k in realkey: + if UserDict.has_key(self, k): + return UserDict.__getitem__(self, k) + if UserDict.has_key(self, key): + return UserDict.__getitem__(self, key) + return UserDict.__getitem__(self, realkey) + + def __setitem__(self, key, value): + for k in self.keymap.keys(): + if key == k: + key = self.keymap[k] + if type(key) == types.ListType: + key = key[0] + return UserDict.__setitem__(self, key, value) + + def get(self, key, default=None): + if self.has_key(key): + return self[key] + else: + return default + + def setdefault(self, key, value): + if not self.has_key(key): + self[key] = value + return self[key] + + def has_key(self, key): + try: + return hasattr(self, key) or UserDict.has_key(self, key) + except AttributeError: + return False + + def __getattr__(self, key): + try: + return 
self.__dict__[key] + except KeyError: + pass + try: + assert not key.startswith('_') + return self.__getitem__(key) + except: + raise AttributeError, "object has no attribute '%s'" % key + + def __setattr__(self, key, value): + if key.startswith('_') or key == 'data': + self.__dict__[key] = value + else: + return self.__setitem__(key, value) + + def __contains__(self, key): + return self.has_key(key) + +def zopeCompatibilityHack(): + global FeedParserDict + del FeedParserDict + def FeedParserDict(aDict=None): + rc = {} + if aDict: + rc.update(aDict) + return rc + +_ebcdic_to_ascii_map = None +def _ebcdic_to_ascii(s): + global _ebcdic_to_ascii_map + if not _ebcdic_to_ascii_map: + emap = ( + 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, + 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, + 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, + 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, + 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, + 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, + 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, + 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, + 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, + 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, + 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, + 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, + 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, + 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, + 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, + 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 + ) + import string + _ebcdic_to_ascii_map = string.maketrans( \ + ''.join(map(chr, range(256))), ''.join(map(chr, emap))) + return s.translate(_ebcdic_to_ascii_map) + +_cp1252 = { + unichr(128): unichr(8364), # euro sign + unichr(130): unichr(8218), # single low-9 quotation mark + unichr(131): unichr( 402), # latin small letter f with hook + unichr(132): unichr(8222), # double low-9 quotation mark + unichr(133): unichr(8230), # horizontal ellipsis + unichr(134): unichr(8224), # dagger + unichr(135): unichr(8225), # double dagger + unichr(136): unichr( 710), # modifier letter circumflex accent + unichr(137): unichr(8240), # per mille sign + unichr(138): unichr( 352), # latin capital letter s with caron + unichr(139): unichr(8249), # single left-pointing angle quotation mark + unichr(140): unichr( 338), # latin capital ligature oe + unichr(142): unichr( 381), # latin capital letter z with caron + unichr(145): unichr(8216), # left single quotation mark + unichr(146): unichr(8217), # right single quotation mark + unichr(147): unichr(8220), # left double quotation mark + unichr(148): unichr(8221), # right double quotation mark + unichr(149): unichr(8226), # bullet + unichr(150): unichr(8211), # en dash + unichr(151): unichr(8212), # em dash + unichr(152): unichr( 732), # small tilde + unichr(153): unichr(8482), # trade mark sign + unichr(154): unichr( 353), # latin small letter s with caron + unichr(155): unichr(8250), # single right-pointing angle quotation mark + unichr(156): unichr( 339), # latin small ligature oe + unichr(158): unichr( 382), # latin small letter z with caron + unichr(159): unichr( 376)} # latin capital letter y with diaeresis + +_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') +def _urljoin(base, uri): + uri = _urifixer.sub(r'\1\3', uri) + try: + return urlparse.urljoin(base, uri) + except: + uri = 
urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)]) + return urlparse.urljoin(base, uri) + +class _FeedParserMixin: + namespaces = {'': '', + 'http://backend.userland.com/rss': '', + 'http://blogs.law.harvard.edu/tech/rss': '', + 'http://purl.org/rss/1.0/': '', + 'http://my.netscape.com/rdf/simple/0.9/': '', + 'http://example.com/newformat#': '', + 'http://example.com/necho': '', + 'http://purl.org/echo/': '', + 'uri/of/echo/namespace#': '', + 'http://purl.org/pie/': '', + 'http://purl.org/atom/ns#': '', + 'http://www.w3.org/2005/Atom': '', + 'http://purl.org/rss/1.0/modules/rss091#': '', + + 'http://webns.net/mvcb/': 'admin', + 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', + 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', + 'http://media.tangent.org/rss/1.0/': 'audio', + 'http://backend.userland.com/blogChannelModule': 'blogChannel', + 'http://web.resource.org/cc/': 'cc', + 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', + 'http://purl.org/rss/1.0/modules/company': 'co', + 'http://purl.org/rss/1.0/modules/content/': 'content', + 'http://my.theinfo.org/changed/1.0/rss/': 'cp', + 'http://purl.org/dc/elements/1.1/': 'dc', + 'http://purl.org/dc/terms/': 'dcterms', + 'http://purl.org/rss/1.0/modules/email/': 'email', + 'http://purl.org/rss/1.0/modules/event/': 'ev', + 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', + 'http://freshmeat.net/rss/fm/': 'fm', + 'http://xmlns.com/foaf/0.1/': 'foaf', + 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', + 'http://postneo.com/icbm/': 'icbm', + 'http://purl.org/rss/1.0/modules/image/': 'image', + 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', + 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', + 'http://purl.org/rss/1.0/modules/link/': 'l', + 'http://search.yahoo.com/mrss': 'media', + 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', + 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', + 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', + 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', + 'http://purl.org/rss/1.0/modules/reference/': 'ref', + 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', + 'http://purl.org/rss/1.0/modules/search/': 'search', + 'http://purl.org/rss/1.0/modules/slash/': 'slash', + 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', + 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', + 'http://hacks.benhammersley.com/rss/streaming/': 'str', + 'http://purl.org/rss/1.0/modules/subscription/': 'sub', + 'http://purl.org/rss/1.0/modules/syndication/': 'sy', + 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', + 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', + 'http://purl.org/rss/1.0/modules/threading/': 'thr', + 'http://purl.org/rss/1.0/modules/textinput/': 'ti', + 'http://madskills.com/public/xml/rss/module/trackback/':'trackback', + 'http://wellformedweb.org/commentAPI/': 'wfw', + 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', + 'http://www.w3.org/1999/xhtml': 'xhtml', + 'http://www.w3.org/1999/xlink': 'xlink', + 'http://www.w3.org/XML/1998/namespace': 'xml' +} + _matchnamespaces = {} + + can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'] + can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] + can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] + html_types = 
['text/html', 'application/xhtml+xml'] + + def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): + if _debug: sys.stderr.write('initializing FeedParser\n') + if not self._matchnamespaces: + for k, v in self.namespaces.items(): + self._matchnamespaces[k.lower()] = v + self.feeddata = FeedParserDict() # feed-level data + self.encoding = encoding # character encoding + self.entries = [] # list of entry-level data + self.version = '' # feed type/version, see SUPPORTED_VERSIONS + self.namespacesInUse = {} # dictionary of namespaces defined by the feed + + # the following are used internally to track state; + # this is really out of control and should be refactored + self.infeed = 0 + self.inentry = 0 + self.incontent = 0 + self.intextinput = 0 + self.inimage = 0 + self.inauthor = 0 + self.incontributor = 0 + self.inpublisher = 0 + self.insource = 0 + self.sourcedata = FeedParserDict() + self.contentparams = FeedParserDict() + self._summaryKey = None + self.namespacemap = {} + self.elementstack = [] + self.basestack = [] + self.langstack = [] + self.baseuri = baseuri or '' + self.lang = baselang or None + self.svgOK = 0 + self.hasTitle = 0 + if baselang: + self.feeddata['language'] = baselang.replace('_','-') + + def unknown_starttag(self, tag, attrs): + if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs)) + # normalize attrs + attrs = [(k.lower(), v) for k, v in attrs] + attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] + + # track xml:base and xml:lang + attrsD = dict(attrs) + baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri + if type(baseuri) != type(u''): + try: + baseuri = unicode(baseuri, self.encoding) + except: + baseuri = unicode(baseuri, 'iso-8859-1') + self.baseuri = _urljoin(self.baseuri, baseuri) + lang = attrsD.get('xml:lang', attrsD.get('lang')) + if lang == '': + # xml:lang could be explicitly set to '', we need to capture that + lang = None + elif lang is None: + # if no xml:lang is specified, use parent lang + lang = self.lang + if lang: + if tag in ('feed', 'rss', 'rdf:RDF'): + self.feeddata['language'] = lang.replace('_','-') + self.lang = lang + self.basestack.append(self.baseuri) + self.langstack.append(lang) + + # track namespaces + for prefix, uri in attrs: + if prefix.startswith('xmlns:'): + self.trackNamespace(prefix[6:], uri) + elif prefix == 'xmlns': + self.trackNamespace(None, uri) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007 + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + if tag.find(':') <> -1: + prefix, tag = tag.split(':', 1) + namespace = self.namespacesInUse.get(prefix, '') + if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML': + attrs.append(('xmlns',namespace)) + if tag=='svg' and namespace=='http://www.w3.org/2000/svg': + attrs.append(('xmlns',namespace)) + if tag == 'svg': self.svgOK += 1 + return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed 
feeds + if (not prefix) and tag not in ('title', 'link', 'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + return self.push(prefix + suffix, 1) + + def unknown_endtag(self, tag): + if _debug: sys.stderr.write('end %s\n' % tag) + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + if suffix == 'svg' and self.svgOK: self.svgOK -= 1 + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + try: + if self.svgOK: raise AttributeError() + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): + # element declared itself as escaped markup, but it isn't really + if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007 + self.contentparams['type'] = 'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('</%s>' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. for '©', ref will be 'copy' + if not self.elementstack: return + if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref) + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + elif ref in self.entities.keys(): + text = self.entities[ref] + if text.startswith('&#') and text.endswith(';'): + return self.handle_entityref(text) + else: + try: name2codepoint[ref] + except KeyError: text = '&%s;' % ref + else: text = unichr(name2codepoint[ref]).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: return + if escape and self.contentparams.get('type') == 'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. <!-- insert message here --> + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. 
<?instruction> + pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if _debug: sys.stderr.write('entering parse_declaration\n') + if self.rawdata[i:i+9] == '<![CDATA[': + k = self.rawdata.find(']]>', i) + if k == -1: k = len(self.rawdata) + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + return k+1 + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text': + contentType = 'text/plain' + elif contentType == 'html': + contentType = 'text/html' + elif contentType == 'xhtml': + contentType = 'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = 'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = 'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = 'atom10' + if loweruri.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = 'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or '', uri) + + def decodeEntities(self, element, data): + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: return + if self.elementstack[-1][0] != element: return + + element, expectingText, pieces = self.elementstack.pop() + + if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml': + # remove enclosing child element, but only if it is a <div> and + # only if all the remaining content is nested underneath it. 
+ # This means that the divs would be retained in the following: + # <div>foo</div><div>bar</div> + while pieces and len(pieces)>1 and not pieces[-1].strip(): + del pieces[-1] + while pieces and len(pieces)>1 and not pieces[0].strip(): + del pieces[0] + if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>': + depth = 0 + for piece in pieces[:-1]: + if piece.startswith('</'): + depth -= 1 + if depth == 0: break + elif piece.startswith('<') and not piece.endswith('/>'): + depth += 1 + else: + pieces = pieces[1:-1] + + output = ''.join(pieces) + if stripWhitespace: + output = output.strip() + if not expectingText: return output + + # decode base64 content + if base64 and self.contentparams.get('base64', 0): + try: + output = base64.decodestring(output) + except binascii.Error: + pass + except binascii.Incomplete: + pass + + # resolve relative URIs + if (element in self.can_be_relative_uri) and output: + output = self.resolveURI(output) + + # decode entities within embedded markup + if not self.contentparams.get('base64', 0): + output = self.decodeEntities(element, output) + + if self.lookslikehtml(output): + self.contentparams['type']='text/html' + + # remove temporary cruft from contentparams + try: + del self.contentparams['mode'] + except KeyError: + pass + try: + del self.contentparams['base64'] + except KeyError: + pass + + is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types + # resolve relative URIs within embedded markup + if is_htmlish and RESOLVE_RELATIVE_URIS: + if element in self.can_contain_relative_uris: + output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html')) + + # parse microformats + # (must do this before sanitizing because some microformats + # rely on elements that we sanitize) + if is_htmlish and element in ['content', 'description', 'summary']: + mfresults = _parseMicroformats(output, self.baseuri, self.encoding) + if mfresults: + for tag in mfresults.get('tags', []): + self._addTag(tag['term'], tag['scheme'], tag['label']) + for enclosure in mfresults.get('enclosures', []): + self._start_enclosure(enclosure) + for xfn in mfresults.get('xfn', []): + self._addXFN(xfn['relationships'], xfn['href'], xfn['name']) + vcard = mfresults.get('vcard') + if vcard: + self._getContext()['vcard'] = vcard + + # sanitize embedded markup + if is_htmlish and SANITIZE_HTML: + if element in self.can_contain_dangerous_markup: + output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html')) + + if self.encoding and type(output) != type(u''): + try: + output = unicode(output, self.encoding) + except: + pass + + # address common error where people take data that is already + # utf-8, presume that it is iso-8859-1, and re-encode it. 
+ if self.encoding=='utf-8' and type(output) == type(u''): + try: + output = unicode(output.encode('iso-8859-1'), 'utf-8') + except: + pass + + # map win-1252 extensions to the proper code points + if type(output) == type(u''): + output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output]) + + # categories/tags/keywords/whatever are handled in _end_category + if element == 'category': + return output + + if element == 'title' and self.hasTitle: + return output + + # store output in appropriate place(s) + if self.inentry and not self.insource: + if element == 'content': + self.entries[-1].setdefault(element, []) + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + self.entries[-1][element].append(contentparams) + elif element == 'link': + self.entries[-1][element] = output + if output: + self.entries[-1]['links'][-1]['href'] = output + else: + if element == 'description': + element = 'summary' + self.entries[-1][element] = output + if self.incontent: + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + self.entries[-1][element + '_detail'] = contentparams + elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage): + context = self._getContext() + if element == 'description': + element = 'subtitle' + context[element] = output + if element == 'link': + context['links'][-1]['href'] = output + elif self.incontent: + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + context[element + '_detail'] = contentparams + return output + + def pushContent(self, tag, attrsD, defaultContentType, expectingText): + self.incontent += 1 + if self.lang: self.lang=self.lang.replace('_','-') + self.contentparams = FeedParserDict({ + 'type': self.mapContentType(attrsD.get('type', defaultContentType)), + 'language': self.lang, + 'base': self.baseuri}) + self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) + self.push(tag, expectingText) + + def popContent(self, tag): + value = self.pop(tag) + self.incontent -= 1 + self.contentparams.clear() + return value + + # a number of elements in a number of RSS variants are nominally plain + # text, but this is routinely ignored. This is an attempt to detect + # the most common cases. As false positives often result in silent + # data loss, this function errs on the conservative side. 
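As a rough illustration of what the heuristic defined next looks for (hypothetical sample string; the real check also requires the declared content type to be text/plain), the string-level tests boil down to these regular expressions:

    import re
    sample = 'Fish &amp; chips <b>tonight</b>'   # would be flagged as HTML
    print(re.findall(r'</(\w+)>', sample))        # ['b']      -> a close tag is present
    print(re.findall(r'</?(\w+)', sample))        # ['b', 'b'] -> every tag must be in the sanitizer whitelist
    print(re.findall(r'&(\w+);', sample))         # ['amp']    -> every entity must be a defined HTML entity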
+ def lookslikehtml(self, str): + if self.version.startswith('atom'): return + if self.contentparams.get('type','text/html') != 'text/plain': return + + # must have a close tag or a entity reference to qualify + if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return + + # all tags must be in a restricted subset of valid HTML tags + if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements, + re.findall(r'</?(\w+)',str)): return + + # all entities must have been defined as valid HTML entities + from htmlentitydefs import entitydefs + if filter(lambda e: e not in entitydefs.keys(), + re.findall(r'&(\w+);',str)): return + + return 1 + + def _mapToStandardPrefix(self, name): + colonpos = name.find(':') + if colonpos <> -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith('text/'): + return 0 + if self.contentparams['type'].endswith('+xml'): + return 0 + if self.contentparams['type'].endswith('/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value): + context = self._getContext() + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': 'rss091u', + '0.92': 'rss092', + '0.93': 'rss093', + '0.94': 'rss094'} + if not self.version: + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = 'rss20' + else: + self.version = 'rss' + + def _start_dlhottitles(self, attrsD): + self.version = 'hotrss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + _start_feedinfo = _start_channel + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': 'atom01', + '0.2': 'atom02', + '0.3': 'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = 'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + context = self._getContext() + context.setdefault('image', FeedParserDict()) + self.inimage = 1 + self.hasTitle = 0 + self.push('image', 0) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + self.intextinput = 1 + self.hasTitle = 0 + self.push('textinput', 0) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + 
_start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + _start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except: + value = 0 + if self.inimage: + context = self._getContext() + context['height'] = value + + def _start_url(self, attrsD): + self.push('href', 1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inimage: + context = self.feeddata['image'] + elif self.intextinput: + context = self.feeddata['textinput'] + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def 
_sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = '%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author, email = context.get(key), None + if not author: return + emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author) + if emailmatch: + email = emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, '') + author = author.replace('()', '') + author = author.replace('<>', '') + author = author.replace('<>', '') + author = author.strip() + if author and (author[0] == '('): + author = author[1:] + if author and (author[-1] == ')'): + author = author[:-1] + author = author.strip() + if author or email: + context.setdefault('%s_detail' % key, FeedParserDict()) + if author: + context['%s_detail' % key]['name'] = author + if email: + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, 'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, 'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + self.hasTitle = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + _start_product = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = _end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value)) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + + def _start_created(self, attrsD): + self.push('created', 
1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value)) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired'))) + + def _start_cc_license(self, attrsD): + context = self._getContext() + value = self._getAttribute(attrsD, 'rdf:resource') + attrsD = FeedParserDict() + attrsD['rel']='license' + if value: attrsD['href']=value + context.setdefault('links', []).append(attrsD) + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + _start_creativeCommons_license = _start_creativecommons_license + + def _end_creativecommons_license(self): + value = self.pop('license') + context = self._getContext() + attrsD = FeedParserDict() + attrsD['rel']='license' + if value: attrsD['href']=value + context.setdefault('links', []).append(attrsD) + del context['license'] + _end_creativeCommons_license = _end_creativecommons_license + + def _addXFN(self, relationships, href, name): + context = self._getContext() + xfn = context.setdefault('xfn', []) + value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name}) + if value not in xfn: + xfn.append(value) + + def _addTag(self, term, scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(value) + + def _start_category(self, attrsD): + if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD)) + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, 'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + _end_itunes_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', 'alternate') + if attrsD['rel'] == 'self': + attrsD.setdefault('type', 'application/atom+xml') + else: + attrsD.setdefault('type', 'text/html') + context = self._getContext() + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + if attrsD.get('rel')=='enclosure' and not context.get('id'): + context['id'] = attrsD.get('href') + expectingText = self.infeed or self.inentry or self.insource + context.setdefault('links', []) + context['links'].append(FeedParserDict(attrsD)) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + 
self.push('link', expectingText) + _start_producturl = _start_link + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + _end_producturl = _end_link + + def _start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + if self.svgOK: return self.unknown_starttag('title', attrsD.items()) + self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + if self.svgOK: return + value = self.popContent('title') + if not value: return + context = self._getContext() + self.hasTitle = 1 + _end_dc_title = _end_title + + def _end_media_title(self): + hasTitle = self.hasTitle + self._end_title() + self.hasTitle = hasTitle + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource) + _start_dc_description = _start_description + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + self._summaryKey = None + _end_abstract = _end_description + _end_dc_description = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, 'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = 
None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + context = self._getContext() + attrsD['rel']='enclosure' + context.setdefault('links', []).append(FeedParserDict(attrsD)) + href = attrsD.get('href') + if href and not context.get('id'): + context['id'] = href + + def _start_source(self, attrsD): + self.insource = 1 + self.hasTitle = 0 + + def _end_source(self): + self.insource = 0 + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, 'text/plain', 1) + src = attrsD.get('src') + if src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_prodlink(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, 'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, 'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) + value = self.popContent('content') + if copyToDescription: + self._save('description', value) + + _end_body = _end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + _end_prodlink = _end_content + + def _start_itunes_image(self, attrsD): + self.push('itunes_image', 0) + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0 + +if _XML_AVAILABLE: + class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + if _debug: sys.stderr.write('trying StrictFeedParser\n') + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + + def startPrefixMapping(self, prefix, uri): + self.trackNamespace(prefix, uri) + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find('backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = 'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + localname = str(localname).lower() + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). 
Thanks to MatejC for helping me test this and
+        # tirelessly telling me that it didn't work yet.
+        attrsD = {}
+        if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
+            attrsD['xmlns']=namespace
+        if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
+            attrsD['xmlns']=namespace
+
+        if prefix:
+            localname = prefix.lower() + ':' + localname
+        elif namespace and not qname: #Expat
+            for name,value in self.namespacesInUse.items():
+                if name and value == namespace:
+                    localname = name + ':' + localname
+                    break
+        if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
+
+        for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
+            lowernamespace = (namespace or '').lower()
+            prefix = self._matchnamespaces.get(lowernamespace, '')
+            if prefix:
+                attrlocalname = prefix + ':' + attrlocalname
+            attrsD[str(attrlocalname).lower()] = attrvalue
+        for qname in attrs.getQNames():
+            attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
+        self.unknown_starttag(localname, attrsD.items())
+
+    def characters(self, text):
+        self.handle_data(text)
+
+    def endElementNS(self, name, qname):
+        namespace, localname = name
+        lowernamespace = str(namespace or '').lower()
+        if qname and qname.find(':') > 0:
+            givenprefix = qname.split(':')[0]
+        else:
+            givenprefix = ''
+        prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
+        if prefix:
+            localname = prefix + ':' + localname
+        elif namespace and not qname: #Expat
+            for name,value in self.namespacesInUse.items():
+                if name and value == namespace:
+                    localname = name + ':' + localname
+                    break
+        localname = str(localname).lower()
+        self.unknown_endtag(localname)
+
+    def error(self, exc):
+        self.bozo = 1
+        self.exc = exc
+
+    def fatalError(self, exc):
+        self.error(exc)
+        raise exc
+
+class _BaseHTMLProcessor(sgmllib.SGMLParser):
+    special = re.compile('''[<>'"]''')
+    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
+    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
+      'img', 'input', 'isindex', 'link', 'meta', 'param']
+
+    def __init__(self, encoding, type):
+        self.encoding = encoding
+        self.type = type
+        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
+        sgmllib.SGMLParser.__init__(self)
+
+    def reset(self):
+        self.pieces = []
+        sgmllib.SGMLParser.reset(self)
+
+    def _shorttag_replace(self, match):
+        tag = match.group(1)
+        if tag in self.elements_no_end_tag:
+            return '<' + tag + ' />'
+        else:
+            return '<' + tag + '></' + tag + '>'
+
+    def parse_starttag(self,i):
+        j=sgmllib.SGMLParser.parse_starttag(self, i)
+        if self.type == 'application/xhtml+xml':
+            if j>2 and self.rawdata[j-2:j]=='/>':
+                self.unknown_endtag(self.lasttag)
+        return j
+
+    def feed(self, data):
+        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
+        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
+        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
+        data = data.replace('&#39;', "'")
+        data = data.replace('&#34;', '"')
+        if self.encoding and type(data) == type(u''):
+            data = data.encode(self.encoding)
+        sgmllib.SGMLParser.feed(self, data)
+        sgmllib.SGMLParser.close(self)
+
+    def normalize_attrs(self, attrs):
+        if not attrs: return attrs
+        # utility method to be called by descendants
+        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
+        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
+        attrs.sort()
+        return attrs
+
+    def unknown_starttag(self, tag, attrs):
+        # called for each start tag
+        # attrs is a list of (attr, value) tuples
+        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
+        uattrs = []
+        strattrs=''
+        if attrs:
+            for key, value in attrs:
+                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
+                value = self.bare_ampersand.sub("&amp;", value)
+                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
+                if type(value) != type(u''):
+                    try:
+                        value = unicode(value, self.encoding)
+                    except:
+                        value = unicode(value, 'iso-8859-1')
+                uattrs.append((unicode(key, self.encoding), value))
+            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
+            if self.encoding:
+                try:
+                    strattrs=strattrs.encode(self.encoding)
+                except:
+                    pass
+        if tag in self.elements_no_end_tag:
+            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
+        else:
+            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
+
+    def unknown_endtag(self, tag):
+        # called for each end tag, e.g. for </pre>, tag will be 'pre'
+        # Reconstruct the original end tag.
+        if tag not in self.elements_no_end_tag:
+            self.pieces.append("</%(tag)s>" % locals())
+
+    def handle_charref(self, ref):
+        # called for each character reference, e.g. for '&#160;', ref will be '160'
+        # Reconstruct the original character reference.
+        if ref.startswith('x'):
+            value = unichr(int(ref[1:],16))
+        else:
+            value = unichr(int(ref))
+
+        if value in _cp1252.keys():
+            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
+        else:
+            self.pieces.append('&#%(ref)s;' % locals())
+
+    def handle_entityref(self, ref):
+        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
+        # Reconstruct the original entity reference.
+        if name2codepoint.has_key(ref):
+            self.pieces.append('&%(ref)s;' % locals())
+        else:
+            self.pieces.append('&amp;%(ref)s' % locals())
+
+    def handle_data(self, text):
+        # called for each block of plain text, i.e. outside of any tag and
+        # not containing any character or entity references
+        # Store the original text verbatim.
+        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
+        self.pieces.append(text)
+
+    def handle_comment(self, text):
+        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
+        # Reconstruct the original comment.
+        self.pieces.append('<!--%(text)s-->' % locals())
+
+    def handle_pi(self, text):
+        # called for each processing instruction, e.g. <?instruction>
+        # Reconstruct original processing instruction.
+        self.pieces.append('<?%(text)s>' % locals())
+
+    def handle_decl(self, text):
+        # called for the DOCTYPE, if present, e.g.
+        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        #     "http://www.w3.org/TR/html4/loose.dtd">
+        # Reconstruct original DOCTYPE
+        self.pieces.append('<!%(text)s>' % locals())
+
+    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = self._new_declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.handle_data(rawdata)
+#            self.updatepos(declstartpos, i)
+            return None, -1
+
+    def convert_charref(self, name):
+        return '&#%s;' % name
+
+    def convert_entityref(self, name):
+        return '&%s;' % name
+
+    def output(self):
+        '''Return processed HTML as a single string'''
+        return ''.join([str(p) for p in self.pieces])
+
+class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
+    def __init__(self, baseuri, baselang, encoding, entities):
+        sgmllib.SGMLParser.__init__(self)
+        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
+        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
+        self.entities=entities
+
+    def decodeEntities(self, element, data):
+        data = data.replace('&#60;', '&lt;')
+        data = data.replace('&#x3C;', '&lt;')
+        data = data.replace('&#x3c;', '&lt;')
+        data = data.replace('&#62;', '&gt;')
+        data = data.replace('&#x3E;', '&gt;')
+        data = data.replace('&#x3e;', '&gt;')
+        data = data.replace('&#38;', '&amp;')
+        data = data.replace('&#x26;', '&amp;')
+        data = data.replace('&#34;', '&quot;')
+        data = data.replace('&#x22;', '&quot;')
+        data = data.replace('&#39;', '&apos;')
+        data = data.replace('&#x27;', '&apos;')
+        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
+            data = data.replace('&lt;', '<')
+            data = data.replace('&gt;', '>')
+            data = data.replace('&amp;', '&')
+            data = data.replace('&quot;', '"')
+            data = data.replace('&apos;', "'")
+        return data
+
+    def strattrs(self, attrs):
+        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
+
+class _MicroformatsParser:
+    STRING = 1
+    DATE = 2
+    URI = 3
+    NODE = 4
+    EMAIL = 5
+
+    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
+    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
+
+    def __init__(self, data, baseuri, encoding):
+        self.document = BeautifulSoup.BeautifulSoup(data)
+        self.baseuri = baseuri
+        self.encoding = encoding
+        if type(data) == type(u''):
+            data = data.encode(encoding)
+        self.tags = []
+        self.enclosures = []
+        self.xfn = []
+        self.vcard = None
+
+    def vcardEscape(self, s):
+        if type(s) in (type(''), type(u'')):
+            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
+        return s
+
+    def vcardFold(self, s):
+        s = re.sub(';+$', '', s)
+        sFolded = ''
+        iMax = 75
+        sPrefix = ''
+        while len(s) > iMax:
+            sFolded += sPrefix + s[:iMax] + '\n'
+            s = s[iMax:]
+            sPrefix = ' '
+            iMax = 74
+        sFolded += sPrefix + s
+        return sFolded
+
+    def normalize(self, s):
+        return re.sub(r'\s+', ' ', s).strip()
+
+    def unique(self, aList):
+        results = []
+        for element in aList:
+            if element not in results:
+                results.append(element)
+        return results
+
+    def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt) + + def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0): + all = lambda x: 1 + sProperty = sProperty.lower() + bFound = 0 + bNormalize = 1 + propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)} + if bAllowMultiple and (iPropertyType != self.NODE): + snapResults = [] + containers = elmRoot(['ul', 'ol'], propertyMatch) + for container in containers: + snapResults.extend(container('li')) + bFound = (len(snapResults) != 0) + if not bFound: + snapResults = elmRoot(all, propertyMatch) + bFound = (len(snapResults) != 0) + if (not bFound) and (sProperty == 'value'): + snapResults = elmRoot('pre') + bFound = (len(snapResults) != 0) + bNormalize = not bFound + if not bFound: + snapResults = [elmRoot] + bFound = (len(snapResults) != 0) + arFilter = [] + if sProperty == 'vcard': + snapFilter = elmRoot(all, propertyMatch) + for node in snapFilter: + if node.findParent(all, propertyMatch): + arFilter.append(node) + arResults = [] + for node in snapResults: + if node not in arFilter: + arResults.append(node) + bFound = (len(arResults) != 0) + if not bFound: + if bAllowMultiple: return [] + elif iPropertyType == self.STRING: return '' + elif iPropertyType == self.DATE: return None + elif iPropertyType == self.URI: return '' + elif iPropertyType == self.NODE: return None + else: return None + arValues = [] + for elmResult in arResults: + sValue = None + if iPropertyType == self.NODE: + if bAllowMultiple: + arValues.append(elmResult) + continue + else: + return elmResult + sNodeName = elmResult.name.lower() + if (iPropertyType == self.EMAIL) and (sNodeName == 'a'): + sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0] + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if (not sValue) and (sNodeName == 'abbr'): + sValue = elmResult.get('title') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if (not sValue) and (iPropertyType == self.URI): + if sNodeName == 'a': sValue = elmResult.get('href') + elif sNodeName == 'img': sValue = elmResult.get('src') + elif sNodeName == 'object': sValue = elmResult.get('data') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if (not sValue) and (sNodeName == 'img'): + sValue = elmResult.get('alt') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if not sValue: + sValue = elmResult.renderContents() + sValue = re.sub(r'<\S[^>]*>', '', sValue) + sValue = sValue.replace('\r\n', '\n') + sValue = sValue.replace('\r', '\n') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if not sValue: continue + if iPropertyType == self.DATE: + sValue = _parse_date_iso8601(sValue) + if bAllowMultiple: + arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue) + else: + return bAutoEscape and self.vcardEscape(sValue) or sValue + return arValues + + def findVCards(self, elmRoot, bAgentParsing=0): + sVCards = '' + + if not bAgentParsing: + arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1) + else: + arCards = [elmRoot] + + for elmCard in arCards: + arLines = [] + + def processSingleString(sProperty): + sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1) + if sValue: + arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue)) + return sValue or '' + + def processSingleURI(sProperty): + sValue = self.getPropertyValue(elmCard, sProperty, self.URI) + if sValue: 
+ sContentType = '' + sEncoding = '' + sValueKey = '' + if sValue.startswith('data:'): + sEncoding = ';ENCODING=b' + sContentType = sValue.split(';')[0].split('/').pop() + sValue = sValue.split(',', 1).pop() + else: + elmValue = self.getPropertyValue(elmCard, sProperty) + if elmValue: + if sProperty != 'url': + sValueKey = ';VALUE=uri' + sContentType = elmValue.get('type', '').strip().split('/').pop().strip() + sContentType = sContentType.upper() + if sContentType == 'OCTET-STREAM': + sContentType = '' + if sContentType: + sContentType = ';TYPE=' + sContentType.upper() + arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue)) + + def processTypeValue(sProperty, arDefaultType, arForceType=None): + arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1) + for elmResult in arResults: + arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1) + if arForceType: + arType = self.unique(arForceType + arType) + if not arType: + arType = arDefaultType + sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0) + if sValue: + arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue)) + + # AGENT + # must do this before all other properties because it is destructive + # (removes nested class="vcard" nodes so they don't interfere with + # this vcard's other properties) + arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1) + for elmAgent in arAgent: + if re.compile(r'\bvcard\b').search(elmAgent.get('class')): + sAgentValue = self.findVCards(elmAgent, 1) + '\n' + sAgentValue = sAgentValue.replace('\n', '\\n') + sAgentValue = sAgentValue.replace(';', '\\;') + if sAgentValue: + arLines.append(self.vcardFold('AGENT:' + sAgentValue)) + elmAgent['class'] = '' + elmAgent.contents = [] + else: + sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1); + if sAgentValue: + arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue)) + + # FN (full name) + sFN = processSingleString('fn') + + # N (name) + elmName = self.getPropertyValue(elmCard, 'n') + if elmName: + sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1) + sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1) + arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1) + arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1) + arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1) + arLines.append(self.vcardFold('N:' + sFamilyName + ';' + + sGivenName + ';' + + ','.join(arAdditionalNames) + ';' + + ','.join(arHonorificPrefixes) + ';' + + ','.join(arHonorificSuffixes))) + elif sFN: + # implied "N" optimization + # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization + arNames = self.normalize(sFN).split() + if len(arNames) == 2: + bFamilyNameFirst = (arNames[0].endswith(',') or + len(arNames[1]) == 1 or + ((len(arNames[1]) == 2) and (arNames[1].endswith('.')))) + if bFamilyNameFirst: + arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1])) + else: + arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0])) + + # SORT-STRING + sSortString = self.getPropertyValue(elmCard, 
'sort-string', self.STRING, bAutoEscape=1) + if sSortString: + arLines.append(self.vcardFold('SORT-STRING:' + sSortString)) + + # NICKNAME + arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1) + if arNickname: + arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname))) + + # PHOTO + processSingleURI('photo') + + # BDAY + dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE) + if dtBday: + arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday))) + + # ADR (address) + arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1) + for elmAdr in arAdr: + arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1) + if not arType: + arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1 + sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1) + sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1) + sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1) + sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1) + sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1) + sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1) + sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1) + arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' + + sPostOfficeBox + ';' + + sExtendedAddress + ';' + + sStreetAddress + ';' + + sLocality + ';' + + sRegion + ';' + + sPostalCode + ';' + + sCountryName)) + + # LABEL + processTypeValue('label', ['intl','postal','parcel','work']) + + # TEL (phone number) + processTypeValue('tel', ['voice']) + + # EMAIL + processTypeValue('email', ['internet'], ['internet']) + + # MAILER + processSingleString('mailer') + + # TZ (timezone) + processSingleString('tz') + + # GEO (geographical information) + elmGeo = self.getPropertyValue(elmCard, 'geo') + if elmGeo: + sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1) + sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1) + arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude)) + + # TITLE + processSingleString('title') + + # ROLE + processSingleString('role') + + # LOGO + processSingleURI('logo') + + # ORG (organization) + elmOrg = self.getPropertyValue(elmCard, 'org') + if elmOrg: + sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1) + if not sOrganizationName: + # implied "organization-name" optimization + # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization + sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1) + if sOrganizationName: + arLines.append(self.vcardFold('ORG:' + sOrganizationName)) + else: + arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1) + arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit))) + + # CATEGORY + arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1) + if arCategory: + arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory))) + + # NOTE + processSingleString('note') + + # REV + processSingleString('rev') + + # SOUND + processSingleURI('sound') + + # UID + processSingleString('uid') + + # URL + processSingleURI('url') + + # CLASS + processSingleString('class') + + # KEY + processSingleURI('key') + + if arLines: 
+ arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard'] + sVCards += '\n'.join(arLines) + '\n' + + return sVCards.strip() + + def isProbablyDownloadable(self, elm): + attrsD = elm.attrMap + if not attrsD.has_key('href'): return 0 + linktype = attrsD.get('type', '').strip() + if linktype.startswith('audio/') or \ + linktype.startswith('video/') or \ + (linktype.startswith('application/') and not linktype.endswith('xml')): + return 1 + path = urlparse.urlparse(attrsD['href'])[2] + if path.find('.') == -1: return 0 + fileext = path.split('.').pop().lower() + return fileext in self.known_binary_extensions + + def findTags(self): + all = lambda x: 1 + for elm in self.document(all, {'rel': re.compile(r'\btag\b')}): + href = elm.get('href') + if not href: continue + urlscheme, domain, path, params, query, fragment = \ + urlparse.urlparse(_urljoin(self.baseuri, href)) + segments = path.split('/') + tag = segments.pop() + if not tag: + tag = segments.pop() + tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', '')) + if not tagscheme.endswith('/'): + tagscheme += '/' + self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''})) + + def findEnclosures(self): + all = lambda x: 1 + enclosure_match = re.compile(r'\benclosure\b') + for elm in self.document(all, {'href': re.compile(r'.+')}): + if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue + if elm.attrMap not in self.enclosures: + self.enclosures.append(elm.attrMap) + if elm.string and not elm.get('title'): + self.enclosures[-1]['title'] = elm.string + + def findXFN(self): + all = lambda x: 1 + for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}): + rels = elm.get('rel', '').split() + xfn_rels = [] + for rel in rels: + if rel in self.known_xfn_relationships: + xfn_rels.append(rel) + if xfn_rels: + self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string}) + +def _parseMicroformats(htmlSource, baseURI, encoding): + if not BeautifulSoup: return + if _debug: sys.stderr.write('entering _parseMicroformats\n') + p = _MicroformatsParser(htmlSource, baseURI, encoding) + p.vcard = p.findVCards(p.document) + p.findTags() + p.findEnclosures() + p.findXFN() + return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard} + +class _RelativeURIResolver(_BaseHTMLProcessor): + relative_uris = [('a', 'href'), + ('applet', 'codebase'), + ('area', 'href'), + ('blockquote', 'cite'), + ('body', 'background'), + ('del', 'cite'), + ('form', 'action'), + ('frame', 'longdesc'), + ('frame', 'src'), + ('iframe', 'longdesc'), + ('iframe', 'src'), + ('head', 'profile'), + ('img', 'longdesc'), + ('img', 'src'), + ('img', 'usemap'), + ('input', 'src'), + ('input', 'usemap'), + ('ins', 'cite'), + ('link', 'href'), + ('object', 'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding, type): + _BaseHTMLProcessor.__init__(self, encoding, type) + self.baseuri = baseuri + + def resolveURI(self, uri): + return _urljoin(self.baseuri, uri.strip()) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding, type): + if _debug: 
sys.stderr.write('entering _resolveRelativeURIs\n') + p = _RelativeURIResolver(baseURI, encoding, type) + p.feed(htmlSource) + return p.output() + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article', + 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas', + 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', + 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', + 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figure', 'footer', + 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', + 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', + 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', + 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', + 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub', + 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', + 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'autoplay', 'autocomplete', 'autofocus', 'axis', + 'background', 'balance', 'bgcolor', 'bgproperties', 'border', + 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', + 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', + 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', + 'colspan', 'compact', 'contenteditable', 'coords', 'data', 'datafld', + 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir', + 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', + 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', + 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', + 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', + 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', + 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', + 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', + 'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max', + 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', + 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', + 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template', + 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', + 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', + 'xml:lang'] + + unacceptable_elements_with_end_tag = ['script', 'applet', 'style'] + + acceptable_css_properties = ['azimuth', 'background-color', + 'border-bottom-color', 'border-collapse', 'border-color', + 'border-left-color', 'border-right-color', 'border-top-color', 'clear', + 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', + 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', + 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', + 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', + 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', + 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', + 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', + 'white-space', 'width'] + + # survey of common keywords found in feeds + acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', + 'bold', 'both', 'bottom', 'brown', 'center', 
'collapse', 'dashed', + 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', + 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', + 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', + 'transparent', 'underline', 'white', 'yellow'] + + valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + + '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') + + mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math', + 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', + 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', + 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', + 'munderover', 'none', 'semantics'] + + mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', + 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth', + 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows', + 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', + 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', + 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign', + 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', + 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href', + 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'] + + # svgtiny - foreignObject + linearGradient + radialGradient + stop + svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', + 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject', + 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', + 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', + 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', + 'svg', 'switch', 'text', 'title', 'tspan', 'use'] + + # svgtiny + class + opacity + offset + xmlns + xmlns:xlink + svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', + 'arabic-form', 'ascent', 'attributeName', 'attributeType', + 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', + 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', + 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', + 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', + 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', + 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', + 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', + 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', + 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', + 'min', 'name', 'offset', 'opacity', 'orient', 'origin', + 'overline-position', 'overline-thickness', 'panose-1', 'path', + 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', + 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', + 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', + 'stop-color', 'stop-opacity', 'strikethrough-position', + 'strikethrough-thickness', 'stroke', 'stroke-dasharray', + 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', + 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', + 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', + 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', + 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', + 'widths', 'x', 
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', + 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', + 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', + 'y2', 'zoomAndPan'] + + svg_attr_map = None + svg_elem_map = None + + acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule', + 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', + 'stroke-opacity'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + self.mathmlOK = 0 + self.svgOK = 0 + + def unknown_starttag(self, tag, attrs): + acceptable_attributes = self.acceptable_attributes + keymap = {} + if not tag in self.acceptable_elements or self.svgOK: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + + # not otherwise acceptable, perhaps it is MathML or SVG? + if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: + self.mathmlOK += 1 + if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: + self.svgOK += 1 + + # chose acceptable attributes based on tag class, else bail + if self.mathmlOK and tag in self.mathml_elements: + acceptable_attributes = self.mathml_attributes + elif self.svgOK and tag in self.svg_elements: + # for most vocabularies, lowercasing is a good idea. Many + # svg elements, however, are camel case + if not self.svg_attr_map: + lower=[attr.lower() for attr in self.svg_attributes] + mix=[a for a in self.svg_attributes if a not in lower] + self.svg_attributes = lower + self.svg_attr_map = dict([(a.lower(),a) for a in mix]) + + lower=[attr.lower() for attr in self.svg_elements] + mix=[a for a in self.svg_elements if a not in lower] + self.svg_elements = lower + self.svg_elem_map = dict([(a.lower(),a) for a in mix]) + acceptable_attributes = self.svg_attributes + tag = self.svg_elem_map.get(tag,tag) + keymap = self.svg_attr_map + elif not tag in self.acceptable_elements: + return + + # declare xlink namespace, if needed + if self.mathmlOK or self.svgOK: + if filter(lambda (n,v): n.startswith('xlink:'),attrs): + if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs: + attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink')) + + clean_attrs = [] + for key, value in self.normalize_attrs(attrs): + if key in acceptable_attributes: + key=keymap.get(key,key) + clean_attrs.append((key,value)) + elif key=='style': + clean_value = self.sanitize_style(value) + if clean_value: clean_attrs.append((key,clean_value)) + _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + if self.mathmlOK and tag in self.mathml_elements: + if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1 + elif self.svgOK and tag in self.svg_elements: + tag = self.svg_elem_map.get(tag,tag) + if tag == 'svg' and self.svgOK: self.svgOK -= 1 + else: + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + _BaseHTMLProcessor.handle_data(self, text) + + def sanitize_style(self, style): + # disallow urls + style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) + + # gauntlet + if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' + if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return '' + + clean = [] + for 
prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): + if not value: continue + if prop.lower() in self.acceptable_css_properties: + clean.append(prop + ': ' + value + ';') + elif prop.split('-')[0].lower() in ['background','border','margin','padding']: + for keyword in value.split(): + if not keyword in self.acceptable_css_keywords and \ + not self.valid_css_values.match(keyword): + break + else: + clean.append(prop + ': ' + value + ';') + elif self.svgOK and prop.lower() in self.acceptable_svg_properties: + clean.append(prop + ': ' + value + ';') + + return ' '.join(clean) + + +def _sanitizeHTML(htmlSource, encoding, type): + p = _HTMLSanitizer(encoding, type) + p.feed(htmlSource) + data = p.output() + if TIDY_MARKUP: + # loop through list of preferred Tidy interfaces looking for one that's installed, + # then set up a common _tidy function to wrap the interface-specific API. + _tidy = None + for tidy_interface in PREFERRED_TIDY_INTERFACES: + try: + if tidy_interface == "uTidy": + from tidy import parseString as _utidy + def _tidy(data, **kwargs): + return str(_utidy(data, **kwargs)) + break + elif tidy_interface == "mxTidy": + from mx.Tidy import Tidy as _mxtidy + def _tidy(data, **kwargs): + nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) + return data + break + except: + pass + if _tidy: + utf8 = type(data) == type(u'') + if utf8: + data = data.encode('utf-8') + data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") + if utf8: + data = unicode(data, 'utf-8') + if data.count('<body'): + data = data.split('<body', 1)[1] + if data.count('>'): + data = data.split('>', 1)[1] + if data.count('</body'): + data = data.split('</body', 1)[0] + data = data.strip().replace('\r\n', '\n') + return data + +class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler): + def http_error_default(self, req, fp, code, msg, headers): + if ((code / 100) == 3) and (code != 304): + return self.http_error_302(req, fp, code, msg, headers) + infourl = urllib.addinfourl(fp, headers, req.get_full_url()) + infourl.status = code + return infourl + + def http_error_302(self, req, fp, code, msg, headers): + if headers.dict.has_key('location'): + infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) + else: + infourl = urllib.addinfourl(fp, headers, req.get_full_url()) + if not hasattr(infourl, 'status'): + infourl.status = code + return infourl + + def http_error_301(self, req, fp, code, msg, headers): + if headers.dict.has_key('location'): + infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers) + else: + infourl = urllib.addinfourl(fp, headers, req.get_full_url()) + if not hasattr(infourl, 'status'): + infourl.status = code + return infourl + + http_error_300 = http_error_302 + http_error_303 = http_error_302 + http_error_307 = http_error_302 + + def http_error_401(self, req, fp, code, msg, headers): + # Check if + # - server requires digest auth, AND + # - we tried (unsuccessfully) with basic auth, AND + # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions) + # If all conditions hold, parse authentication information + # out of the Authorization header we sent the first time + # (for the username and password) and the WWW-Authenticate + # header the server sent back (for the realm) and retry + # the request with the appropriate digest auth headers instead. 
+ # This evil genius hack has been brought to you by Aaron Swartz. + host = urlparse.urlparse(req.get_full_url())[1] + try: + assert sys.version.split()[0] >= '2.3.3' + assert base64 != None + user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':') + realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] + self.add_password(realm, host, user, passw) + retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) + self.reset_retry_count() + return retry + except: + return self.http_error_default(req, fp, code, msg, headers) + +def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers): + """URL, filename, or string --> stream + + This function lets you define parsers that take any input source + (URL, pathname to local or network file, or actual data as a string) + and deal with it in a uniform manner. Returned object is guaranteed + to have all the basic stdio read methods (read, readline, readlines). + Just .close() the object when you're done with it. + + If the etag argument is supplied, it will be used as the value of an + If-None-Match request header. + + If the modified argument is supplied, it can be a tuple of 9 integers + (as returned by gmtime() in the standard Python time module) or a date + string in any format supported by feedparser. Regardless, it MUST + be in GMT (Greenwich Mean Time). It will be reformatted into an + RFC 1123-compliant date and used as the value of an If-Modified-Since + request header. + + If the agent argument is supplied, it will be used as the value of a + User-Agent request header. + + If the referrer argument is supplied, it will be used as the value of a + Referer[sic] request header. + + If handlers is supplied, it is a list of handlers used to build a + urllib2 opener. + """ + + if hasattr(url_file_stream_or_string, 'read'): + return url_file_stream_or_string + + if url_file_stream_or_string == '-': + return sys.stdin + + if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'): + if not agent: + agent = USER_AGENT + # test for inline user:password for basic auth + auth = None + if base64: + urltype, rest = urllib.splittype(url_file_stream_or_string) + realhost, rest = urllib.splithost(rest) + if realhost: + user_passwd, realhost = urllib.splituser(realhost) + if user_passwd: + url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) + auth = base64.encodestring(user_passwd).strip() + + # iri support + try: + if isinstance(url_file_stream_or_string,unicode): + url_file_stream_or_string = url_file_stream_or_string.encode('idna') + else: + url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna') + except: + pass + + # try to open with urllib2 (to use optional headers) + request = urllib2.Request(url_file_stream_or_string) + request.add_header('User-Agent', agent) + if etag: + request.add_header('If-None-Match', etag) + if type(modified) == type(''): + modified = _parse_date(modified) + if modified: + # format into an RFC 1123-compliant timestamp. We can't use + # time.strftime() since the %a and %b directives can be affected + # by the current locale, but RFC 2616 states that dates must be + # in English. 
+ short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) + if referrer: + request.add_header('Referer', referrer) + if gzip and zlib: + request.add_header('Accept-encoding', 'gzip, deflate') + elif gzip: + request.add_header('Accept-encoding', 'gzip') + elif zlib: + request.add_header('Accept-encoding', 'deflate') + else: + request.add_header('Accept-encoding', '') + if auth: + request.add_header('Authorization', 'Basic %s' % auth) + if ACCEPT_HEADER: + request.add_header('Accept', ACCEPT_HEADER) + request.add_header('A-IM', 'feed') # RFC 3229 support + opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers)) + opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent + try: + return opener.open(request) + finally: + opener.close() # JohnD + + # try to open with native open function (if url_file_stream_or_string is a filename) + try: + return open(url_file_stream_or_string) + except: + pass + + # treat url_file_stream_or_string as string + return _StringIO(str(url_file_stream_or_string)) + +_date_handlers = [] +def registerDateHandler(func): + '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' + _date_handlers.insert(0, func) + +# ISO-8601 date parsing routines written by Fazal Majid. +# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 +# parser is beyond the scope of feedparser and would be a worthwhile addition +# to the Python library. +# A single regular expression cannot parse ISO 8601 date formats into groups +# as the standard is highly irregular (for instance is 030104 2003-01-04 or +# 0301-04-01), so we use templates instead. +# Please note the order in templates is significant because we need a +# greedy match. +_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO', + 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', + '-YY-?MM', '-OOO', '-YY', + '--MM-?DD', '--MM', + '---DD', + 'CC', ''] +_iso8601_re = [ + tmpl.replace( + 'YYYY', r'(?P<year>\d{4})').replace( + 'YY', r'(?P<year>\d\d)').replace( + 'MM', r'(?P<month>[01]\d)').replace( + 'DD', r'(?P<day>[0123]\d)').replace( + 'OOO', r'(?P<ordinal>[0123]\d\d)').replace( + 'CC', r'(?P<century>\d\d$)') + + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' + + r'(:(?P<second>\d{2}(\.\d*)?))?' + + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' + for tmpl in _iso8601_tmpl] +del tmpl +_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] +del regex +def _parse_date_iso8601(dateString): + '''Parse a variety of ISO-8601-compatible formats like 20040105''' + m = None + for _iso8601_match in _iso8601_matches: + m = _iso8601_match(dateString) + if m: break + if not m: return + if m.span() == (0, 0): return + params = m.groupdict() + ordinal = params.get('ordinal', 0) + if ordinal: + ordinal = int(ordinal) + else: + ordinal = 0 + year = params.get('year', '--') + if not year or year == '--': + year = time.gmtime()[0] + elif len(year) == 2: + # ISO 8601 assumes current century, i.e. 
93 -> 2093, NOT 1993 + year = 100 * int(time.gmtime()[0] / 100) + int(year) + else: + year = int(year) + month = params.get('month', '-') + if not month or month == '-': + # ordinals are NOT normalized by mktime, we simulate them + # by setting month=1, day=ordinal + if ordinal: + month = 1 + else: + month = time.gmtime()[1] + month = int(month) + day = params.get('day', 0) + if not day: + # see above + if ordinal: + day = ordinal + elif params.get('century', 0) or \ + params.get('year', 0) or params.get('month', 0): + day = 1 + else: + day = time.gmtime()[2] + else: + day = int(day) + # special case of the century - is the first year of the 21st century + # 2000 or 2001 ? The debate goes on... + if 'century' in params.keys(): + year = (int(params['century']) - 1) * 100 + 1 + # in ISO 8601 most fields are optional + for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: + if not params.get(field, None): + params[field] = 0 + hour = int(params.get('hour', 0)) + minute = int(params.get('minute', 0)) + second = int(float(params.get('second', 0))) + # weekday is normalized by mktime(), we can ignore it + weekday = 0 + daylight_savings_flag = -1 + tm = [year, month, day, hour, minute, second, weekday, + ordinal, daylight_savings_flag] + # ISO 8601 time zone adjustments + tz = params.get('tz') + if tz and tz != 'Z': + if tz[0] == '-': + tm[3] += int(params.get('tzhour', 0)) + tm[4] += int(params.get('tzmin', 0)) + elif tz[0] == '+': + tm[3] -= int(params.get('tzhour', 0)) + tm[4] -= int(params.get('tzmin', 0)) + else: + return None + # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) + # which is guaranteed to normalize d/m/y/h/m/s. + # Many implementations have bugs, but we'll pretend they don't. + return time.localtime(time.mktime(tm)) +registerDateHandler(_parse_date_iso8601) + +# 8-bit date handling routines written by ytrewq1. 
+_korean_year = u'\ub144' # b3e2 in euc-kr +_korean_month = u'\uc6d4' # bff9 in euc-kr +_korean_day = u'\uc77c' # c0cf in euc-kr +_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr +_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr + +_korean_onblog_date_re = \ + re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ + (_korean_year, _korean_month, _korean_day)) +_korean_nate_date_re = \ + re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ + (_korean_am, _korean_pm)) +def _parse_date_onblog(dateString): + '''Parse a string according to the OnBlog 8-bit date format''' + m = _korean_onblog_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_onblog) + +def _parse_date_nate(dateString): + '''Parse a string according to the Nate 8-bit date format''' + m = _korean_nate_date_re.match(dateString) + if not m: return + hour = int(m.group(5)) + ampm = m.group(4) + if (ampm == _korean_pm): + hour += 12 + hour = str(hour) + if len(hour) == 1: + hour = '0' + hour + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_nate) + +_mssql_date_re = \ + re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') +def _parse_date_mssql(dateString): + '''Parse a string according to the MS SQL date format''' + m = _mssql_date_re.match(dateString) + if not m: return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ + {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ + 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ + 'zonediff': '+09:00'} + if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_mssql) + +# Unicode strings for Greek date strings +_greek_months = \ + { \ + u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 + u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 + u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 + u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 + u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 + u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 + u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 + u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 + u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 + u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 + u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 + u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 + u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 + u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 + u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 + u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 + u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 + u'\u0394\u03b5\u03ba': 
u'Dec', # c4e5ea in iso-8859-7 + } + +_greek_wdays = \ + { \ + u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 + u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 + u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 + u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 + u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 + u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 + u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 + } + +_greek_date_format_re = \ + re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') + +def _parse_date_greek(dateString): + '''Parse a string according to a Greek 8-bit date format.''' + m = _greek_date_format_re.match(dateString) + if not m: return + try: + wday = _greek_wdays[m.group(1)] + month = _greek_months[m.group(3)] + except: + return + rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ + {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ + 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ + 'zonediff': m.group(8)} + if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) + return _parse_date_rfc822(rfc822date) +registerDateHandler(_parse_date_greek) + +# Unicode strings for Hungarian date strings +_hungarian_months = \ + { \ + u'janu\u00e1r': u'01', # e1 in iso-8859-2 + u'febru\u00e1ri': u'02', # e1 in iso-8859-2 + u'm\u00e1rcius': u'03', # e1 in iso-8859-2 + u'\u00e1prilis': u'04', # e1 in iso-8859-2 + u'm\u00e1ujus': u'05', # e1 in iso-8859-2 + u'j\u00fanius': u'06', # fa in iso-8859-2 + u'j\u00falius': u'07', # fa in iso-8859-2 + u'augusztus': u'08', + u'szeptember': u'09', + u'okt\u00f3ber': u'10', # f3 in iso-8859-2 + u'november': u'11', + u'december': u'12', + } + +_hungarian_date_format_re = \ + re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') + +def _parse_date_hungarian(dateString): + '''Parse a string according to a Hungarian 8-bit date format.''' + m = _hungarian_date_format_re.match(dateString) + if not m: return + try: + month = _hungarian_months[m.group(2)] + day = m.group(3) + if len(day) == 1: + day = '0' + day + hour = m.group(4) + if len(hour) == 1: + hour = '0' + hour + except: + return + w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ + {'year': m.group(1), 'month': month, 'day': day,\ + 'hour': hour, 'minute': m.group(5),\ + 'zonediff': m.group(6)} + if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) + return _parse_date_w3dtf(w3dtfdate) +registerDateHandler(_parse_date_hungarian) + +# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by +# Drake and licensed under the Python license. 
Removed all range checking +# for month, day, hour, minute, and second, since mktime will normalize +# these later +def _parse_date_w3dtf(dateString): + def __extract_date(m): + year = int(m.group('year')) + if year < 100: + year = 100 * int(time.gmtime()[0] / 100) + int(year) + if year < 1000: + return 0, 0, 0 + julian = m.group('julian') + if julian: + julian = int(julian) + month = julian / 30 + 1 + day = julian % 30 + 1 + jday = None + while jday != julian: + t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) + jday = time.gmtime(t)[-2] + diff = abs(jday - julian) + if jday > julian: + if diff < day: + day = day - diff + else: + month = month - 1 + day = 31 + elif jday < julian: + if day + diff < 28: + day = day + diff + else: + month = month + 1 + return year, month, day + month = m.group('month') + day = 1 + if month is None: + month = 1 + else: + month = int(month) + day = m.group('day') + if day: + day = int(day) + else: + day = 1 + return year, month, day + + def __extract_time(m): + if not m: + return 0, 0, 0 + hours = m.group('hours') + if not hours: + return 0, 0, 0 + hours = int(hours) + minutes = int(m.group('minutes')) + seconds = m.group('seconds') + if seconds: + seconds = int(seconds) + else: + seconds = 0 + return hours, minutes, seconds + + def __extract_tzd(m): + '''Return the Time Zone Designator as an offset in seconds from UTC.''' + if not m: + return 0 + tzd = m.group('tzd') + if not tzd: + return 0 + if tzd == 'Z': + return 0 + hours = int(m.group('tzdhours')) + minutes = m.group('tzdminutes') + if minutes: + minutes = int(minutes) + else: + minutes = 0 + offset = (hours*60 + minutes) * 60 + if tzd[0] == '+': + return -offset + return offset + + __date_re = ('(?P<year>\d\d\d\d)' + '(?:(?P<dsep>-|)' + '(?:(?P<julian>\d\d\d)' + '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?') + __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)' + __tzd_rx = re.compile(__tzd_re) + __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)' + '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?' + + __tzd_re) + __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re) + __datetime_rx = re.compile(__datetime_re) + m = __datetime_rx.match(dateString) + if (m is None) or (m.group() != dateString): return + gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) + if gmt[0] == 0: return + return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) +registerDateHandler(_parse_date_w3dtf) + +def _parse_date_rfc822(dateString): + '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' + data = dateString.split() + if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames: + del data[0] + if len(data) == 4: + s = data[3] + i = s.find('+') + if i > 0: + data[3:] = [s[:i], s[i+1:]] + else: + data.append('') + dateString = " ".join(data) + if len(data) < 5: + dateString += ' 00:00:00 GMT' + tm = rfc822.parsedate_tz(dateString) + if tm: + return time.gmtime(rfc822.mktime_tz(tm)) +# rfc822.py defines several time zones, but we define some extra ones. +# 'ET' is equivalent to 'EST', etc. 
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date_perforce(aDateString): + """parse a date in yyyy/mm/dd hh:mm:ss TTT format""" + # Fri, 2006/09/15 08:19:53 EDT + _my_date_pattern = re.compile( \ + r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})') + + dow, year, month, day, hour, minute, second, tz = \ + _my_date_pattern.search(aDateString).groups() + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz) + tm = rfc822.parsedate_tz(dateString) + if tm: + return time.gmtime(rfc822.mktime_tz(tm)) +registerDateHandler(_parse_date_perforce) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + if not date9tuple: continue + if len(date9tuple) != 9: + if _debug: sys.stderr.write('date handler function must return 9-tuple\n') + raise ValueError + map(int, date9tuple) + return date9tuple + except Exception, e: + if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) + pass + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) + + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. 
+ + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. + http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + return content_type, params.get('charset', '').replace("'", '') + + sniffed_xml_encoding = '' + xml_encoding = '' + true_encoding = '' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type')) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) + except: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].lower() + if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity') + text_content_types = ('text/xml', 'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith('application/') 
and http_content_type.endswith('+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or 'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): + acceptable_content_type = 1 + true_encoding = http_encoding or 'us-ascii' + elif http_content_type.startswith('text/'): + true_encoding = http_encoding or 'us-ascii' + elif http_headers and (not http_headers.has_key('content-type')): + true_encoding = xml_encoding or 'iso-8859-1' + else: + true_encoding = xml_encoding or 'utf-8' + # some feeds claim to be gb2312 but are actually gb18030. + # apparently MSIE and Firefox both do the following switch: + if true_encoding.lower() == 'gb2312': + true_encoding = 'gb18030' + return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type + +def _toUTF8(data, encoding): + '''Changes an XML data stream on the fly to specify a new encoding + + data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already + encoding is a string recognized by encodings.aliases + ''' + if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) + # strip Byte Order Mark (if present) + if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-16be': + sys.stderr.write('trying utf-16be instead\n') + encoding = 'utf-16be' + data = data[2:] + elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-16le': + sys.stderr.write('trying utf-16le instead\n') + encoding = 'utf-16le' + data = data[2:] + elif data[:3] == '\xef\xbb\xbf': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-8': + sys.stderr.write('trying utf-8 instead\n') + encoding = 'utf-8' + data = data[3:] + elif data[:4] == '\x00\x00\xfe\xff': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-32be': + sys.stderr.write('trying utf-32be instead\n') + encoding = 'utf-32be' + data = data[4:] + elif data[:4] == '\xff\xfe\x00\x00': + if _debug: + sys.stderr.write('stripping BOM\n') + if encoding != 'utf-32le': + sys.stderr.write('trying utf-32le instead\n') + encoding = 'utf-32le' + data = data[4:] + newdata = unicode(data, encoding) + if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) + declmatch = re.compile('^<\?xml[^>]*?>') + newdecl = '''<?xml version='1.0' encoding='utf-8'?>''' + if declmatch.search(newdata): + newdata = declmatch.sub(newdecl, newdata) + else: + newdata = newdecl + u'\n' + newdata + return newdata.encode('utf-8') + +def _stripDoctype(data): + '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) + + rss_version may be 'rss091n' or None + stripped_data is the same XML document, minus the DOCTYPE + ''' + start = re.search('<\w',data) + start = start and start.start() or -1 + head,data = data[:start+1], data[start+1:] + + entity_pattern = re.compile(r'^\s*<!ENTITY([^>]*?)>', re.MULTILINE) + entity_results=entity_pattern.findall(head) + head = entity_pattern.sub('', head) + doctype_pattern = re.compile(r'^\s*<!DOCTYPE([^>]*?)>', re.MULTILINE) + doctype_results = doctype_pattern.findall(head) + doctype = doctype_results and doctype_results[0] or '' + if doctype.lower().count('netscape'): + version = 'rss091n' + else: + version = None + + # only allow in 'safe' inline 
entity definitions + replacement='' + if len(doctype_results)==1 and entity_results: + safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"') + safe_entities=filter(lambda e: safe_pattern.match(e),entity_results) + if safe_entities: + replacement='<!DOCTYPE feed [\n <!ENTITY %s>\n]>' % '>\n <!ENTITY '.join(safe_entities) + data = doctype_pattern.sub(replacement, head) + data + + return version, data, dict(replacement and safe_pattern.findall(replacement)) + +def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]): + '''Parse a feed from a URL, file, stream, or string''' + result = FeedParserDict() + result['feed'] = FeedParserDict() + result['entries'] = [] + if _XML_AVAILABLE: + result['bozo'] = 0 + if type(handlers) == types.InstanceType: + handlers = [handlers] + try: + f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers) + data = f.read() + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + f = None + + # if feed is gzip-compressed, decompress it + if f and data and hasattr(f, 'headers'): + if gzip and f.headers.get('content-encoding', '') == 'gzip': + try: + data = gzip.GzipFile(fileobj=_StringIO(data)).read() + except Exception, e: + # Some feeds claim to be gzipped but they're not, so + # we get garbage. Ideally, we should re-request the + # feed without the 'Accept-encoding: gzip' header, + # but we don't. + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + elif zlib and f.headers.get('content-encoding', '') == 'deflate': + try: + data = zlib.decompress(data, -zlib.MAX_WBITS) + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = '' + + # save HTTP headers + if hasattr(f, 'info'): + info = f.info() + etag = info.getheader('ETag') + if etag: + result['etag'] = etag + last_modified = info.getheader('Last-Modified') + if last_modified: + result['modified'] = _parse_date(last_modified) + if hasattr(f, 'url'): + result['href'] = f.url + result['status'] = 200 + if hasattr(f, 'status'): + result['status'] = f.status + if hasattr(f, 'headers'): + result['headers'] = f.headers.dict + if hasattr(f, 'close'): + f.close() + + # there are four encodings to keep track of: + # - http_encoding is the encoding declared in the Content-Type HTTP header + # - xml_encoding is the encoding declared in the <?xml declaration + # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data + # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications + http_headers = result.get('headers', {}) + result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ + _getCharacterEncoding(http_headers, data) + if http_headers and (not acceptable_content_type): + if http_headers.has_key('content-type'): + bozo_message = '%s is not an XML media type' % http_headers['content-type'] + else: + bozo_message = 'no Content-type specified' + result['bozo'] = 1 + result['bozo_exception'] = NonXMLContentType(bozo_message) + + result['version'], data, entities = _stripDoctype(data) + + baseuri = http_headers.get('content-location', result.get('href')) + baselang = http_headers.get('content-language', None) + + # if server sent 304, we're done + if result.get('status', 0) == 304: + result['version'] = '' + result['debug_message'] = 'The feed has not changed since you last checked, ' + \ + 'so the server sent no data. This is a feature, not a bug!' 
+ return result + + # if there was a problem downloading, we're done + if not data: + return result + + # determine character encoding + use_strict_parser = 0 + known_encoding = 0 + tried_encodings = [] + # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM + for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding): + if not proposed_encoding: continue + if proposed_encoding in tried_encodings: continue + tried_encodings.append(proposed_encoding) + try: + data = _toUTF8(data, proposed_encoding) + known_encoding = use_strict_parser = 1 + break + except: + pass + # if no luck and we have auto-detection library, try that + if (not known_encoding) and chardet: + try: + proposed_encoding = chardet.detect(data)['encoding'] + if proposed_encoding and (proposed_encoding not in tried_encodings): + tried_encodings.append(proposed_encoding) + data = _toUTF8(data, proposed_encoding) + known_encoding = use_strict_parser = 1 + except: + pass + # if still no luck and we haven't tried utf-8 yet, try that + if (not known_encoding) and ('utf-8' not in tried_encodings): + try: + proposed_encoding = 'utf-8' + tried_encodings.append(proposed_encoding) + data = _toUTF8(data, proposed_encoding) + known_encoding = use_strict_parser = 1 + except: + pass + # if still no luck and we haven't tried windows-1252 yet, try that + if (not known_encoding) and ('windows-1252' not in tried_encodings): + try: + proposed_encoding = 'windows-1252' + tried_encodings.append(proposed_encoding) + data = _toUTF8(data, proposed_encoding) + known_encoding = use_strict_parser = 1 + except: + pass + # if still no luck and we haven't tried iso-8859-2 yet, try that. + if (not known_encoding) and ('iso-8859-2' not in tried_encodings): + try: + proposed_encoding = 'iso-8859-2' + tried_encodings.append(proposed_encoding) + data = _toUTF8(data, proposed_encoding) + known_encoding = use_strict_parser = 1 + except: + pass + # if still no luck, give up + if not known_encoding: + result['bozo'] = 1 + result['bozo_exception'] = CharacterEncodingUnknown( \ + 'document encoding unknown, I tried ' + \ + '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \ + (result['encoding'], xml_encoding)) + result['encoding'] = '' + elif proposed_encoding != result['encoding']: + result['bozo'] = 1 + result['bozo_exception'] = CharacterEncodingOverride( \ + 'documented declared as %s, but parsed as %s' % \ + (result['encoding'], proposed_encoding)) + result['encoding'] = proposed_encoding + + if not _XML_AVAILABLE: + use_strict_parser = 0 + if use_strict_parser: + # initialize the SAX parser + feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') + saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) + saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) + saxparser.setContentHandler(feedparser) + saxparser.setErrorHandler(feedparser) + source = xml.sax.xmlreader.InputSource() + source.setByteStream(_StringIO(data)) + if hasattr(saxparser, '_ns_stack'): + # work around bug in built-in SAX parser (doesn't recognize xml: namespace) + # PyXML doesn't have this problem, and it doesn't have _ns_stack either + saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) + try: + saxparser.parse(source) + except Exception, e: + if _debug: + import traceback + traceback.print_stack() + traceback.print_exc() + sys.stderr.write('xml parsing failed\n') + result['bozo'] = 1 + result['bozo_exception'] = feedparser.exc or e + use_strict_parser = 0 + if not use_strict_parser: + feedparser = 
_LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities) + feedparser.feed(data) + result['feed'] = feedparser.feeddata + result['entries'] = feedparser.entries + result['version'] = result['version'] or feedparser.version + result['namespaces'] = feedparser.namespacesInUse + return result + +class Serializer: + def __init__(self, results): + self.results = results + +class TextSerializer(Serializer): + def write(self, stream=sys.stdout): + self._writer(stream, self.results, '') + + def _writer(self, stream, node, prefix): + if not node: return + if hasattr(node, 'keys'): + keys = node.keys() + keys.sort() + for k in keys: + if k in ('description', 'link'): continue + if node.has_key(k + '_detail'): continue + if node.has_key(k + '_parsed'): continue + self._writer(stream, node[k], prefix + k + '.') + elif type(node) == types.ListType: + index = 0 + for n in node: + self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].') + index += 1 + else: + try: + s = str(node).encode('utf-8') + s = s.replace('\\', '\\\\') + s = s.replace('\r', '') + s = s.replace('\n', r'\n') + stream.write(prefix[:-1]) + stream.write('=') + stream.write(s) + stream.write('\n') + except: + pass + +class PprintSerializer(Serializer): + def write(self, stream=sys.stdout): + if self.results.has_key('href'): + stream.write(self.results['href'] + '\n\n') + from pprint import pprint + pprint(self.results, stream) + stream.write('\n') + +if __name__ == '__main__': + try: + from optparse import OptionParser + except: + OptionParser = None + + if OptionParser: + optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-") + optionParser.set_defaults(format="pprint") + optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs") + optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs") + optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs") + optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)") + optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)") + optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr") + (options, urls) = optionParser.parse_args() + if options.verbose: + _debug = 1 + if not urls: + optionParser.print_help() + sys.exit(0) + else: + if not sys.argv[1:]: + print __doc__ + sys.exit(0) + class _Options: + etag = modified = agent = referrer = None + format = 'pprint' + options = _Options() + urls = sys.argv[1:] + + zopeCompatibilityHack() + + serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer) + for url in urls: + results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer) + serializer(results).write(sys.stdout) diff --git a/lib/venus/planet/vendor/html5lib/__init__.py b/lib/venus/planet/vendor/html5lib/__init__.py new file mode 100644 index 0000000..4dbcb69 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/__init__.py @@ -0,0 +1,15 @@ +""" +HTML parsing library based on the WHATWG "HTML5" +specification. 
The parser is designed to be compatible with existing +HTML found in the wild and implements well-defined error recovery that +is largely compatible with modern desktop web browsers. + +Example usage: + +import html5lib +f = open("my_document.html") +p = html5lib.HTMLParser() +tree = p.parse(f) +""" +from html5parser import HTMLParser +from liberalxmlparser import XMLParser, XHTMLParser diff --git a/lib/venus/planet/vendor/html5lib/constants.py b/lib/venus/planet/vendor/html5lib/constants.py new file mode 100644 index 0000000..459098f --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/constants.py @@ -0,0 +1,816 @@ +import string + +try: + frozenset +except NameError: + # Import from the sets module for python 2.3 + from sets import Set as set + from sets import ImmutableSet as frozenset + +EOF = None + +contentModelFlags = { + "PCDATA":0, + "RCDATA":1, + "CDATA":2, + "PLAINTEXT":3 +} + +scopingElements = frozenset(( + "button", + "caption", + "html", + "marquee", + "object", + "table", + "td", + "th" +)) + +formattingElements = frozenset(( + "a", + "b", + "big", + "em", + "font", + "i", + "nobr", + "s", + "small", + "strike", + "strong", + "tt", + "u" +)) + +specialElements = frozenset(( + "address", + "area", + "base", + "basefont", + "bgsound", + "blockquote", + "body", + "br", + "center", + "col", + "colgroup", + "dd", + "dir", + "div", + "dl", + "dt", + "embed", + "fieldset", + "form", + "frame", + "frameset", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "head", + "hr", + "iframe", + "image", + "img", + "input", + "isindex", + "li", + "link", + "listing", + "menu", + "meta", + "noembed", + "noframes", + "noscript", + "ol", + "optgroup", + "option", + "p", + "param", + "plaintext", + "pre", + "script", + "select", + "spacer", + "style", + "tbody", + "textarea", + "tfoot", + "thead", + "title", + "tr", + "ul", + "wbr" +)) + +spaceCharacters = frozenset(( + u"\t", + u"\n", + u"\u000B", + u"\u000C", + u" ", + u"\r" +)) + +tableInsertModeElements = frozenset(( + "table", + "tbody", + "tfoot", + "thead", + "tr" +)) + +asciiLowercase = frozenset(string.ascii_lowercase) +asciiUppercase = frozenset(string.ascii_uppercase) +asciiLetters = frozenset(string.ascii_letters) +digits = frozenset(string.digits) +hexDigits = frozenset(string.hexdigits) + +asciiUpper2Lower = dict([(ord(c),ord(c.lower())) + for c in string.ascii_uppercase]) + +# Heading elements need to be ordered +headingElements = ( + "h1", + "h2", + "h3", + "h4", + "h5", + "h6" +) + +# XXX What about event-source and command? 
+voidElements = frozenset(( + "base", + "link", + "meta", + "hr", + "br", + "img", + "embed", + "param", + "area", + "col", + "input" +)) + +cdataElements = frozenset(('title', 'textarea')) + +rcdataElements = frozenset(( + 'style', + 'script', + 'xmp', + 'iframe', + 'noembed', + 'noframes', + 'noscript' +)) + +booleanAttributes = { + "": frozenset(("irrelevant",)), + "style": frozenset(("scoped",)), + "img": frozenset(("ismap",)), + "audio": frozenset(("autoplay","controls")), + "video": frozenset(("autoplay","controls")), + "script": frozenset(("defer", "async")), + "details": frozenset(("open",)), + "datagrid": frozenset(("multiple", "disabled")), + "command": frozenset(("hidden", "disabled", "checked", "default")), + "menu": frozenset(("autosubmit",)), + "fieldset": frozenset(("disabled", "readonly")), + "option": frozenset(("disabled", "readonly", "selected")), + "optgroup": frozenset(("disabled", "readonly")), + "button": frozenset(("disabled", "autofocus")), + "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")), + "select": frozenset(("disabled", "readonly", "autofocus", "multiple")), + "output": frozenset(("disabled", "readonly")), +} + +# entitiesWindows1252 has to be _ordered_ and needs to have an index. It +# therefore can't be a frozenset. +entitiesWindows1252 = ( + 8364, # 0x80 0x20AC EURO SIGN + 65533, # 0x81 UNDEFINED + 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK + 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK + 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK + 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS + 8224, # 0x86 0x2020 DAGGER + 8225, # 0x87 0x2021 DOUBLE DAGGER + 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT + 8240, # 0x89 0x2030 PER MILLE SIGN + 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON + 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK + 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE + 65533, # 0x8D UNDEFINED + 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON + 65533, # 0x8F UNDEFINED + 65533, # 0x90 UNDEFINED + 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK + 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK + 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK + 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK + 8226, # 0x95 0x2022 BULLET + 8211, # 0x96 0x2013 EN DASH + 8212, # 0x97 0x2014 EM DASH + 732, # 0x98 0x02DC SMALL TILDE + 8482, # 0x99 0x2122 TRADE MARK SIGN + 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON + 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK + 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE + 65533, # 0x9D UNDEFINED + 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON + 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS +) + +entities = { + "AElig;": u"\u00C6", + "AElig": u"\u00C6", + "AMP;": u"\u0026", + "AMP": u"\u0026", + "Aacute;": u"\u00C1", + "Aacute": u"\u00C1", + "Acirc;": u"\u00C2", + "Acirc": u"\u00C2", + "Agrave;": u"\u00C0", + "Agrave": u"\u00C0", + "Alpha;": u"\u0391", + "Aring;": u"\u00C5", + "Aring": u"\u00C5", + "Atilde;": u"\u00C3", + "Atilde": u"\u00C3", + "Auml;": u"\u00C4", + "Auml": u"\u00C4", + "Beta;": u"\u0392", + "COPY;": u"\u00A9", + "COPY": u"\u00A9", + "Ccedil;": u"\u00C7", + "Ccedil": u"\u00C7", + "Chi;": u"\u03A7", + "Dagger;": u"\u2021", + "Delta;": u"\u0394", + "ETH;": u"\u00D0", + "ETH": u"\u00D0", + "Eacute;": u"\u00C9", + "Eacute": u"\u00C9", + "Ecirc;": u"\u00CA", + "Ecirc": u"\u00CA", + "Egrave;": u"\u00C8", + "Egrave": u"\u00C8", + "Epsilon;": u"\u0395", + "Eta;": u"\u0397", + "Euml;": u"\u00CB", + "Euml": u"\u00CB", + 
"GT;": u"\u003E", + "GT": u"\u003E", + "Gamma;": u"\u0393", + "Iacute;": u"\u00CD", + "Iacute": u"\u00CD", + "Icirc;": u"\u00CE", + "Icirc": u"\u00CE", + "Igrave;": u"\u00CC", + "Igrave": u"\u00CC", + "Iota;": u"\u0399", + "Iuml;": u"\u00CF", + "Iuml": u"\u00CF", + "Kappa;": u"\u039A", + "LT;": u"\u003C", + "LT": u"\u003C", + "Lambda;": u"\u039B", + "Mu;": u"\u039C", + "Ntilde;": u"\u00D1", + "Ntilde": u"\u00D1", + "Nu;": u"\u039D", + "OElig;": u"\u0152", + "Oacute;": u"\u00D3", + "Oacute": u"\u00D3", + "Ocirc;": u"\u00D4", + "Ocirc": u"\u00D4", + "Ograve;": u"\u00D2", + "Ograve": u"\u00D2", + "Omega;": u"\u03A9", + "Omicron;": u"\u039F", + "Oslash;": u"\u00D8", + "Oslash": u"\u00D8", + "Otilde;": u"\u00D5", + "Otilde": u"\u00D5", + "Ouml;": u"\u00D6", + "Ouml": u"\u00D6", + "Phi;": u"\u03A6", + "Pi;": u"\u03A0", + "Prime;": u"\u2033", + "Psi;": u"\u03A8", + "QUOT;": u"\u0022", + "QUOT": u"\u0022", + "REG;": u"\u00AE", + "REG": u"\u00AE", + "Rho;": u"\u03A1", + "Scaron;": u"\u0160", + "Sigma;": u"\u03A3", + "THORN;": u"\u00DE", + "THORN": u"\u00DE", + "TRADE;": u"\u2122", + "Tau;": u"\u03A4", + "Theta;": u"\u0398", + "Uacute;": u"\u00DA", + "Uacute": u"\u00DA", + "Ucirc;": u"\u00DB", + "Ucirc": u"\u00DB", + "Ugrave;": u"\u00D9", + "Ugrave": u"\u00D9", + "Upsilon;": u"\u03A5", + "Uuml;": u"\u00DC", + "Uuml": u"\u00DC", + "Xi;": u"\u039E", + "Yacute;": u"\u00DD", + "Yacute": u"\u00DD", + "Yuml;": u"\u0178", + "Zeta;": u"\u0396", + "aacute;": u"\u00E1", + "aacute": u"\u00E1", + "acirc;": u"\u00E2", + "acirc": u"\u00E2", + "acute;": u"\u00B4", + "acute": u"\u00B4", + "aelig;": u"\u00E6", + "aelig": u"\u00E6", + "agrave;": u"\u00E0", + "agrave": u"\u00E0", + "alefsym;": u"\u2135", + "alpha;": u"\u03B1", + "amp;": u"\u0026", + "amp": u"\u0026", + "and;": u"\u2227", + "ang;": u"\u2220", + "apos;": u"\u0027", + "aring;": u"\u00E5", + "aring": u"\u00E5", + "asymp;": u"\u2248", + "atilde;": u"\u00E3", + "atilde": u"\u00E3", + "auml;": u"\u00E4", + "auml": u"\u00E4", + "bdquo;": u"\u201E", + "beta;": u"\u03B2", + "brvbar;": u"\u00A6", + "brvbar": u"\u00A6", + "bull;": u"\u2022", + "cap;": u"\u2229", + "ccedil;": u"\u00E7", + "ccedil": u"\u00E7", + "cedil;": u"\u00B8", + "cedil": u"\u00B8", + "cent;": u"\u00A2", + "cent": u"\u00A2", + "chi;": u"\u03C7", + "circ;": u"\u02C6", + "clubs;": u"\u2663", + "cong;": u"\u2245", + "copy;": u"\u00A9", + "copy": u"\u00A9", + "crarr;": u"\u21B5", + "cup;": u"\u222A", + "curren;": u"\u00A4", + "curren": u"\u00A4", + "dArr;": u"\u21D3", + "dagger;": u"\u2020", + "darr;": u"\u2193", + "deg;": u"\u00B0", + "deg": u"\u00B0", + "delta;": u"\u03B4", + "diams;": u"\u2666", + "divide;": u"\u00F7", + "divide": u"\u00F7", + "eacute;": u"\u00E9", + "eacute": u"\u00E9", + "ecirc;": u"\u00EA", + "ecirc": u"\u00EA", + "egrave;": u"\u00E8", + "egrave": u"\u00E8", + "empty;": u"\u2205", + "emsp;": u"\u2003", + "ensp;": u"\u2002", + "epsilon;": u"\u03B5", + "equiv;": u"\u2261", + "eta;": u"\u03B7", + "eth;": u"\u00F0", + "eth": u"\u00F0", + "euml;": u"\u00EB", + "euml": u"\u00EB", + "euro;": u"\u20AC", + "exist;": u"\u2203", + "fnof;": u"\u0192", + "forall;": u"\u2200", + "frac12;": u"\u00BD", + "frac12": u"\u00BD", + "frac14;": u"\u00BC", + "frac14": u"\u00BC", + "frac34;": u"\u00BE", + "frac34": u"\u00BE", + "frasl;": u"\u2044", + "gamma;": u"\u03B3", + "ge;": u"\u2265", + "gt;": u"\u003E", + "gt": u"\u003E", + "hArr;": u"\u21D4", + "harr;": u"\u2194", + "hearts;": u"\u2665", + "hellip;": u"\u2026", + "iacute;": u"\u00ED", + "iacute": u"\u00ED", + "icirc;": u"\u00EE", + "icirc": 
u"\u00EE", + "iexcl;": u"\u00A1", + "iexcl": u"\u00A1", + "igrave;": u"\u00EC", + "igrave": u"\u00EC", + "image;": u"\u2111", + "infin;": u"\u221E", + "int;": u"\u222B", + "iota;": u"\u03B9", + "iquest;": u"\u00BF", + "iquest": u"\u00BF", + "isin;": u"\u2208", + "iuml;": u"\u00EF", + "iuml": u"\u00EF", + "kappa;": u"\u03BA", + "lArr;": u"\u21D0", + "lambda;": u"\u03BB", + "lang;": u"\u3008", + "laquo;": u"\u00AB", + "laquo": u"\u00AB", + "larr;": u"\u2190", + "lceil;": u"\u2308", + "ldquo;": u"\u201C", + "le;": u"\u2264", + "lfloor;": u"\u230A", + "lowast;": u"\u2217", + "loz;": u"\u25CA", + "lrm;": u"\u200E", + "lsaquo;": u"\u2039", + "lsquo;": u"\u2018", + "lt;": u"\u003C", + "lt": u"\u003C", + "macr;": u"\u00AF", + "macr": u"\u00AF", + "mdash;": u"\u2014", + "micro;": u"\u00B5", + "micro": u"\u00B5", + "middot;": u"\u00B7", + "middot": u"\u00B7", + "minus;": u"\u2212", + "mu;": u"\u03BC", + "nabla;": u"\u2207", + "nbsp;": u"\u00A0", + "nbsp": u"\u00A0", + "ndash;": u"\u2013", + "ne;": u"\u2260", + "ni;": u"\u220B", + "not;": u"\u00AC", + "not": u"\u00AC", + "notin;": u"\u2209", + "nsub;": u"\u2284", + "ntilde;": u"\u00F1", + "ntilde": u"\u00F1", + "nu;": u"\u03BD", + "oacute;": u"\u00F3", + "oacute": u"\u00F3", + "ocirc;": u"\u00F4", + "ocirc": u"\u00F4", + "oelig;": u"\u0153", + "ograve;": u"\u00F2", + "ograve": u"\u00F2", + "oline;": u"\u203E", + "omega;": u"\u03C9", + "omicron;": u"\u03BF", + "oplus;": u"\u2295", + "or;": u"\u2228", + "ordf;": u"\u00AA", + "ordf": u"\u00AA", + "ordm;": u"\u00BA", + "ordm": u"\u00BA", + "oslash;": u"\u00F8", + "oslash": u"\u00F8", + "otilde;": u"\u00F5", + "otilde": u"\u00F5", + "otimes;": u"\u2297", + "ouml;": u"\u00F6", + "ouml": u"\u00F6", + "para;": u"\u00B6", + "para": u"\u00B6", + "part;": u"\u2202", + "permil;": u"\u2030", + "perp;": u"\u22A5", + "phi;": u"\u03C6", + "pi;": u"\u03C0", + "piv;": u"\u03D6", + "plusmn;": u"\u00B1", + "plusmn": u"\u00B1", + "pound;": u"\u00A3", + "pound": u"\u00A3", + "prime;": u"\u2032", + "prod;": u"\u220F", + "prop;": u"\u221D", + "psi;": u"\u03C8", + "quot;": u"\u0022", + "quot": u"\u0022", + "rArr;": u"\u21D2", + "radic;": u"\u221A", + "rang;": u"\u3009", + "raquo;": u"\u00BB", + "raquo": u"\u00BB", + "rarr;": u"\u2192", + "rceil;": u"\u2309", + "rdquo;": u"\u201D", + "real;": u"\u211C", + "reg;": u"\u00AE", + "reg": u"\u00AE", + "rfloor;": u"\u230B", + "rho;": u"\u03C1", + "rlm;": u"\u200F", + "rsaquo;": u"\u203A", + "rsquo;": u"\u2019", + "sbquo;": u"\u201A", + "scaron;": u"\u0161", + "sdot;": u"\u22C5", + "sect;": u"\u00A7", + "sect": u"\u00A7", + "shy;": u"\u00AD", + "shy": u"\u00AD", + "sigma;": u"\u03C3", + "sigmaf;": u"\u03C2", + "sim;": u"\u223C", + "spades;": u"\u2660", + "sub;": u"\u2282", + "sube;": u"\u2286", + "sum;": u"\u2211", + "sup1;": u"\u00B9", + "sup1": u"\u00B9", + "sup2;": u"\u00B2", + "sup2": u"\u00B2", + "sup3;": u"\u00B3", + "sup3": u"\u00B3", + "sup;": u"\u2283", + "supe;": u"\u2287", + "szlig;": u"\u00DF", + "szlig": u"\u00DF", + "tau;": u"\u03C4", + "there4;": u"\u2234", + "theta;": u"\u03B8", + "thetasym;": u"\u03D1", + "thinsp;": u"\u2009", + "thorn;": u"\u00FE", + "thorn": u"\u00FE", + "tilde;": u"\u02DC", + "times;": u"\u00D7", + "times": u"\u00D7", + "trade;": u"\u2122", + "uArr;": u"\u21D1", + "uacute;": u"\u00FA", + "uacute": u"\u00FA", + "uarr;": u"\u2191", + "ucirc;": u"\u00FB", + "ucirc": u"\u00FB", + "ugrave;": u"\u00F9", + "ugrave": u"\u00F9", + "uml;": u"\u00A8", + "uml": u"\u00A8", + "upsih;": u"\u03D2", + "upsilon;": u"\u03C5", + "uuml;": u"\u00FC", + "uuml": 
u"\u00FC", + "weierp;": u"\u2118", + "xi;": u"\u03BE", + "yacute;": u"\u00FD", + "yacute": u"\u00FD", + "yen;": u"\u00A5", + "yen": u"\u00A5", + "yuml;": u"\u00FF", + "yuml": u"\u00FF", + "zeta;": u"\u03B6", + "zwj;": u"\u200D", + "zwnj;": u"\u200C" +} + +encodings = frozenset(( + "ansi_x3.4-1968", + "iso-ir-6", + "ansi_x3.4-1986", + "iso_646.irv:1991", + "ascii", + "iso646-us", + "us-ascii", + "us", + "ibm367", + "cp367", + "csascii", + "ks_c_5601-1987", + "korean", + "iso-2022-kr", + "csiso2022kr", + "euc-kr", + "iso-2022-jp", + "csiso2022jp", + "iso-2022-jp-2", + "iso-ir-58", + "chinese", + "csiso58gb231280", + "iso_8859-1:1987", + "iso-ir-100", + "iso_8859-1", + "iso-8859-1", + "latin1", + "l1", + "ibm819", + "cp819", + "csisolatin1", + "iso_8859-2:1987", + "iso-ir-101", + "iso_8859-2", + "iso-8859-2", + "latin2", + "l2", + "csisolatin2", + "iso_8859-3:1988", + "iso-ir-109", + "iso_8859-3", + "iso-8859-3", + "latin3", + "l3", + "csisolatin3", + "iso_8859-4:1988", + "iso-ir-110", + "iso_8859-4", + "iso-8859-4", + "latin4", + "l4", + "csisolatin4", + "iso_8859-6:1987", + "iso-ir-127", + "iso_8859-6", + "iso-8859-6", + "ecma-114", + "asmo-708", + "arabic", + "csisolatinarabic", + "iso_8859-7:1987", + "iso-ir-126", + "iso_8859-7", + "iso-8859-7", + "elot_928", + "ecma-118", + "greek", + "greek8", + "csisolatingreek", + "iso_8859-8:1988", + "iso-ir-138", + "iso_8859-8", + "iso-8859-8", + "hebrew", + "csisolatinhebrew", + "iso_8859-5:1988", + "iso-ir-144", + "iso_8859-5", + "iso-8859-5", + "cyrillic", + "csisolatincyrillic", + "iso_8859-9:1989", + "iso-ir-148", + "iso_8859-9", + "iso-8859-9", + "latin5", + "l5", + "csisolatin5", + "iso-8859-10", + "iso-ir-157", + "l6", + "iso_8859-10:1992", + "csisolatin6", + "latin6", + "hp-roman8", + "roman8", + "r8", + "ibm037", + "cp037", + "csibm037", + "ibm424", + "cp424", + "csibm424", + "ibm437", + "cp437", + "437", + "cspc8codepage437", + "ibm500", + "cp500", + "csibm500", + "ibm775", + "cp775", + "cspc775baltic", + "ibm850", + "cp850", + "850", + "cspc850multilingual", + "ibm852", + "cp852", + "852", + "cspcp852", + "ibm855", + "cp855", + "855", + "csibm855", + "ibm857", + "cp857", + "857", + "csibm857", + "ibm860", + "cp860", + "860", + "csibm860", + "ibm861", + "cp861", + "861", + "cp-is", + "csibm861", + "ibm862", + "cp862", + "862", + "cspc862latinhebrew", + "ibm863", + "cp863", + "863", + "csibm863", + "ibm864", + "cp864", + "csibm864", + "ibm865", + "cp865", + "865", + "csibm865", + "ibm866", + "cp866", + "866", + "csibm866", + "ibm869", + "cp869", + "869", + "cp-gr", + "csibm869", + "ibm1026", + "cp1026", + "csibm1026", + "koi8-r", + "cskoi8r", + "koi8-u", + "big5-hkscs", + "ptcp154", + "csptcp154", + "pt154", + "cp154", + "utf-7", + "utf-16be", + "utf-16le", + "utf-16", + "utf-8", + "iso-8859-13", + "iso-8859-14", + "iso-ir-199", + "iso_8859-14:1998", + "iso_8859-14", + "latin8", + "iso-celtic", + "l8", + "iso-8859-15", + "iso_8859-15", + "iso-8859-16", + "iso-ir-226", + "iso_8859-16:2001", + "iso_8859-16", + "latin10", + "l10", + "gbk", + "cp936", + "ms936", + "gb18030", + "shift_jis", + "ms_kanji", + "csshiftjis", + "euc-jp", + "gb2312", + "big5", + "csbig5", + "windows-1250", + "windows-1251", + "windows-1252", + "windows-1253", + "windows-1254", + "windows-1255", + "windows-1256", + "windows-1257", + "windows-1258", + "tis-620", + "hz-gb-2312", + )) \ No newline at end of file diff --git a/lib/venus/planet/vendor/html5lib/filters/__init__.py b/lib/venus/planet/vendor/html5lib/filters/__init__.py new file mode 100644 index 
0000000..e69de29 diff --git a/lib/venus/planet/vendor/html5lib/filters/_base.py b/lib/venus/planet/vendor/html5lib/filters/_base.py new file mode 100644 index 0000000..bca94ad --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/filters/_base.py @@ -0,0 +1,10 @@ + +class Filter(object): + def __init__(self, source): + self.source = source + + def __iter__(self): + return iter(self.source) + + def __getattr__(self, name): + return getattr(self.source, name) diff --git a/lib/venus/planet/vendor/html5lib/filters/inject_meta_charset.py b/lib/venus/planet/vendor/html5lib/filters/inject_meta_charset.py new file mode 100644 index 0000000..35a2d95 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/filters/inject_meta_charset.py @@ -0,0 +1,63 @@ +import _base + +class Filter(_base.Filter): + def __init__(self, source, encoding): + _base.Filter.__init__(self, source) + self.encoding = encoding + + def __iter__(self): + state = "pre_head" + meta_found = (self.encoding is None) + pending = [] + + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag": + if token["name"].lower() == "head": + state = "in_head" + + elif type == "EmptyTag": + if token["name"].lower() == "meta": + # replace charset with actual encoding + has_http_equiv_content_type = False + content_index = -1 + for i,(name,value) in enumerate(token["data"]): + if name.lower() == 'charset': + token["data"][i] = (u'charset', self.encoding) + meta_found = True + break + elif name == 'http-equiv' and value.lower() == 'content-type': + has_http_equiv_content_type = True + elif name == 'content': + content_index = i + else: + if has_http_equiv_content_type and content_index >= 0: + token["data"][content_index] = (u'content', u'text/html; charset=%s' % self.encoding) + meta_found = True + + elif token["name"].lower() == "head" and not meta_found: + # insert meta into empty head + yield {"type": "StartTag", "name": "head", + "data": token["data"]} + yield {"type": "EmptyTag", "name": "meta", + "data": [["charset", self.encoding]]} + yield {"type": "EndTag", "name": "head"} + meta_found = True + continue + + elif type == "EndTag": + if token["name"].lower() == "head" and pending: + # insert meta into head (if necessary) and flush pending queue + yield pending.pop(0) + if not meta_found: + yield {"type": "EmptyTag", "name": "meta", + "data": [["charset", self.encoding]]} + while pending: + yield pending.pop(0) + meta_found = True + state = "post_head" + + if state == "in_head": + pending.append(token) + else: + yield token diff --git a/lib/venus/planet/vendor/html5lib/filters/lint.py b/lib/venus/planet/vendor/html5lib/filters/lint.py new file mode 100644 index 0000000..ea5c619 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/filters/lint.py @@ -0,0 +1,88 @@ +from gettext import gettext +_ = gettext + +import _base +from html5lib.constants import cdataElements, rcdataElements, voidElements + +from html5lib.constants import spaceCharacters +spaceCharacters = u"".join(spaceCharacters) + +class LintError(Exception): pass + +class Filter(_base.Filter): + def __iter__(self): + open_elements = [] + contentModelFlag = "PCDATA" + for token in _base.Filter.__iter__(self): + type = token["type"] + if type in ("StartTag", "EmptyTag"): + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError(_("StartTag not in PCDATA content model flag: %s") % name) + if not isinstance(name, unicode): + raise LintError(_(u"Tag name is not a string: %r") % name) + if not name: + raise LintError(_(u"Empty tag name")) + if 
type == "StartTag" and name in voidElements: + raise LintError(_(u"Void element reported as StartTag token: %s") % name) + elif type == "EmptyTag" and name not in voidElements: + raise LintError(_(u"Non-void element reported as EmptyTag token: %s") % token["name"]) + if type == "StartTag": + open_elements.append(name) + for name, value in token["data"]: + if not isinstance(name, unicode): + raise LintError(_("Attribute name is not a string: %r") % name) + if not name: + raise LintError(_(u"Empty attribute name")) + if not isinstance(value, unicode): + raise LintError(_("Attribute value is not a string: %r") % value) + if name in cdataElements: + contentModelFlag = "CDATA" + elif name in rcdataElements: + contentModelFlag = "RCDATA" + elif name == "plaintext": + contentModelFlag = "PLAINTEXT" + + elif type == "EndTag": + name = token["name"] + if not isinstance(name, unicode): + raise LintError(_(u"Tag name is not a string: %r") % name) + if not name: + raise LintError(_(u"Empty tag name")) + if name in voidElements: + raise LintError(_(u"Void element reported as EndTag token: %s") % name) + start_name = open_elements.pop() + if start_name != name: + raise LintError(_(u"EndTag (%s) does not match StartTag (%s)") % (name, start_name)) + contentModelFlag = "PCDATA" + + elif type == "Comment": + if contentModelFlag != "PCDATA": + raise LintError(_("Comment not in PCDATA content model flag")) + + elif type in ("Characters", "SpaceCharacters"): + data = token["data"] + if not isinstance(data, unicode): + raise LintError(_("Attribute name is not a string: %r") % data) + if not data: + raise LintError(_(u"%s token with empty data") % type) + if type == "SpaceCharacters": + data = data.strip(spaceCharacters) + if data: + raise LintError(_(u"Non-space character(s) found in SpaceCharacters token: ") % data) + + elif type == "Doctype": + name = token["name"] + if contentModelFlag != "PCDATA": + raise LintError(_("Doctype not in PCDATA content model flag: %s") % name) + if not isinstance(name, unicode): + raise LintError(_(u"Tag name is not a string: %r") % name) + # XXX: what to do with token["data"] ? + + elif type in ("ParseError", "SerializeError"): + pass + + else: + raise LintError(_(u"Unknown token type: %s") % type) + + yield token diff --git a/lib/venus/planet/vendor/html5lib/filters/optionaltags.py b/lib/venus/planet/vendor/html5lib/filters/optionaltags.py new file mode 100644 index 0000000..73da96c --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/filters/optionaltags.py @@ -0,0 +1,175 @@ +import _base + +class Filter(_base.Filter): + def slider(self): + previous1 = previous2 = None + for token in self.source: + if previous1 is not None: + yield previous2, previous1, token + previous2 = previous1 + previous1 = token + yield previous2, previous1, None + + def __iter__(self): + for previous, token, next in self.slider(): + type = token["type"] + if type == "StartTag": + if token["data"] or not self.is_optional_start(token["name"], previous, next): + yield token + elif type == "EndTag": + if not self.is_optional_end(token["name"], next): + yield token + else: + yield token + + def is_optional_start(self, tagname, previous, next): + type = next and next["type"] or None + if tagname in 'html': + # An html element's start tag may be omitted if the first thing + # inside the html element is not a space character or a comment. 
+ return type not in ("Comment", "SpaceCharacters") + elif tagname == 'head': + # A head element's start tag may be omitted if the first thing + # inside the head element is an element. + return type == "StartTag" + elif tagname == 'body': + # A body element's start tag may be omitted if the first thing + # inside the body element is not a space character or a comment, + # except if the first thing inside the body element is a script + # or style element and the node immediately preceding the body + # element is a head element whose end tag has been omitted. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we do not look at the preceding event, so we never omit + # the body element's start tag if it's followed by a script or + # a style element. + return next["name"] not in ('script', 'style') + else: + return True + elif tagname == 'colgroup': + # A colgroup element's start tag may be omitted if the first thing + # inside the colgroup element is a col element, and if the element + # is not immediately preceeded by another colgroup element whose + # end tag has been omitted. + if type == "StartTag": + # XXX: we do not look at the preceding event, so instead we never + # omit the colgroup element's end tag when it is immediately + # followed by another colgroup element. See is_optional_end. + return next["name"] == "col" + else: + return False + elif tagname == 'tbody': + # A tbody element's start tag may be omitted if the first thing + # inside the tbody element is a tr element, and if the element is + # not immediately preceeded by a tbody, thead, or tfoot element + # whose end tag has been omitted. + if type == "StartTag": + # omit the thead and tfoot elements' end tag when they are + # immediately followed by a tbody element. See is_optional_end. + if previous and previous['type'] == 'EndTag' and \ + previous['name'] in ('tbody','thead','tfoot'): + return False + return next["name"] == 'tr' + else: + return False + return False + + def is_optional_end(self, tagname, next): + type = next and next["type"] or None + if tagname in ('html', 'head', 'body'): + # An html element's end tag may be omitted if the html element + # is not immediately followed by a space character or a comment. + return type not in ("Comment", "SpaceCharacters") + elif tagname in ('li', 'optgroup', 'option', 'tr'): + # A li element's end tag may be omitted if the li element is + # immediately followed by another li element or if there is + # no more content in the parent element. + # An optgroup element's end tag may be omitted if the optgroup + # element is immediately followed by another optgroup element, + # or if there is no more content in the parent element. + # An option element's end tag may be omitted if the option + # element is immediately followed by another option element, + # or if there is no more content in the parent element. + # A tr element's end tag may be omitted if the tr element is + # immediately followed by another tr element, or if there is + # no more content in the parent element. + if type == "StartTag": + return next["name"] == tagname + else: + return type == "EndTag" or type is None + elif tagname in ('dt', 'dd'): + # A dt element's end tag may be omitted if the dt element is + # immediately followed by another dt element or a dd element. + # A dd element's end tag may be omitted if the dd element is + # immediately followed by another dd element or a dt element, + # or if there is no more content in the parent element. 
+ if type == "StartTag": + return next["name"] in ('dt', 'dd') + elif tagname == 'dd': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'p': + # A p element's end tag may be omitted if the p element is + # immediately followed by an address, blockquote, dl, fieldset, + # form, h1, h2, h3, h4, h5, h6, hr, menu, ol, p, pre, table, + # or ul element, or if there is no more content in the parent + # element. + if type == "StartTag": + return next["name"] in ('address', 'blockquote', \ + 'dl', 'fieldset', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', \ + 'h6', 'hr', 'menu', 'ol', 'p', 'pre', 'table', 'ul') + else: + return type == "EndTag" or type is None + elif tagname == 'colgroup': + # A colgroup element's end tag may be omitted if the colgroup + # element is not immediately followed by a space character or + # a comment. + if type in ("Comment", "SpaceCharacters"): + return False + elif type == "StartTag": + # XXX: we also look for an immediately following colgroup + # element. See is_optional_start. + return next["name"] != 'colgroup' + else: + return True + elif tagname in ('thead', 'tbody'): + # A thead element's end tag may be omitted if the thead element + # is immediately followed by a tbody or tfoot element. + # A tbody element's end tag may be omitted if the tbody element + # is immediately followed by a tbody or tfoot element, or if + # there is no more content in the parent element. + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] in ['tbody', 'tfoot'] + elif tagname == 'tbody': + return type == "EndTag" or type is None + else: + return False + elif tagname == 'tfoot': + # A tfoot element's end tag may be omitted if the tfoot element + # is immediately followed by a tbody element, or if there is no + # more content in the parent element. + # XXX: we never omit the end tag when the following element is + # a tbody. See is_optional_start. + if type == "StartTag": + return next["name"] == 'tbody' + else: + return type == "EndTag" or type is None + elif tagname in ('td', 'th'): + # A td element's end tag may be omitted if the td element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. + # A th element's end tag may be omitted if the th element is + # immediately followed by a td or th element, or if there is + # no more content in the parent element. 
+ if type == "StartTag": + return next["name"] in ('td', 'th') + else: + return type == "EndTag" or type is None + return False diff --git a/lib/venus/planet/vendor/html5lib/filters/whitespace.py b/lib/venus/planet/vendor/html5lib/filters/whitespace.py new file mode 100644 index 0000000..74d6f4d --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/filters/whitespace.py @@ -0,0 +1,41 @@ +try: + frozenset +except NameError: + # Import from the sets module for python 2.3 + from sets import ImmutableSet as frozenset + +import re + +import _base +from html5lib.constants import rcdataElements, spaceCharacters +spaceCharacters = u"".join(spaceCharacters) + +SPACES_REGEX = re.compile(u"[%s]+" % spaceCharacters) + +class Filter(_base.Filter): + + spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) + + def __iter__(self): + preserve = 0 + for token in _base.Filter.__iter__(self): + type = token["type"] + if type == "StartTag" \ + and (preserve or token["name"] in self.spacePreserveElements): + preserve += 1 + + elif type == "EndTag" and preserve: + preserve -= 1 + + elif not preserve and type == "SpaceCharacters" and token["data"]: + # Test on token["data"] above to not introduce spaces where there were not + token["data"] = u" " + + elif not preserve and type == "Characters": + token["data"] = collapse_spaces(token["data"]) + + yield token + +def collapse_spaces(text): + return SPACES_REGEX.sub(' ', text) + diff --git a/lib/venus/planet/vendor/html5lib/html5parser.py b/lib/venus/planet/vendor/html5lib/html5parser.py new file mode 100644 index 0000000..1c0fd3e --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/html5parser.py @@ -0,0 +1,1985 @@ +# Differences from the current specification are as follows: +# * Phases and insertion modes are one concept in parser.py. +# * EOF handling is slightly different to make sure <html>, <head> and <body> +# always exist. + + +try: + frozenset +except NameError: + # Import from the sets module for python 2.3 + from sets import Set as set + from sets import ImmutableSet as frozenset +import gettext +_ = gettext.gettext +import sys + +import tokenizer + +import treebuilders +from treebuilders._base import Marker +from treebuilders import simpletree + +import utils +from constants import contentModelFlags, spaceCharacters, asciiUpper2Lower +from constants import scopingElements, formattingElements, specialElements +from constants import headingElements, tableInsertModeElements +from constants import cdataElements, rcdataElements, voidElements + +class HTMLParser(object): + """HTML parser. Generates a tree structure from a stream of (possibly + malformed) HTML""" + + def __init__(self, strict = False, tree=simpletree.TreeBuilder, + tokenizer=tokenizer.HTMLTokenizer): + """ + strict - raise an exception when a parse error is encountered + + tree - a treebuilder class controlling the type of tree that will be + returned. 
Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) + """ + + # Raise an exception on the first error encountered + self.strict = strict + + self.tree = tree() + self.tokenizer_class = tokenizer + self.errors = [] + + # "quirks" / "almost-standards" / "standards" + self.quirksMode = "standards" + + self.phases = { + "initial": InitialPhase(self, self.tree), + "rootElement": RootElementPhase(self, self.tree), + "beforeHead": BeforeHeadPhase(self, self.tree), + "inHead": InHeadPhase(self, self.tree), + # XXX "inHeadNoscript": InHeadNoScriptPhase(self, self.tree), + "afterHead": AfterHeadPhase(self, self.tree), + "inBody": InBodyPhase(self, self.tree), + "inTable": InTablePhase(self, self.tree), + "inCaption": InCaptionPhase(self, self.tree), + "inColumnGroup": InColumnGroupPhase(self, self.tree), + "inTableBody": InTableBodyPhase(self, self.tree), + "inRow": InRowPhase(self, self.tree), + "inCell": InCellPhase(self, self.tree), + "inSelect": InSelectPhase(self, self.tree), + "afterBody": AfterBodyPhase(self, self.tree), + "inFrameset": InFramesetPhase(self, self.tree), + "afterFrameset": AfterFramesetPhase(self, self.tree), + "trailingEnd": TrailingEndPhase(self, self.tree) + } + + def _parse(self, stream, innerHTML=False, container="div", + encoding=None, **kwargs): + + self.tree.reset() + self.firstStartTag = False + self.errors = [] + + self.tokenizer = self.tokenizer_class(stream, encoding=encoding, + parseMeta=not innerHTML, **kwargs) + + if innerHTML: + self.innerHTML = container.lower() + + if self.innerHTML in cdataElements: + self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["RCDATA"] + elif self.innerHTML in rcdataElements: + self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["CDATA"] + elif self.innerHTML == 'plaintext': + self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["PLAINTEXT"] + else: + # contentModelFlag already is PCDATA + #self.tokenizer.contentModelFlag = tokenizer.contentModelFlags["PCDATA"] + pass + self.phase = self.phases["rootElement"] + self.phase.insertHtmlElement() + self.resetInsertionMode() + else: + self.innerHTML = False + self.phase = self.phases["initial"] + + # We only seem to have InBodyPhase testcases where the following is + # relevant ... need others too + self.lastPhase = None + + # XXX This is temporary for the moment so there isn't any other + # changes needed for the parser to work with the iterable tokenizer + for token in self.tokenizer: + token = self.normalizeToken(token) + type = token["type"] + method = getattr(self.phase, "process%s" % type, None) + if type in ("Characters", "SpaceCharacters", "Comment"): + method(token["data"]) + elif type == "StartTag": + method(token["name"], token["data"]) + elif type == "EndTag": + method(token["name"]) + elif type == "Doctype": + method(token["name"], token["publicId"], token["systemId"], token["correct"]) + else: + self.parseError(token["data"]) + + # When the loop finishes it's EOF + self.phase.processEOF() + + def parse(self, stream, encoding=None): + """Parse a HTML document into a well-formed tree + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. 
If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, innerHTML=False, encoding=encoding) + return self.tree.getDocument() + + def parseFragment(self, stream, container="div", encoding=None): + """Parse a HTML fragment into a well-formed tree fragment + + container - name of the element we're setting the innerHTML property + if set to None, default to 'div' + + stream - a filelike object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + """ + self._parse(stream, True, container=container, encoding=encoding) + return self.tree.getFragment() + + def parseError(self, data="XXX ERROR MESSAGE NEEDED"): + # XXX The idea is to make data mandatory. + self.errors.append((self.tokenizer.stream.position(), data)) + if self.strict: + raise ParseError + + def normalizeToken(self, token): + """ HTML5 specific normalizations to the token stream """ + + if token["type"] == "EmptyTag": + # When a solidus (/) is encountered within a tag name what happens + # depends on whether the current tag name matches that of a void + # element. If it matches a void element atheists did the wrong + # thing and if it doesn't it's wrong for everyone. + + if token["name"] not in voidElements: + self.parseError(_(u"Solidus (/) incorrectly placed in tag.")) + + token["type"] = "StartTag" + + if token["type"] == "StartTag": + token["data"] = dict(token["data"][::-1]) + + return token + + + def resetInsertionMode(self): + # The name of this method is mostly historical. (It's also used in the + # specification.) + last = False + newModes = { + "select":"inSelect", + "td":"inCell", + "th":"inCell", + "tr":"inRow", + "tbody":"inTableBody", + "thead":"inTableBody", + "tfoot":"inTableBody", + "caption":"inCaption", + "colgroup":"inColumnGroup", + "table":"inTable", + "head":"inBody", + "body":"inBody", + "frameset":"inFrameset" + } + for node in self.tree.openElements[::-1]: + nodeName = node.name + if node == self.tree.openElements[0]: + last = True + if nodeName not in ['td', 'th']: + # XXX + assert self.innerHTML + nodeName = self.innerHTML + # Check for conditions that should only happen in the innerHTML + # case + if nodeName in ("select", "colgroup", "head", "frameset"): + # XXX + assert self.innerHTML + if nodeName in newModes: + self.phase = self.phases[newModes[nodeName]] + break + elif nodeName == "html": + if self.tree.headPointer is None: + self.phase = self.phases["beforeHead"] + else: + self.phase = self.phases["afterHead"] + break + elif last: + self.phase = self.phases["inBody"] + break + +class Phase(object): + """Base class for helper object that implements each phase of processing + """ + # Order should be (they can be omitted): + # * EOF + # * Comment + # * Doctype + # * SpaceCharacters + # * Characters + # * StartTag + # - startTag* methods + # * EndTag + # - endTag* methods + + def __init__(self, parser, tree): + self.parser = parser + self.tree = tree + + def processEOF(self): + self.tree.generateImpliedEndTags() + if len(self.tree.openElements) > 2: + self.parser.parseError(_(u"Unexpected end of file. " + u"Missing closing tags.")) + elif len(self.tree.openElements) == 2 and\ + self.tree.openElements[1].name != "body": + # This happens for framesets or something? + self.parser.parseError(_(u"Unexpected end of file. 
Expected end " + u"tag (%s) first.") % (self.tree.openElements[1].name,)) + elif self.parser.innerHTML and len(self.tree.openElements) > 1 : + # XXX This is not what the specification says. Not sure what to do + # here. + self.parser.parseError(_(u"XXX innerHTML EOF")) + # Betting ends. + + def processComment(self, data): + # For most phases the following is correct. Where it's not it will be + # overridden. + self.tree.insertComment(data, self.tree.openElements[-1]) + + def processDoctype(self, name, publicId, systemId, correct): + self.parser.parseError(_(u"Unexpected DOCTYPE. Ignored.")) + + def processSpaceCharacters(self, data): + self.tree.insertText(data) + + def processStartTag(self, name, attributes): + self.startTagHandler[name](name, attributes) + + def startTagHtml(self, name, attributes): + if self.parser.firstStartTag == False and name == "html": + self.parser.parseError(_(u"html needs to be the first start tag.")) + # XXX Need a check here to see if the first start tag token emitted is + # this token... If it's not, invoke self.parser.parseError(). + for attr, value in attributes.iteritems(): + if attr not in self.tree.openElements[0].attributes: + self.tree.openElements[0].attributes[attr] = value + self.parser.firstStartTag = False + + def processEndTag(self, name): + self.endTagHandler[name](name) + + +class InitialPhase(Phase): + # This phase deals with error handling as well which is currently not + # covered in the specification. The error handling is typically known as + # "quirks mode". It is expected that a future version of HTML5 will defin + # this. + def processEOF(self): + self.parser.parseError(_(u"Unexpected End of file. Expected DOCTYPE.")) + self.parser.phase = self.parser.phases["rootElement"] + self.parser.phase.processEOF() + + def processComment(self, data): + self.tree.insertComment(data, self.tree.document) + + def processDoctype(self, name, publicId, systemId, correct): + nameLower = name.translate(asciiUpper2Lower) + if nameLower != "html" or publicId != None or\ + systemId != None: + self.parser.parseError(_(u"Erroneous DOCTYPE.")) + # XXX need to update DOCTYPE tokens + self.tree.insertDoctype(name, publicId, systemId) + + if publicId == None: + publicId = "" + if publicId != "": + publicId = publicId.translate(asciiUpper2Lower) + + if nameLower != "html": + # XXX quirks mode + pass + else: + if publicId in\ + ("+//silmaril//dtd html pro v0r11 19970101//en", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//en", + "-//as//dtd html 3.0 aswedit + extensions//en", + "-//ietf//dtd html 2.0 level 1//en", + "-//ietf//dtd html 2.0 level 2//en", + "-//ietf//dtd html 2.0 strict level 1//en", + "-//ietf//dtd html 2.0 strict level 2//en", + "-//ietf//dtd html 2.0 strict//en", + "-//ietf//dtd html 2.0//en", + "-//ietf//dtd html 2.1e//en", + "-//ietf//dtd html 3.0//en", + "-//ietf//dtd html 3.0//en//", + "-//ietf//dtd html 3.2 final//en", + "-//ietf//dtd html 3.2//en", + "-//ietf//dtd html 3//en", + "-//ietf//dtd html level 0//en", + "-//ietf//dtd html level 0//en//2.0", + "-//ietf//dtd html level 1//en", + "-//ietf//dtd html level 1//en//2.0", + "-//ietf//dtd html level 2//en", + "-//ietf//dtd html level 2//en//2.0", + "-//ietf//dtd html level 3//en", + "-//ietf//dtd html level 3//en//3.0", + "-//ietf//dtd html strict level 0//en", + "-//ietf//dtd html strict level 0//en//2.0", + "-//ietf//dtd html strict level 1//en", + "-//ietf//dtd html strict level 1//en//2.0", + "-//ietf//dtd html strict level 2//en", + "-//ietf//dtd html strict level 2//en//2.0", + 
"-//ietf//dtd html strict level 3//en", + "-//ietf//dtd html strict level 3//en//3.0", + "-//ietf//dtd html strict//en", + "-//ietf//dtd html strict//en//2.0", + "-//ietf//dtd html strict//en//3.0", + "-//ietf//dtd html//en", + "-//ietf//dtd html//en//2.0", + "-//ietf//dtd html//en//3.0", + "-//metrius//dtd metrius presentational//en", + "-//microsoft//dtd internet explorer 2.0 html strict//en", + "-//microsoft//dtd internet explorer 2.0 html//en", + "-//microsoft//dtd internet explorer 2.0 tables//en", + "-//microsoft//dtd internet explorer 3.0 html strict//en", + "-//microsoft//dtd internet explorer 3.0 html//en", + "-//microsoft//dtd internet explorer 3.0 tables//en", + "-//netscape comm. corp.//dtd html//en", + "-//netscape comm. corp.//dtd strict html//en", + "-//o'reilly and associates//dtd html 2.0//en", + "-//o'reilly and associates//dtd html extended 1.0//en", + "-//spyglass//dtd html 2.0 extended//en", + "-//sq//dtd html 2.0 hotmetal + extensions//en", + "-//sun microsystems corp.//dtd hotjava html//en", + "-//sun microsystems corp.//dtd hotjava strict html//en", + "-//w3c//dtd html 3 1995-03-24//en", + "-//w3c//dtd html 3.2 draft//en", + "-//w3c//dtd html 3.2 final//en", + "-//w3c//dtd html 3.2//en", + "-//w3c//dtd html 3.2s draft//en", + "-//w3c//dtd html 4.0 frameset//en", + "-//w3c//dtd html 4.0 transitional//en", + "-//w3c//dtd html experimental 19960712//en", + "-//w3c//dtd html experimental 970421//en", + "-//w3c//dtd w3 html//en", + "-//w3o//dtd w3 html 3.0//en", + "-//w3o//dtd w3 html 3.0//en//", + "-//w3o//dtd w3 html strict 3.0//en//", + "-//webtechs//dtd mozilla html 2.0//en", + "-//webtechs//dtd mozilla html//en", + "-/w3c/dtd html 4.0 transitional/en", + "html")\ + or (publicId in\ + ("-//w3c//dtd html 4.01 frameset//EN", + "-//w3c//dtd html 4.01 transitional//EN") and systemId == None)\ + or (systemId != None and\ + systemId == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): + #XXX quirks mode + pass + + self.parser.phase = self.parser.phases["rootElement"] + + def processSpaceCharacters(self, data): + pass + + def processCharacters(self, data): + self.parser.parseError(_(u"Unexpected non-space characters. " + u"Expected DOCTYPE.")) + self.parser.phase = self.parser.phases["rootElement"] + self.parser.phase.processCharacters(data) + + def processStartTag(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (%s). Expected DOCTYPE.") % (name,)) + self.parser.phase = self.parser.phases["rootElement"] + self.parser.phase.processStartTag(name, attributes) + + def processEndTag(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s). 
Expected DOCTYPE.") % (name,)) + self.parser.phase = self.parser.phases["rootElement"] + self.parser.phase.processEndTag(name) + + +class RootElementPhase(Phase): + # helper methods + def insertHtmlElement(self): + element = self.tree.createElement("html", {}) + self.tree.openElements.append(element) + self.tree.document.appendChild(element) + self.parser.phase = self.parser.phases["beforeHead"] + + # other + def processEOF(self): + self.insertHtmlElement() + self.parser.phase.processEOF() + + def processComment(self, data): + self.tree.insertComment(data, self.tree.document) + + def processSpaceCharacters(self, data): + pass + + def processCharacters(self, data): + self.insertHtmlElement() + self.parser.phase.processCharacters(data) + + def processStartTag(self, name, attributes): + if name == "html": + self.parser.firstStartTag = True + self.insertHtmlElement() + self.parser.phase.processStartTag(name, attributes) + + def processEndTag(self, name): + self.insertHtmlElement() + self.parser.phase.processEndTag(name) + + +class BeforeHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("html", "head", "body", "br", "p"), self.endTagImplyHead) + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.startTagHead("head", {}) + self.parser.phase.processEOF() + + def processCharacters(self, data): + self.startTagHead("head", {}) + self.parser.phase.processCharacters(data) + + def startTagHead(self, name, attributes): + self.tree.insertElement(name, attributes) + self.tree.headPointer = self.tree.openElements[-1] + self.parser.phase = self.parser.phases["inHead"] + + def startTagOther(self, name, attributes): + self.startTagHead("head", {}) + self.parser.phase.processStartTag(name, attributes) + + def endTagImplyHead(self, name): + self.startTagHead("head", {}) + self.parser.phase.processEndTag(name) + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s) after the (implied) root element.") % (name,)) + +class InHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("title", self.startTagTitle), + ("style", self.startTagStyle), + ("noscript", self.startTagNoScript), + ("script", self.startTagScript), + (("base", "link", "meta"), self.startTagBaseLinkMeta), + ("head", self.startTagHead) + ]) + self.startTagHandler.default = self.startTagOther + + self. endTagHandler = utils.MethodDispatcher([ + ("head", self.endTagHead), + (("html", "body", "br", "p"), self.endTagImplyAfterHead), + (("title", "style", "script", "noscript"), + self.endTagTitleStyleScriptNoScript) + ]) + self.endTagHandler.default = self.endTagOther + + # helper + def appendToHead(self, element): + if self.tree.headPointer is not None: + self.tree.headPointer.appendChild(element) + else: + assert self.parser.innerHTML + self.tree.openElements[-1].appendChild(element) + + # the real thing + def processEOF(self): + if self.tree.openElements[-1].name in ("title", "style", "script"): + self.parser.parseError(_(u"Unexpected end of file. 
" + u"Expected end tag (%s).") % (self.tree.openElements[-1].name,)) + self.tree.openElements.pop() + self.anythingElse() + self.parser.phase.processEOF() + + def processCharacters(self, data): + if self.tree.openElements[-1].name in\ + ("title", "style", "script", "noscript"): + self.tree.insertText(data) + else: + self.anythingElse() + self.parser.phase.processCharacters(data) + + def startTagHead(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag head in existing head. Ignored")) + + def startTagTitle(self, name, attributes): + element = self.tree.createElement(name, attributes) + self.appendToHead(element) + self.tree.openElements.append(element) + self.parser.tokenizer.contentModelFlag = contentModelFlags["RCDATA"] + + def startTagStyle(self, name, attributes): + element = self.tree.createElement(name, attributes) + if self.tree.headPointer is not None and\ + self.parser.phase == self.parser.phases["inHead"]: + self.appendToHead(element) + else: + self.tree.openElements[-1].appendChild(element) + self.tree.openElements.append(element) + self.parser.tokenizer.contentModelFlag = contentModelFlags["CDATA"] + + def startTagNoScript(self, name, attributes): + # XXX Need to decide whether to implement the scripting disabled case. + element = self.tree.createElement(name, attributes) + if self.tree.headPointer is not None and\ + self.parser.phase == self.parser.phases["inHead"]: + self.appendToHead(element) + else: + self.tree.openElements[-1].appendChild(element) + self.tree.openElements.append(element) + self.parser.tokenizer.contentModelFlag = contentModelFlags["CDATA"] + + def startTagScript(self, name, attributes): + #XXX Inner HTML case may be wrong + element = self.tree.createElement(name, attributes) + element._flags.append("parser-inserted") + if (self.tree.headPointer is not None and + self.parser.phase == self.parser.phases["inHead"]): + self.appendToHead(element) + else: + self.tree.openElements[-1].appendChild(element) + self.tree.openElements.append(element) + self.parser.tokenizer.contentModelFlag = contentModelFlags["CDATA"] + + def startTagBaseLinkMeta(self, name, attributes): + element = self.tree.createElement(name, attributes) + if (self.tree.headPointer is not None and + self.parser.phase == self.parser.phases["inHead"]): + self.appendToHead(element) + else: + self.tree.openElements[-1].appendChild(element) + + def startTagOther(self, name, attributes): + self.anythingElse() + self.parser.phase.processStartTag(name, attributes) + + def endTagHead(self, name): + if self.tree.openElements[-1].name == "head": + self.tree.openElements.pop() + else: + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % u'head') + self.parser.phase = self.parser.phases["afterHead"] + + def endTagImplyAfterHead(self, name): + self.anythingElse() + self.parser.phase.processEndTag(name) + + def endTagTitleStyleScriptNoScript(self, name): + if self.tree.openElements[-1].name == name: + self.tree.openElements.pop() + else: + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % (name,)) + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % (name,)) + + def anythingElse(self): + if self.tree.openElements[-1].name == "head": + self.endTagHead("head") + else: + self.parser.phase = self.parser.phases["afterHead"] + +# XXX If we implement a parser for which scripting is disabled we need to +# implement this phase. 
+# +# class InHeadNoScriptPhase(Phase): + +class AfterHeadPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("body", self.startTagBody), + ("frameset", self.startTagFrameset), + (("base", "link", "meta", "script", "style", "title"), + self.startTagFromHead) + ]) + self.startTagHandler.default = self.startTagOther + + def processEOF(self): + self.anythingElse() + self.parser.phase.processEOF() + + def processCharacters(self, data): + self.anythingElse() + self.parser.phase.processCharacters(data) + + def startTagBody(self, name, attributes): + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inBody"] + + def startTagFrameset(self, name, attributes): + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inFrameset"] + + def startTagFromHead(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (%s) that can be in head. Moved.") % (name,)) + self.parser.phase = self.parser.phases["inHead"] + self.parser.phase.processStartTag(name, attributes) + + def startTagOther(self, name, attributes): + self.anythingElse() + self.parser.phase.processStartTag(name, attributes) + + def processEndTag(self, name): + self.anythingElse() + self.parser.phase.processEndTag(name) + + def anythingElse(self): + self.tree.insertElement("body", {}) + self.parser.phase = self.parser.phases["inBody"] + + +class InBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-body + # the crazy mode + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + #Keep a ref to this for special handling of whitespace in <pre> + self.processSpaceCharactersNonPre = self.processSpaceCharacters + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("base", "link", "meta", "script", "style"), + self.startTagProcessInHead), + ("title", self.startTagTitle), + ("body", self.startTagBody), + (("address", "blockquote", "center", "dir", "div", "dl", + "fieldset", "listing", "menu", "ol", "p", "pre", "ul"), + self.startTagCloseP), + ("form", self.startTagForm), + (("li", "dd", "dt"), self.startTagListItem), + ("plaintext",self.startTagPlaintext), + (headingElements, self.startTagHeading), + ("a", self.startTagA), + (("b", "big", "em", "font", "i", "s", "small", "strike", "strong", + "tt", "u"),self.startTagFormatting), + ("nobr", self.startTagNobr), + ("button", self.startTagButton), + (("marquee", "object"), self.startTagMarqueeObject), + ("xmp", self.startTagXmp), + ("table", self.startTagTable), + (("area", "basefont", "bgsound", "br", "embed", "img", "param", + "spacer", "wbr"), self.startTagVoidFormatting), + ("hr", self.startTagHr), + ("image", self.startTagImage), + ("input", self.startTagInput), + ("isindex", self.startTagIsIndex), + ("textarea", self.startTagTextarea), + (("iframe", "noembed", "noframes", "noscript"), self.startTagCdata), + ("select", self.startTagSelect), + (("caption", "col", "colgroup", "frame", "frameset", "head", + "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", + "tr"), self.startTagMisplaced), + (("event-source", "section", "nav", "article", "aside", "header", + "footer", "datagrid", "command"), self.startTagNew) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("p",self.endTagP), + ("body",self.endTagBody), + ("html",self.endTagHtml), + (("address", "blockquote", 
"center", "div", "dl", "fieldset", + "listing", "menu", "ol", "pre", "ul"), self.endTagBlock), + ("form", self.endTagForm), + (("dd", "dt", "li"), self.endTagListItem), + (headingElements, self.endTagHeading), + (("a", "b", "big", "em", "font", "i", "nobr", "s", "small", + "strike", "strong", "tt", "u"), self.endTagFormatting), + (("marquee", "object", "button"), self.endTagButtonMarqueeObject), + (("head", "frameset", "select", "optgroup", "option", "table", + "caption", "colgroup", "col", "thead", "tfoot", "tbody", "tr", + "td", "th"), self.endTagMisplaced), + ("br", self.endTagBr), + (("area", "basefont", "bgsound", "embed", "hr", "image", + "img", "input", "isindex", "param", "spacer", "wbr", "frame"), + self.endTagNone), + (("noframes", "noscript", "noembed", "textarea", "xmp", "iframe"), + self.endTagCdataTextAreaXmp), + (("event-source", "section", "nav", "article", "aside", "header", + "footer", "datagrid", "command"), self.endTagNew) + ]) + self.endTagHandler.default = self.endTagOther + + # helper + def addFormattingElement(self, name, attributes): + self.tree.insertElement(name, attributes) + self.tree.activeFormattingElements.append( + self.tree.openElements[-1]) + + # the real deal + def processSpaceCharactersDropNewline(self, data): + # Sometimes (start of <pre> and <textarea> blocks) we want to drop + # leading newlines + self.processSpaceCharacters = self.processSpaceCharactersNonPre + if (data.startswith("\n") and + self.tree.openElements[-1].name in ("pre", "textarea") and + not self.tree.openElements[-1].hasContent()): + data = data[1:] + if data: + self.tree.reconstructActiveFormattingElements() + self.tree.insertText(data) + + def processCharacters(self, data): + # XXX The specification says to do this for every character at the + # moment, but apparently that doesn't match the real world so we don't + # do it for space characters. + self.tree.reconstructActiveFormattingElements() + self.tree.insertText(data) + + #This matches the current spec but may not match the real world + def processSpaceCharacters(self, data): + self.tree.reconstructActiveFormattingElements() + self.tree.insertText(data) + + def startTagProcessInHead(self, name, attributes): + self.parser.phases["inHead"].processStartTag(name, attributes) + + def startTagTitle(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (%s) that belongs in the head. Moved.") % (name,)) + self.parser.phases["inHead"].processStartTag(name, attributes) + + def startTagBody(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (body).")) + if (len(self.tree.openElements) == 1 + or self.tree.openElements[1].name != "body"): + assert self.parser.innerHTML + else: + for attr, value in attributes.iteritems(): + if attr not in self.tree.openElements[1].attributes: + self.tree.openElements[1].attributes[attr] = value + + def startTagCloseP(self, name, attributes): + if self.tree.elementInScope("p"): + self.endTagP("p") + self.tree.insertElement(name, attributes) + if name == "pre": + self.processSpaceCharacters = self.processSpaceCharactersDropNewline + + def startTagForm(self, name, attributes): + if self.tree.formPointer: + self.parser.parseError("Unexpected start tag (form). 
Ignored.") + else: + if self.tree.elementInScope("p"): + self.endTagP("p") + self.tree.insertElement(name, attributes) + self.tree.formPointer = self.tree.openElements[-1] + + def startTagListItem(self, name, attributes): + if self.tree.elementInScope("p"): + self.endTagP("p") + stopNames = {"li":("li"), "dd":("dd", "dt"), "dt":("dd", "dt")} + stopName = stopNames[name] + # AT Use reversed in Python 2.4... + for i, node in enumerate(self.tree.openElements[::-1]): + if node.name in stopName: + poppedNodes = [] + for j in range(i+1): + poppedNodes.append(self.tree.openElements.pop()) + if i >= 1: + self.parser.parseError( + (i == 1 and _(u"Missing end tag (%s)") or _(u"Missing end tags (%s)")) + % u", ".join([item.name for item in poppedNodes[:-1]])) + break + + + # Phrasing elements are all non special, non scoping, non + # formatting elements + if (node.name in (specialElements | scopingElements) + and node.name not in ("address", "div")): + break + # Always insert an <li> element. + self.tree.insertElement(name, attributes) + + def startTagPlaintext(self, name, attributes): + if self.tree.elementInScope("p"): + self.endTagP("p") + self.tree.insertElement(name, attributes) + self.parser.tokenizer.contentModelFlag = contentModelFlags["PLAINTEXT"] + + def startTagHeading(self, name, attributes): + if self.tree.elementInScope("p"): + self.endTagP("p") + # Uncomment the following for IE7 behavior: + # + #for item in headingElements: + # if self.tree.elementInScope(item): + # self.parser.parseError(_(u"Unexpected start tag (" + name +\ + # ").")) + # item = self.tree.openElements.pop() + # while item.name not in headingElements: + # item = self.tree.openElements.pop() + # break + self.tree.insertElement(name, attributes) + + def startTagA(self, name, attributes): + afeAElement = self.tree.elementInActiveFormattingElements("a") + if afeAElement: + self.parser.parseError(_(u"Unexpected start tag (%s) implies " + u"end tag (%s).") % (u'a', u'a')) + self.endTagFormatting("a") + if afeAElement in self.tree.openElements: + self.tree.openElements.remove(afeAElement) + if afeAElement in self.tree.activeFormattingElements: + self.tree.activeFormattingElements.remove(afeAElement) + self.tree.reconstructActiveFormattingElements() + self.addFormattingElement(name, attributes) + + def startTagFormatting(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + self.addFormattingElement(name, attributes) + + def startTagNobr(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + if self.tree.elementInScope("nobr"): + self.parser.parseError(_(u"Unexpected start tag (%s) implies " + u"end tag (%s).") % (u'nobr', u'nobr')) + self.processEndTag("nobr") + # XXX Need tests that trigger the following + self.tree.reconstructActiveFormattingElements() + self.addFormattingElement(name, attributes) + + def startTagButton(self, name, attributes): + if self.tree.elementInScope("button"): + self.parser.parseError(_(u"Unexpected start tag (%s) implied " + u"end tag (%s).") % (u'button', u'button')) + self.processEndTag("button") + self.parser.phase.processStartTag(name, attributes) + else: + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + self.tree.activeFormattingElements.append(Marker) + + def startTagMarqueeObject(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + self.tree.activeFormattingElements.append(Marker) + + def startTagXmp(self, name, attributes): + 
self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + self.parser.tokenizer.contentModelFlag = contentModelFlags["CDATA"] + + def startTagTable(self, name, attributes): + if self.tree.elementInScope("p"): + self.processEndTag("p") + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inTable"] + + def startTagVoidFormatting(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + self.tree.openElements.pop() + + def startTagHr(self, name, attributes): + if self.tree.elementInScope("p"): + self.endTagP("p") + self.tree.insertElement(name, attributes) + self.tree.openElements.pop() + + def startTagImage(self, name, attributes): + # No really... + self.parser.parseError(_(u"Unexpected start tag (image). Treated " + u"as img.")) + self.processStartTag("img", attributes) + + def startTagInput(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + if self.tree.formPointer: + # XXX Not exactly sure what to do here + self.tree.openElements[-1].form = self.tree.formPointer + self.tree.openElements.pop() + + def startTagIsIndex(self, name, attributes): + self.parser.parseError("Unexpected start tag isindex. Don't use it!") + if self.tree.formPointer: + return + self.processStartTag("form", {}) + self.processStartTag("hr", {}) + self.processStartTag("p", {}) + self.processStartTag("label", {}) + # XXX Localization ... + self.processCharacters( + "This is a searchable index. Insert your search keywords here: ") + attributes["name"] = "isindex" + attrs = [[key,value] for key,value in attributes.iteritems()] + self.processStartTag("input", dict(attrs)) + self.processEndTag("label") + self.processEndTag("p") + self.processStartTag("hr", {}) + self.processEndTag("form") + + def startTagTextarea(self, name, attributes): + # XXX Form element pointer checking here as well... + self.tree.insertElement(name, attributes) + self.parser.tokenizer.contentModelFlag = contentModelFlags["RCDATA"] + self.processSpaceCharacters = self.processSpaceCharactersDropNewline + + def startTagCdata(self, name, attributes): + """iframe, noembed noframes, noscript(if scripting enabled)""" + self.tree.insertElement(name, attributes) + self.parser.tokenizer.contentModelFlag = contentModelFlags["CDATA"] + + def startTagSelect(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inSelect"] + + def startTagMisplaced(self, name, attributes): + """ Elements that should be children of other elements that have a + different insertion mode; here they are ignored + "caption", "col", "colgroup", "frame", "frameset", "head", + "option", "optgroup", "tbody", "td", "tfoot", "th", "thead", + "tr", "noscript" + """ + self.parser.parseError(_(u"Unexpected start tag (%s). 
Ignored.") % (name,)) + + def startTagNew(self, name, attributes): + """New HTML5 elements, "event-source", "section", "nav", + "article", "aside", "header", "footer", "datagrid", "command" + """ + sys.stderr.write("Warning: Undefined behaviour for start tag %s"%name) + self.startTagOther(name, attributes) + #raise NotImplementedError + + def startTagOther(self, name, attributes): + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, attributes) + + def endTagP(self, name): + if self.tree.elementInScope("p"): + self.tree.generateImpliedEndTags("p") + if self.tree.openElements[-1].name != "p": + self.parser.parseError(_(u"Unexpected end tag (%s).") % (u'p',)) + if self.tree.elementInScope("p"): + while self.tree.elementInScope("p"): + self.tree.openElements.pop() + else: + self.startTagCloseP("p", {}) + self.endTagP("p") + + def endTagBody(self, name): + # XXX Need to take open <p> tags into account here. We shouldn't imply + # </p> but we should not throw a parse error either. Specification is + # likely to be updated. + if self.tree.openElements[1].name != "body": + # innerHTML case + self.parser.parseError() + return + if self.tree.openElements[-1].name != "body": + self.parser.parseError(_(u"Unexpected end tag (%s). Missing " + u"end tag (%s).") % (u'body', self.tree.openElements[-1].name)) + self.parser.phase = self.parser.phases["afterBody"] + + def endTagHtml(self, name): + self.endTagBody(name) + if not self.parser.innerHTML: + self.parser.phase.processEndTag(name) + + def endTagBlock(self, name): + #Put us back in the right whitespace handling mode + if name == "pre": + self.processSpaceCharacters = self.processSpaceCharactersNonPre + inScope = self.tree.elementInScope(name) + if inScope: + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != name: + self.parser.parseError(_(u"End tag (%s) seen too " + u"early. Expected other end tag.") % (name,)) + if inScope: + node = self.tree.openElements.pop() + while node.name != name: + node = self.tree.openElements.pop() + + def endTagForm(self, name): + if self.tree.elementInScope(name): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != name: + self.parser.parseError(_(u"End tag (form) seen too early. Ignored.")) + else: + self.tree.openElements.pop() + self.tree.formPointer = None + + def endTagListItem(self, name): + # AT Could merge this with the Block case + if self.tree.elementInScope(name): + self.tree.generateImpliedEndTags(name) + + if self.tree.openElements[-1].name != name: + self.parser.parseError(_(u"End tag (%s) seen too " + u"early. Expected other end tag.") % (name,)) + + if self.tree.elementInScope(name): + node = self.tree.openElements.pop() + while node.name != name: + node = self.tree.openElements.pop() + + def endTagHeading(self, name): + for item in headingElements: + if self.tree.elementInScope(item): + self.tree.generateImpliedEndTags() + break + if self.tree.openElements[-1].name != name: + self.parser.parseError(_(u"Unexpected end tag (%s). " + u"Expected other end tag.") % (name,)) + + for item in headingElements: + if self.tree.elementInScope(item): + item = self.tree.openElements.pop() + while item.name not in headingElements: + item = self.tree.openElements.pop() + break + + def endTagFormatting(self, name): + """The much-feared adoption agency algorithm + """ + # http://www.whatwg.org/specs/web-apps/current-work/#adoptionAgency + # XXX Better parseError messages appreciated. 
+ while True: + # Step 1 paragraph 1 + afeElement = self.tree.elementInActiveFormattingElements(name) + if not afeElement or (afeElement in self.tree.openElements and + not self.tree.elementInScope(afeElement.name)): + self.parser.parseError(_(u"End tag (%s) violates " + u" step 1, paragraph 1 of the adoption agency algorithm.") % (name,)) + return + + # Step 1 paragraph 2 + elif afeElement not in self.tree.openElements: + self.parser.parseError(_(u"End tag (%s) violates " + u" step 1, paragraph 2 of the adoption agency algorithm.") % (name,)) + self.tree.activeFormattingElements.remove(afeElement) + return + + # Step 1 paragraph 3 + if afeElement != self.tree.openElements[-1]: + self.parser.parseError(_(u"End tag (%s) violates " + u" step 1, paragraph 3 of the adoption agency algorithm.") % (name,)) + + # Step 2 + # Start of the adoption agency algorithm proper + afeIndex = self.tree.openElements.index(afeElement) + furthestBlock = None + for element in self.tree.openElements[afeIndex:]: + if element.name in specialElements | scopingElements: + furthestBlock = element + break + + # Step 3 + if furthestBlock is None: + element = self.tree.openElements.pop() + while element != afeElement: + element = self.tree.openElements.pop() + self.tree.activeFormattingElements.remove(element) + return + commonAncestor = self.tree.openElements[afeIndex-1] + + # Step 5 + if furthestBlock.parent: + furthestBlock.parent.removeChild(furthestBlock) + + # Step 6 + # The bookmark is supposed to help us identify where to reinsert + # nodes in step 12. We have to ensure that we reinsert nodes after + # the node before the active formatting element. Note the bookmark + # can move in step 7.4 + bookmark = self.tree.activeFormattingElements.index(afeElement) + + # Step 7 + lastNode = node = furthestBlock + while True: + # AT replace this with a function and recursion? + # Node is element before node in open elements + node = self.tree.openElements[ + self.tree.openElements.index(node)-1] + while node not in self.tree.activeFormattingElements: + tmpNode = node + node = self.tree.openElements[ + self.tree.openElements.index(node)-1] + self.tree.openElements.remove(tmpNode) + # Step 7.3 + if node == afeElement: + break + # Step 7.4 + if lastNode == furthestBlock: + # XXX should this be index(node) or index(node)+1 + # Anne: I think +1 is ok. Given x = [2,3,4,5] + # x.index(3) gives 1 and then x[1 +1] gives 4... 
+ bookmark = self.tree.activeFormattingElements.\ + index(node) + 1 + # Step 7.5 + cite = node.parent + if node.hasContent(): + clone = node.cloneNode() + # Replace node with clone + self.tree.activeFormattingElements[ + self.tree.activeFormattingElements.index(node)] = clone + self.tree.openElements[ + self.tree.openElements.index(node)] = clone + node = clone + # Step 7.6 + # Remove lastNode from its parents, if any + if lastNode.parent: + lastNode.parent.removeChild(lastNode) + node.appendChild(lastNode) + # Step 7.7 + lastNode = node + # End of inner loop + + # Step 8 + if lastNode.parent: + lastNode.parent.removeChild(lastNode) + commonAncestor.appendChild(lastNode) + + # Step 9 + clone = afeElement.cloneNode() + + # Step 10 + furthestBlock.reparentChildren(clone) + + # Step 11 + furthestBlock.appendChild(clone) + + # Step 12 + self.tree.activeFormattingElements.remove(afeElement) + self.tree.activeFormattingElements.insert(bookmark, clone) + + # Step 13 + self.tree.openElements.remove(afeElement) + self.tree.openElements.insert( + self.tree.openElements.index(furthestBlock) + 1, clone) + + def endTagButtonMarqueeObject(self, name): + if self.tree.elementInScope(name): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != name: + self.parser.parseError(_(u"Unexpected end tag (%s). Expected other end tag first.") % (name,)) + + if self.tree.elementInScope(name): + element = self.tree.openElements.pop() + while element.name != name: + element = self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + + def endTagMisplaced(self, name): + # This handles elements with end tags in other insertion modes. + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % (name,)) + + def endTagBr(self, name): + self.parser.parseError(_(u"Unexpected end tag (br). Treated as br element.")) + self.tree.reconstructActiveFormattingElements() + self.tree.insertElement(name, {}) + self.tree.openElements.pop() + + def endTagNone(self, name): + # This handles elements with no end tag. + self.parser.parseError(_(u"This tag (%s) has no end tag") % (name,)) + + def endTagCdataTextAreaXmp(self, name): + if self.tree.openElements[-1].name == name: + self.tree.openElements.pop() + else: + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % (name,)) + + def endTagNew(self, name): + """New HTML5 elements, "event-source", "section", "nav", + "article", "aside", "header", "footer", "datagrid", "command" + """ + sys.stderr.write("Warning: Undefined behaviour for end tag %s"%name) + self.endTagOther(name) + #raise NotImplementedError + + def endTagOther(self, name): + # XXX This logic should be moved into the treebuilder + # AT should use reversed instead of [::-1] when Python 2.4 == True. + for node in self.tree.openElements[::-1]: + if node.name == name: + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != name: + self.parser.parseError(_(u"Unexpected end tag (%s).") % (name,)) + while self.tree.openElements.pop() != node: + pass + break + else: + if node.name in specialElements | scopingElements: + self.parser.parseError(_(u"Unexpected end tag (%s). 
Ignored.") % (name,)) + break + +class InTablePhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-table + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("caption", self.startTagCaption), + ("colgroup", self.startTagColgroup), + ("col", self.startTagCol), + (("tbody", "tfoot", "thead"), self.startTagRowGroup), + (("td", "th", "tr"), self.startTagImplyTbody), + ("table", self.startTagTable) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("table", self.endTagTable), + (("body", "caption", "col", "colgroup", "html", "tbody", "td", + "tfoot", "th", "thead", "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods + def clearStackToTableContext(self): + # "clear the stack back to a table context" + while self.tree.openElements[-1].name not in ("table", "html"): + self.parser.parseError(_(u"Unexpected implied end tag (%s) in the table phase.") % (self.tree.openElements[-1].name,)) + self.tree.openElements.pop() + # When the current node is <html> it's an innerHTML case + + # processing methods + def processCharacters(self, data): + self.parser.parseError(_(u"Unexpected non-space characters in " + u"table context caused voodoo mode.")) + # Make all the special element rearranging voodoo kick in + self.tree.insertFromTable = True + # Process the character in the "in body" mode + self.parser.phases["inBody"].processCharacters(data) + self.tree.insertFromTable = False + + def startTagCaption(self, name, attributes): + self.clearStackToTableContext() + self.tree.activeFormattingElements.append(Marker) + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inCaption"] + + def startTagColgroup(self, name, attributes): + self.clearStackToTableContext() + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inColumnGroup"] + + def startTagCol(self, name, attributes): + self.startTagColgroup("colgroup", {}) + self.parser.phase.processStartTag(name, attributes) + + def startTagRowGroup(self, name, attributes): + self.clearStackToTableContext() + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inTableBody"] + + def startTagImplyTbody(self, name, attributes): + self.startTagRowGroup("tbody", {}) + self.parser.phase.processStartTag(name, attributes) + + def startTagTable(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (table) in table " + u"phase. Implies end tag (table).")) + self.parser.phase.processEndTag("table") + if not self.parser.innerHTML: + self.parser.phase.processStartTag(name, attributes) + + def startTagOther(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (%s) in " + u"table context caused voodoo mode.") % (name,)) + # Make all the special element rearranging voodoo kick in + self.tree.insertFromTable = True + # Process the start tag in the "in body" mode + self.parser.phases["inBody"].processStartTag(name, attributes) + self.tree.insertFromTable = False + + def endTagTable(self, name): + if self.tree.elementInScope("table", True): + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != "table": + self.parser.parseError(_(u"Unexpected end tag (table). 
" + u"Expected end tag (%s).") % (self.tree.openElements[-1].name,)) + while self.tree.openElements[-1].name != "table": + self.tree.openElements.pop() + self.tree.openElements.pop() + self.parser.resetInsertionMode() + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagIgnore(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % (name,)) + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s) in " + u"table context caused voodoo mode.") % (name,)) + # Make all the special element rearranging voodoo kick in + self.tree.insertFromTable = True + # Process the end tag in the "in body" mode + self.parser.phases["inBody"].processEndTag(name) + self.tree.insertFromTable = False + + +class InCaptionPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-caption + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", + "thead", "tr"), self.startTagTableElement) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("caption", self.endTagCaption), + ("table", self.endTagTable), + (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", + "thead", "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + def ignoreEndTagCaption(self): + return not self.tree.elementInScope("caption", True) + + def processCharacters(self, data): + self.parser.phases["inBody"].processCharacters(data) + + def startTagTableElement(self, name, attributes): + self.parser.parseError() + #XXX Have to duplicate logic here to find out if the tag is ignored + ignoreEndTag = self.ignoreEndTagCaption() + self.parser.phase.processEndTag("caption") + if not ignoreEndTag: + self.parser.phase.processStartTag(name, attributes) + + def startTagOther(self, name, attributes): + self.parser.phases["inBody"].processStartTag(name, attributes) + + def endTagCaption(self, name): + if not self.ignoreEndTagCaption(): + # AT this code is quite similar to endTagTable in "InTable" + self.tree.generateImpliedEndTags() + if self.tree.openElements[-1].name != "caption": + self.parser.parseError(_(u"Unexpected end tag (caption). " + u"Missing end tags.")) + while self.tree.openElements[-1].name != "caption": + self.tree.openElements.pop() + self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + self.parser.phase = self.parser.phases["inTable"] + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagTable(self, name): + self.parser.parseError() + ignoreEndTag = self.ignoreEndTagCaption() + self.parser.phase.processEndTag("caption") + if not ignoreEndTag: + self.parser.phase.processEndTag(name) + + def endTagIgnore(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s). 
Ignored.") % (name,)) + + def endTagOther(self, name): + self.parser.phases["inBody"].processEndTag(name) + + +class InColumnGroupPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-column + + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("col", self.startTagCol) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("colgroup", self.endTagColgroup), + ("col", self.endTagCol) + ]) + self.endTagHandler.default = self.endTagOther + + def ignoreEndTagColgroup(self): + return self.tree.openElements[-1].name == "html" + + def processCharacters(self, data): + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup("colgroup") + if not ignoreEndTag: + self.parser.phase.processCharacters(data) + + def startTagCol(self, name ,attributes): + self.tree.insertElement(name, attributes) + self.tree.openElements.pop() + + def startTagOther(self, name, attributes): + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup("colgroup") + if not ignoreEndTag: + self.parser.phase.processStartTag(name, attributes) + + def endTagColgroup(self, name): + if self.ignoreEndTagColgroup(): + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + else: + self.tree.openElements.pop() + self.parser.phase = self.parser.phases["inTable"] + + def endTagCol(self, name): + self.parser.parseError(_(u"Unexpected end tag (col). " + u"col has no end tag.")) + + def endTagOther(self, name): + ignoreEndTag = self.ignoreEndTagColgroup() + self.endTagColgroup("colgroup") + if not ignoreEndTag: + self.parser.phase.processEndTag(name) + + +class InTableBodyPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-table0 + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("tr", self.startTagTr), + (("td", "th"), self.startTagTableCell), + (("caption", "col", "colgroup", "tbody", "tfoot", "thead"), self.startTagTableOther) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), + ("table", self.endTagTable), + (("body", "caption", "col", "colgroup", "html", "td", "th", + "tr"), self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods + def clearStackToTableBodyContext(self): + while self.tree.openElements[-1].name not in ("tbody", "tfoot", + "thead", "html"): + self.parser.parseError(_(u"Unexpected implied end tag (%s) in the table body phase.") % (self.tree.openElements[-1].name,)) + self.tree.openElements.pop() + + # the rest + def processCharacters(self,data): + self.parser.phases["inTable"].processCharacters(data) + + def startTagTr(self, name, attributes): + self.clearStackToTableBodyContext() + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inRow"] + + def startTagTableCell(self, name, attributes): + self.parser.parseError(_(u"Unexpected table cell start tag (%s) in the table body phase.") % (name,)) + self.startTagTr("tr", {}) + self.parser.phase.processStartTag(name, attributes) + + def startTagTableOther(self, name, attributes): + # XXX AT Any ideas on how to share this with endTagTable? 
+ if (self.tree.elementInScope("tbody", True) or + self.tree.elementInScope("thead", True) or + self.tree.elementInScope("tfoot", True)): + self.clearStackToTableBodyContext() + self.endTagTableRowGroup(self.tree.openElements[-1].name) + self.parser.phase.processStartTag(name, attributes) + else: + # innerHTML case + self.parser.parseError() + + def startTagOther(self, name, attributes): + self.parser.phases["inTable"].processStartTag(name, attributes) + + def endTagTableRowGroup(self, name): + if self.tree.elementInScope(name, True): + self.clearStackToTableBodyContext() + self.tree.openElements.pop() + self.parser.phase = self.parser.phases["inTable"] + else: + self.parser.parseError(_(u"Unexpected end tag (%s) in the table body phase. Ignored.") % (name,)) + + def endTagTable(self, name): + if (self.tree.elementInScope("tbody", True) or + self.tree.elementInScope("thead", True) or + self.tree.elementInScope("tfoot", True)): + self.clearStackToTableBodyContext() + self.endTagTableRowGroup(self.tree.openElements[-1].name) + self.parser.phase.processEndTag(name) + else: + # innerHTML case + self.parser.parseError() + + def endTagIgnore(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s) in the table body phase. Ignored.") % (name,)) + + def endTagOther(self, name): + self.parser.phases["inTable"].processEndTag(name) + + +class InRowPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-row + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("td", "th"), self.startTagTableCell), + (("caption", "col", "colgroup", "tbody", "tfoot", "thead", + "tr"), self.startTagTableOther) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("tr", self.endTagTr), + ("table", self.endTagTable), + (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), + (("body", "caption", "col", "colgroup", "html", "td", "th"), + self.endTagIgnore) + ]) + self.endTagHandler.default = self.endTagOther + + # helper methods (XXX unify this with other table helper methods) + def clearStackToTableRowContext(self): + while self.tree.openElements[-1].name not in ("tr", "html"): + self.parser.parseError(_(u"Unexpected implied end tag (%s) in the row phase.") % (self.tree.openElements[-1].name,)) + self.tree.openElements.pop() + + def ignoreEndTagTr(self): + return not self.tree.elementInScope("tr", tableVariant=True) + + # the rest + def processCharacters(self, data): + self.parser.phases["inTable"].processCharacters(data) + + def startTagTableCell(self, name, attributes): + self.clearStackToTableRowContext() + self.tree.insertElement(name, attributes) + self.parser.phase = self.parser.phases["inCell"] + self.tree.activeFormattingElements.append(Marker) + + def startTagTableOther(self, name, attributes): + ignoreEndTag = self.ignoreEndTagTr() + self.endTagTr("tr") + # XXX how are we sure it's always ignored in the innerHTML case? 
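The repeated elementInScope(name, True) calls in these phases ask whether an element is "in table scope": the search up the stack of open elements stops at the nearest table (or the root). A simplified sketch of that check, assuming the open-elements stack is just a list of tag names (the vendored code walks node objects and uses a larger set of scoping elements):

    # Simplified "has an element in table scope" check, for illustration only.
    TABLE_SCOPE_BOUNDARIES = ("table", "html")

    def element_in_table_scope(open_elements, target):
        for name in reversed(open_elements):
            if name == target:
                return True
            if name in TABLE_SCOPE_BOUNDARIES:
                # A <table> (or the root) limits the search, so a <tr>
                # belonging to an outer table is not "in scope" here.
                return False
        return False

    stack = ["html", "body", "table", "tbody", "tr", "table", "tbody"]
    print(element_in_table_scope(stack, "tr"))     # False: blocked by inner table
    print(element_in_table_scope(stack, "tbody"))  # True: the inner tbody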
+ if not ignoreEndTag: + self.parser.phase.processStartTag(name, attributes) + + def startTagOther(self, name, attributes): + self.parser.phases["inTable"].processStartTag(name, attributes) + + def endTagTr(self, name): + if not self.ignoreEndTagTr(): + self.clearStackToTableRowContext() + self.tree.openElements.pop() + self.parser.phase = self.parser.phases["inTableBody"] + else: + # innerHTML case + assert self.parser.innerHTML + self.parser.parseError() + + def endTagTable(self, name): + ignoreEndTag = self.ignoreEndTagTr() + self.endTagTr("tr") + # Reprocess the current tag if the tr end tag was not ignored + # XXX how are we sure it's always ignored in the innerHTML case? + if not ignoreEndTag: + self.parser.phase.processEndTag(name) + + def endTagTableRowGroup(self, name): + if self.tree.elementInScope(name, True): + self.endTagTr("tr") + self.parser.phase.processEndTag(name) + else: + # innerHTML case + self.parser.parseError() + + def endTagIgnore(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s) in the row phase. Ignored.") % (name,)) + + def endTagOther(self, name): + self.parser.phases["inTable"].processEndTag(name) + +class InCellPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-cell + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", + "thead", "tr"), self.startTagTableOther) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + (("td", "th"), self.endTagTableCell), + (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore), + (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply) + ]) + self.endTagHandler.default = self.endTagOther + + # helper + def closeCell(self): + if self.tree.elementInScope("td", True): + self.endTagTableCell("td") + elif self.tree.elementInScope("th", True): + self.endTagTableCell("th") + + # the rest + def processCharacters(self, data): + self.parser.phases["inBody"].processCharacters(data) + + def startTagTableOther(self, name, attributes): + if self.tree.elementInScope("td", True) or \ + self.tree.elementInScope("th", True): + self.closeCell() + self.parser.phase.processStartTag(name, attributes) + else: + # innerHTML case + self.parser.parseError() + + def startTagOther(self, name, attributes): + self.parser.phases["inBody"].processStartTag(name, attributes) + # Optimize this for subsequent invocations. Can't do this initially + # because self.phases doesn't really exist at that point. + self.startTagHandler.default =\ + self.parser.phases["inBody"].processStartTag + + def endTagTableCell(self, name): + if self.tree.elementInScope(name, True): + self.tree.generateImpliedEndTags(name) + if self.tree.openElements[-1].name != name: + self.parser.parseError("Got table cell end tag (" + name +\ + ") while required end tags are missing.") + while True: + node = self.tree.openElements.pop() + if node.name == name: + break + else: + self.tree.openElements.pop() + self.tree.clearActiveFormattingElements() + self.parser.phase = self.parser.phases["inRow"] + else: + self.parser.parseError(_(u"Unexpected end tag (%s). Ignored.") % (name,)) + + def endTagIgnore(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s). 
Ignored.") % (name,)) + + def endTagImply(self, name): + if self.tree.elementInScope(name, True): + self.closeCell() + self.parser.phase.processEndTag(name) + else: + # sometimes innerHTML case + self.parser.parseError() + + def endTagOther(self, name): + self.parser.phases["inBody"].processEndTag(name) + # Optimize this for subsequent invocations. Can't do this initially + # because self.phases doesn't really exist at that point. + self.endTagHandler.default = self.parser.phases["inBody"].processEndTag + + +class InSelectPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("option", self.startTagOption), + ("optgroup", self.startTagOptgroup), + ("select", self.startTagSelect) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("option", self.endTagOption), + ("optgroup", self.endTagOptgroup), + ("select", self.endTagSelect), + (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", + "th"), self.endTagTableElements) + ]) + self.endTagHandler.default = self.endTagOther + + # http://www.whatwg.org/specs/web-apps/current-work/#in-select + def processCharacters(self, data): + self.tree.insertText(data) + + def startTagOption(self, name, attributes): + # We need to imply </option> if <option> is the current node. + if self.tree.openElements[-1].name == "option": + self.tree.openElements.pop() + self.tree.insertElement(name, attributes) + + def startTagOptgroup(self, name, attributes): + if self.tree.openElements[-1].name == "option": + self.tree.openElements.pop() + if self.tree.openElements[-1].name == "optgroup": + self.tree.openElements.pop() + self.tree.insertElement(name, attributes) + + def startTagSelect(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (select) in the " + u"select phase implies select start tag.")) + self.endTagSelect("select") + + def startTagOther(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag token (%s)" + u" in the select phase. Ignored.") % (name,)) + + def endTagOption(self, name): + if self.tree.openElements[-1].name == "option": + self.tree.openElements.pop() + else: + self.parser.parseError(_(u"Unexpected end tag (%s) in the " + u"select phase. Ignored.") % u'option') + + def endTagOptgroup(self, name): + # </optgroup> implicitly closes <option> + if self.tree.openElements[-1].name == "option" and \ + self.tree.openElements[-2].name == "optgroup": + self.tree.openElements.pop() + # It also closes </optgroup> + if self.tree.openElements[-1].name == "optgroup": + self.tree.openElements.pop() + # But nothing else + else: + self.parser.parseError(_(u"Unexpected end tag (%s) in the " + u"select phase. Ignored.") % u'optgroup') + + def endTagSelect(self, name): + if self.tree.elementInScope("select", True): + node = self.tree.openElements.pop() + while node.name != "select": + node = self.tree.openElements.pop() + self.parser.resetInsertionMode() + else: + # innerHTML case + self.parser.parseError() + + def endTagTableElements(self, name): + self.parser.parseError(_(u"Unexpected table end tag (%s)" + u" in the select phase.") % (name,)) + if self.tree.elementInScope(name, True): + self.endTagSelect("select") + self.parser.phase.processEndTag(name) + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag token (%s)" + u" in the select phase. 
Ignored.") % (name,)) + + +class AfterBodyPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + # XXX We should prolly add a handler for here as well... + self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)]) + self.endTagHandler.default = self.endTagOther + + def processComment(self, data): + # This is needed because data is to be appended to the <html> element + # here and not to whatever is currently open. + self.tree.insertComment(data, self.tree.openElements[0]) + + def processCharacters(self, data): + self.parser.parseError(_(u"Unexpected non-space characters in the " + u"after body phase.")) + self.parser.phase = self.parser.phases["inBody"] + self.parser.phase.processCharacters(data) + + def processStartTag(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag token (%s)" + u" in the after body phase.") % (name,)) + self.parser.phase = self.parser.phases["inBody"] + self.parser.phase.processStartTag(name, attributes) + + def endTagHtml(self,name): + if self.parser.innerHTML: + self.parser.parseError() + else: + # XXX: This may need to be done, not sure: + # Don't set lastPhase to the current phase but to the inBody phase + # instead. No need for extra parse errors if there's something + # after </html>. + # Try "<!doctype html>X</html>X" for instance. + self.parser.lastPhase = self.parser.phase + self.parser.phase = self.parser.phases["trailingEnd"] + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag token (%s)" + u" in the after body phase.") % (name,)) + self.parser.phase = self.parser.phases["inBody"] + self.parser.phase.processEndTag(name) + +class InFramesetPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("frameset", self.startTagFrameset), + ("frame", self.startTagFrame), + ("noframes", self.startTagNoframes) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("frameset", self.endTagFrameset), + ("noframes", self.endTagNoframes) + ]) + self.endTagHandler.default = self.endTagOther + + def processCharacters(self, data): + self.parser.parseError(_(u"Unepxected characters in " + u"the frameset phase. Characters ignored.")) + + def startTagFrameset(self, name, attributes): + self.tree.insertElement(name, attributes) + + def startTagFrame(self, name, attributes): + self.tree.insertElement(name, attributes) + self.tree.openElements.pop() + + def startTagNoframes(self, name, attributes): + self.parser.phases["inBody"].processStartTag(name, attributes) + + def startTagOther(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag token (%s)" + u" in the frameset phase. Ignored") % (name,)) + + def endTagFrameset(self, name): + if self.tree.openElements[-1].name == "html": + # innerHTML case + self.parser.parseError(_(u"Unexpected end tag token (frameset)" + u"in the frameset phase (innerHTML).")) + else: + self.tree.openElements.pop() + if (not self.parser.innerHTML and + self.tree.openElements[-1].name != "frameset"): + # If we're not in innerHTML mode and the the current node is not a + # "frameset" element (anymore) then switch. 
+ self.parser.phase = self.parser.phases["afterFrameset"] + + def endTagNoframes(self, name): + self.parser.phases["inBody"].processEndTag(name) + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag token (%s)" + u" in the frameset phase. Ignored.") % (name,)) + + +class AfterFramesetPhase(Phase): + # http://www.whatwg.org/specs/web-apps/current-work/#after3 + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = utils.MethodDispatcher([ + ("html", self.startTagHtml), + ("noframes", self.startTagNoframes) + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = utils.MethodDispatcher([ + ("html", self.endTagHtml) + ]) + self.endTagHandler.default = self.endTagOther + + def processCharacters(self, data): + self.parser.parseError(_(u"Unexpected non-space characters in the " + u"after frameset phase. Ignored.")) + + def startTagNoframes(self, name, attributes): + self.parser.phases["inBody"].processStartTag(name, attributes) + + def startTagOther(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (%s)" + u" in the after frameset phase. Ignored.") % (name,)) + + def endTagHtml(self, name): + self.parser.lastPhase = self.parser.phase + self.parser.phase = self.parser.phases["trailingEnd"] + + def endTagOther(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s)" + u" in the after frameset phase. Ignored.") % (name,)) + + +class TrailingEndPhase(Phase): + def processEOF(self): + pass + + def processComment(self, data): + self.tree.insertComment(data, self.tree.document) + + def processSpaceCharacters(self, data): + self.parser.lastPhase.processSpaceCharacters(data) + + def processCharacters(self, data): + self.parser.parseError(_(u"Unexpected non-space characters. " + u"Expected end of file.")) + self.parser.phase = self.parser.lastPhase + self.parser.phase.processCharacters(data) + + def processStartTag(self, name, attributes): + self.parser.parseError(_(u"Unexpected start tag (%s)" + u". Expected end of file.") % (name,)) + self.parser.phase = self.parser.lastPhase + self.parser.phase.processStartTag(name, attributes) + + def processEndTag(self, name): + self.parser.parseError(_(u"Unexpected end tag (%s)" + u". Expected end of file.") % (name,)) + self.parser.phase = self.parser.lastPhase + self.parser.phase.processEndTag(name) + + +class ParseError(Exception): + """Error in parsed document""" + pass diff --git a/lib/venus/planet/vendor/html5lib/inputstream.py b/lib/venus/planet/vendor/html5lib/inputstream.py new file mode 100644 index 0000000..b38979d --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/inputstream.py @@ -0,0 +1,602 @@ +import codecs +import re +import types + +from gettext import gettext +_ = gettext + +from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase +from constants import encodings +from utils import MethodDispatcher + +class HTMLInputStream(object): + """Provides a unicode stream of characters to the HTMLTokenizer. + + This class takes care of character encoding and removing or replacing + incorrect byte-sequences and also provides column and line tracking. + + """ + + def __init__(self, source, encoding=None, parseMeta=True, chardet=True): + """Initialises the HTMLInputStream. + + HTMLInputStream(source, [encoding]) -> Normalized stream from source + for use by the HTML5Lib. + + source can be either a file-object, local filename or a string. 
+ + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + parseMeta - Look for a <meta> element containing encoding information + + """ + # List of where new lines occur + self.newLines = [0] + + self.charEncoding = encoding + + # Raw Stream - for unicode objects this will encode to utf-8 and set + # self.charEncoding as appropriate + self.rawStream = self.openStream(source) + + # Encoding Information + #Number of bytes to use when looking for a meta element with + #encoding information + self.numBytesMeta = 512 + #Number of bytes to use when using detecting encoding using chardet + self.numBytesChardet = 100 + #Encoding to use if no other information can be found + self.defaultEncoding = "windows-1252" + + #Detect encoding iff no explicit "transport level" encoding is supplied + if self.charEncoding is None or not isValidEncoding(self.charEncoding): + self.charEncoding = self.detectEncoding(parseMeta, chardet) + + self.dataStream = codecs.getreader(self.charEncoding)(self.rawStream, + 'replace') + + self.queue = [] + self.errors = [] + + self.line = self.col = 0 + self.lineLengths = [] + + #Flag to indicate we may have a CR LF broken across a data chunk + self._lastChunkEndsWithCR = False + + def openStream(self, source): + """Produces a file object from source. + + source can be either a file object, local filename or a string. + + """ + # Already a file object + if hasattr(source, 'read'): + stream = source + else: + # Otherwise treat source as a string and convert to a file object + if isinstance(source, unicode): + source = source.encode('utf-8') + self.charEncoding = "utf-8" + import cStringIO + stream = cStringIO.StringIO(str(source)) + return stream + + def detectEncoding(self, parseMeta=True, chardet=True): + + #First look for a BOM + #This will also read past the BOM if present + encoding = self.detectBOM() + #If there is no BOM need to look for meta elements with encoding + #information + if encoding is None and parseMeta: + encoding = self.detectEncodingMeta() + #Guess with chardet, if avaliable + if encoding is None and chardet: + try: + from chardet.universaldetector import UniversalDetector + buffers = [] + detector = UniversalDetector() + while not detector.done: + buffer = self.rawStream.read(self.numBytesChardet) + if not buffer: + break + buffers.append(buffer) + detector.feed(buffer) + detector.close() + encoding = detector.result['encoding'] + self.seek("".join(buffers), 0) + except ImportError: + pass + # If all else fails use the default encoding + if encoding is None: + encoding = self.defaultEncoding + + #Substitute for equivalent encodings: + encodingSub = {"iso-8859-1":"windows-1252"} + + if encoding.lower() in encodingSub: + encoding = encodingSub[encoding.lower()] + + return encoding + + def detectBOM(self): + """Attempts to detect at BOM at the start of the stream. 
If + an encoding can be determined from the BOM return the name of the + encoding otherwise return None""" + bomDict = { + codecs.BOM_UTF8: 'utf-8', + codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', + codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be' + } + + # Go to beginning of file and read in 4 bytes + string = self.rawStream.read(4) + + # Try detecting the BOM using bytes from the string + encoding = bomDict.get(string[:3]) # UTF-8 + seek = 3 + if not encoding: + # Need to detect UTF-32 before UTF-16 + encoding = bomDict.get(string) # UTF-32 + seek = 4 + if not encoding: + encoding = bomDict.get(string[:2]) # UTF-16 + seek = 2 + + # Set the read position past the BOM if one was found, otherwise + # set it to the start of the stream + self.seek(string, encoding and seek or 0) + + return encoding + + def seek(self, buffer, n): + """Unget buffer[n:]""" + if hasattr(self.rawStream, 'unget'): + self.rawStream.unget(buffer[n:]) + return + + if hasattr(self.rawStream, 'seek'): + try: + self.rawStream.seek(n) + return + except IOError: + pass + + class BufferedStream: + def __init__(self, data, stream): + self.data = data + self.stream = stream + def read(self, chars=-1): + if chars == -1 or chars > len(self.data): + result = self.data + self.data = '' + if chars == -1: + return result + self.stream.read() + else: + return result + self.stream.read(chars-len(result)) + elif not self.data: + return self.stream.read(chars) + else: + result = self.data[:chars] + self.data = self.data[chars:] + return result + def unget(self, data): + if self.data: + self.data += data + else: + self.data = data + + self.rawStream = BufferedStream(buffer[n:], self.rawStream) + + def detectEncodingMeta(self): + """Report the encoding declared by the meta element + """ + buffer = self.rawStream.read(self.numBytesMeta) + parser = EncodingParser(buffer) + self.seek(buffer, 0) + return parser.getEncoding() + + def position(self): + """Returns (line, col) of the current position in the stream.""" + line, col = self.line, self.col + return (line + 1, col) + + def char(self): + """ Read one character from the stream or queue if available. Return + EOF when EOF is reached. + """ + if not self.queue: + self.readChunk() + #If we still don't have a character we have reached EOF + if not self.queue: + return EOF + + char = self.queue.pop(0) + + # update position in stream + if char == '\n': + self.lineLengths.append(self.col) + self.line += 1 + self.col = 0 + else: + self.col += 1 + return char + + def readChunk(self, chunkSize=10240): + data = self.dataStream.read(chunkSize) + if not data: + return + #Replace null characters + for i in xrange(data.count(u"\u0000")): + self.errors.append(_('null character found in input stream, ' + 'replaced with U+FFFD')) + data = data.replace(u"\u0000", u"\ufffd") + #Check for CR LF broken across chunks + if (self._lastChunkEndsWithCR and data[0] == "\n"): + data = data[1:] + self._lastChunkEndsWithCR = data[-1] == "\r" + data = data.replace("\r\n", "\n") + data = data.replace("\r", "\n") + + data = unicode(data) + self.queue.extend([char for char in data]) + + def charsUntil(self, characters, opposite = False): + """ Returns a string of characters from the stream up to but not + including any character in characters or EOF. characters can be + any container that supports the in method being called on it. 
+ """ + + #This method is currently 40-50% of our total runtime and badly needs + #optimizing + #Possible improvements: + # - use regexp to find characters that match the required character set + # (with regexp cache since we do the same searches many many times) + # - improve EOF handling for fewer if statements + + if not self.queue: + self.readChunk() + #Break if we have reached EOF + if not self.queue or self.queue[0] == None: + return u"" + + i = 0 + while (self.queue[i] in characters) == opposite: + i += 1 + if i == len(self.queue): + self.readChunk() + #If the queue doesn't grow we have reached EOF + if i == len(self.queue) or self.queue[i] is EOF: + break + #XXX- wallpaper over bug in calculation below + #Otherwise change the stream position + if self.queue[i] == '\n': + self.lineLengths.append(self.col) + self.line += 1 + self.col = 0 + else: + self.col += 1 + + rv = u"".join(self.queue[:i]) + self.queue = self.queue[i:] + + #Calculate where we now are in the stream + #One possible optimisation would be to store all read characters and + #Calculate this on an as-needed basis (perhaps flushing the read data + #every time we read a new chunk) rather than once per call here and + #in .char() + + #XXX Temporarily disable this because there is a bug + + #lines = rv.split("\n") + # + #if lines: + # #Add number of lines passed onto positon + # oldCol = self.col + # self.line += len(lines)-1 + # if len(lines) > 1: + # self.col = len(lines[-1]) + # else: + # self.col += len(lines[0]) + # + # if self.lineLengths and oldCol > 0: + # self.lineLengths[-1] += len(lines[0]) + # lines = lines[1:-1] + # else: + # lines = lines[:-1] + # + # for line in lines: + # self.lineLengths.append(len(line)) + # + + return rv + + def unget(self, chars): + if chars: + self.queue = list(chars) + self.queue + #Alter the current line, col position + for c in chars[::-1]: + if c == '\n': + self.line -= 1 + self.col = self.lineLengths[self.line] + else: + self.col -= 1 + +class EncodingBytes(str): + """String-like object with an assosiated position and various extra methods + If the position is ever greater than the string length then an exception is + raised""" + def __init__(self, value): + str.__init__(self, value) + self._position=-1 + + def __iter__(self): + return self + + def next(self): + self._position += 1 + rv = self[self.position] + return rv + + def setPosition(self, position): + if self._position >= len(self): + raise StopIteration + self._position = position + + def getPosition(self): + if self._position >= len(self): + raise StopIteration + if self._position >= 0: + return self._position + else: + return None + + position = property(getPosition, setPosition) + + def getCurrentByte(self): + return self[self.position] + + currentByte = property(getCurrentByte) + + def skip(self, chars=spaceCharacters): + """Skip past a list of characters""" + while self.currentByte in chars: + self.position += 1 + + def matchBytes(self, bytes, lower=False): + """Look for a sequence of bytes at the start of a string. If the bytes + are found return True and advance the position to the byte after the + match. Otherwise return False and leave the position alone""" + data = self[self.position:self.position+len(bytes)] + if lower: + data = data.lower() + rv = data.startswith(bytes) + if rv == True: + self.position += len(bytes) + return rv + + def jumpTo(self, bytes): + """Look for the next sequence of bytes matching a given sequence. 
If + a match is found advance the position to the last byte of the match""" + newPosition = self[self.position:].find(bytes) + if newPosition > -1: + self._position += (newPosition + len(bytes)-1) + return True + else: + raise StopIteration + + def findNext(self, byteList): + """Move the pointer so it points to the next byte in a set of possible + bytes""" + while (self.currentByte not in byteList): + self.position += 1 + +class EncodingParser(object): + """Mini parser for detecting character encoding from meta elements""" + + def __init__(self, data): + """string - the data to work on for encoding detection""" + self.data = EncodingBytes(data) + self.encoding = None + + def getEncoding(self): + methodDispatch = ( + ("<!--",self.handleComment), + ("<meta",self.handleMeta), + ("</",self.handlePossibleEndTag), + ("<!",self.handleOther), + ("<?",self.handleOther), + ("<",self.handlePossibleStartTag)) + for byte in self.data: + keepParsing = True + for key, method in methodDispatch: + if self.data.matchBytes(key, lower=True): + try: + keepParsing = method() + break + except StopIteration: + keepParsing=False + break + if not keepParsing: + break + if self.encoding is not None: + self.encoding = self.encoding.strip() + return self.encoding + + def handleComment(self): + """Skip over comments""" + return self.data.jumpTo("-->") + + def handleMeta(self): + if self.data.currentByte not in spaceCharacters: + #if we have <meta not followed by a space so just keep going + return True + #We have a valid meta element we want to search for attributes + while True: + #Try to find the next attribute after the current position + attr = self.getAttribute() + if attr is None: + return True + else: + if attr[0] == "charset": + tentativeEncoding = attr[1] + if isValidEncoding(tentativeEncoding): + self.encoding = tentativeEncoding + return False + elif attr[0] == "content": + contentParser = ContentAttrParser(EncodingBytes(attr[1])) + tentativeEncoding = contentParser.parse() + if isValidEncoding(tentativeEncoding): + self.encoding = tentativeEncoding + return False + + def handlePossibleStartTag(self): + return self.handlePossibleTag(False) + + def handlePossibleEndTag(self): + self.data.position+=1 + return self.handlePossibleTag(True) + + def handlePossibleTag(self, endTag): + if self.data.currentByte not in asciiLetters: + #If the next byte is not an ascii letter either ignore this + #fragment (possible start tag case) or treat it according to + #handleOther + if endTag: + self.data.position -= 1 + self.handleOther() + return True + + self.data.findNext(list(spaceCharacters) + ["<", ">"]) + if self.data.currentByte == "<": + #return to the first step in the overall "two step" algorithm + #reprocessing the < byte + self.data.position -= 1 + else: + #Read all attributes + attr = self.getAttribute() + while attr is not None: + attr = self.getAttribute() + return True + + def handleOther(self): + return self.data.jumpTo(">") + + def getAttribute(self): + """Return a name,value pair for the next attribute in the stream, + if one is found, or None""" + self.data.skip(list(spaceCharacters)+["/"]) + if self.data.currentByte == "<": + self.data.position -= 1 + return None + elif self.data.currentByte == ">": + return None + attrName = [] + attrValue = [] + spaceFound = False + #Step 5 attribute name + while True: + if self.data.currentByte == "=" and attrName: + break + elif self.data.currentByte in spaceCharacters: + spaceFound=True + break + elif self.data.currentByte in ("/", "<", ">"): + return 
"".join(attrName), "" + elif self.data.currentByte in asciiUppercase: + attrName.extend(self.data.currentByte.lower()) + else: + attrName.extend(self.data.currentByte) + #Step 6 + self.data.position += 1 + #Step 7 + if spaceFound: + self.data.skip() + #Step 8 + if self.data.currentByte != "=": + self.data.position -= 1 + return "".join(attrName), "" + #XXX need to advance position in both spaces and value case + #Step 9 + self.data.position += 1 + #Step 10 + self.data.skip() + #Step 11 + if self.data.currentByte in ("'", '"'): + #11.1 + quoteChar = self.data.currentByte + while True: + self.data.position+=1 + #11.3 + if self.data.currentByte == quoteChar: + self.data.position += 1 + return "".join(attrName), "".join(attrValue) + #11.4 + elif self.data.currentByte in asciiUppercase: + attrValue.extend(self.data.currentByte.lower()) + #11.5 + else: + attrValue.extend(self.data.currentByte) + elif self.data.currentByte in (">", '<'): + return "".join(attrName), "" + elif self.data.currentByte in asciiUppercase: + attrValue.extend(self.data.currentByte.lower()) + else: + attrValue.extend(self.data.currentByte) + while True: + self.data.position +=1 + if self.data.currentByte in ( + list(spaceCharacters) + [">", '<']): + return "".join(attrName), "".join(attrValue) + elif self.data.currentByte in asciiUppercase: + attrValue.extend(self.data.currentByte.lower()) + else: + attrValue.extend(self.data.currentByte) + + +class ContentAttrParser(object): + def __init__(self, data): + self.data = data + def parse(self): + try: + #Skip to the first ";" + self.data.jumpTo(";") + self.data.position += 1 + self.data.skip() + #Check if the attr name is charset + #otherwise return + self.data.jumpTo("charset") + self.data.position += 1 + self.data.skip() + if not self.data.currentByte == "=": + #If there is no = sign keep looking for attrs + return None + self.data.position += 1 + self.data.skip() + #Look for an encoding between matching quote marks + if self.data.currentByte in ('"', "'"): + quoteMark = self.data.currentByte + self.data.position += 1 + oldPosition = self.data.position + self.data.jumpTo(quoteMark) + return self.data[oldPosition:self.data.position] + else: + #Unquoted value + oldPosition = self.data.position + try: + self.data.findNext(spaceCharacters) + return self.data[oldPosition:self.data.position] + except StopIteration: + #Return the whole remaining value + return self.data[oldPosition:] + except StopIteration: + return None + +def isValidEncoding(encoding): + """Determine if a string is a supported encoding""" + return (encoding is not None and type(encoding) == types.StringType and + encoding.lower().strip() in encodings) diff --git a/lib/venus/planet/vendor/html5lib/liberalxmlparser.py b/lib/venus/planet/vendor/html5lib/liberalxmlparser.py new file mode 100644 index 0000000..89e9f00 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/liberalxmlparser.py @@ -0,0 +1,147 @@ +""" +Warning: this module is experimental and subject to change and even removal +at any time. 
+ +For background/rationale, see: + * http://www.intertwingly.net/blog/2007/01/08/Xhtml5lib + * http://tinyurl.com/ylfj8k (and follow-ups) + +References: + * http://googlereader.blogspot.com/2005/12/xml-errors-in-feeds.html + * http://wiki.whatwg.org/wiki/HtmlVsXhtml + +@@TODO: + * Selectively lowercase only XHTML, but not foreign markup +""" + +import html5parser +from constants import voidElements, contentModelFlags + +from xml.dom import XHTML_NAMESPACE +from xml.sax.saxutils import unescape + +class XMLParser(html5parser.HTMLParser): + """ liberal XML parser """ + + def __init__(self, *args, **kwargs): + html5parser.HTMLParser.__init__(self, *args, **kwargs) + + self.phases["initial"] = XmlRootPhase(self, self.tree) + + def normalizeToken(self, token): + + if token["type"] in ("StartTag", "EmptyTag"): + token["data"] = dict(token["data"][::-1]) + + # For EmptyTags, process both a Start and an End tag + if token["type"] == "EmptyTag": + save = self.tokenizer.contentModelFlag + self.phase.processStartTag(token["name"], token["data"]) + self.tokenizer.contentModelFlag = save + token["data"] = {} + token["type"] = "EndTag" + + elif token["type"] == "Characters": + # un-escape rcdataElements (e.g. style, script) + if self.tokenizer.contentModelFlag == contentModelFlags["CDATA"]: + token["data"] = unescape(token["data"]) + + elif token["type"] == "Comment": + # Rescue CDATA from the comments + if (token["data"].startswith("[CDATA[") and + token["data"].endswith("]]")): + token["type"] = "Characters" + token["data"] = token["data"][7:-2] + + return token + + def _parse(self, stream, innerHTML=False, container="div", encoding=None, + **kwargs): + + html5parser.HTMLParser._parse(self, stream, innerHTML, container, + encoding, lowercaseElementName=False, + lowercaseAttrName=False) + +class XHTMLParser(XMLParser): + """ liberal XMTHML parser """ + + def __init__(self, *args, **kwargs): + html5parser.HTMLParser.__init__(self, *args, **kwargs) + self.phases["initial"] = XmlInitialPhase(self, self.tree) + self.phases["rootElement"] = XhmlRootPhase(self, self.tree) + + def normalizeToken(self, token): + token = XMLParser.normalizeToken(self, token) + + # ensure that non-void XHTML elements have content so that separate + # open and close tags are emitted + if token["type"] == "EndTag": + if token["name"] in voidElements: + if not self.tree.openElements or \ + self.tree.openElements[-1].name != token["name"]: + token["type"] = "EmptyTag" + if not token.has_key("data"): token["data"] = {} + else: + if token["name"] == self.tree.openElements[-1].name and \ + not self.tree.openElements[-1].hasContent(): + for e in self.tree.openElements: + if 'xmlns' in e.attributes.keys(): + if e.attributes['xmlns'] != XHTML_NAMESPACE: + break + else: + self.tree.insertText('') + + return token + +class XhmlRootPhase(html5parser.RootElementPhase): + def insertHtmlElement(self): + element = self.tree.createElement("html", {'xmlns': 'http://www.w3.org/1999/xhtml'}) + self.tree.openElements.append(element) + self.tree.document.appendChild(element) + self.parser.phase = self.parser.phases["beforeHead"] + +class XmlInitialPhase(html5parser.InitialPhase): + """ Consume XML Prologs """ + def processComment(self, data): + if not data.startswith('?xml') or not data.endswith('?'): + html5parser.InitialPhase.processComment(self, data) + +class XmlRootPhase(html5parser.Phase): + """ Consume XML Prologs """ + def processComment(self, data): + print repr(data) + if not data.startswith('?xml') or not data.endswith('?'): + 
html5parser.InitialPhase.processComment(self, data) + + """ Prime the Xml parser """ + def __getattr__(self, name): + self.tree.openElements.append(self.tree.document) + self.parser.phase = XmlElementPhase(self.parser, self.tree) + return getattr(self.parser.phase, name) + +class XmlElementPhase(html5parser.Phase): + """ Generic handling for all XML elements """ + + def __init__(self, *args, **kwargs): + html5parser.Phase.__init__(self, *args, **kwargs) + self.startTagHandler = html5parser.utils.MethodDispatcher([]) + self.startTagHandler.default = self.startTagOther + self.endTagHandler = html5parser.utils.MethodDispatcher([]) + self.endTagHandler.default = self.endTagOther + + def startTagOther(self, name, attributes): + element = self.tree.createElement(name, attributes) + self.tree.openElements[-1].appendChild(element) + self.tree.openElements.append(element) + + def endTagOther(self, name): + for node in self.tree.openElements[::-1]: + if node.name == name: + while self.tree.openElements.pop() != node: + pass + break + else: + self.parser.parseError() + + def processCharacters(self, data): + self.tree.insertText(data) diff --git a/lib/venus/planet/vendor/html5lib/sanitizer.py b/lib/venus/planet/vendor/html5lib/sanitizer.py new file mode 100644 index 0000000..ccbc16b --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/sanitizer.py @@ -0,0 +1,202 @@ +import re +from xml.sax.saxutils import escape, unescape +from tokenizer import HTMLTokenizer + +class HTMLSanitizerMixin(object): + """ sanitization of XHTML+MathML+SVG and of inline style attributes.""" + + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', + 'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite', + 'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', + 'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', + 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', + 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', + 'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table', + 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', + 'ul', 'var'] + + mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi', + 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', + 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub', + 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', + 'munderover', 'none'] + + svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', + 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face', + 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'image', + 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', + 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', + 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 'align', 'alt', 'axis', 'border', 'cellpadding', + 'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class', + 'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime', + 'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height', + 'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', + 'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name', + 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev', + 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', + 'span', 'src', 'start', 'style', 'summary', 'tabindex', 
'target', + 'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width', + 'xml:lang'] + + mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', + 'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth', + 'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence', + 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace', + 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize', + 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines', + 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', + 'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show', + 'xlink:type', 'xmlns', 'xmlns:xlink'] + + svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', + 'arabic-form', 'ascent', 'attributeName', 'attributeType', + 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', + 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', + 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-rule', + 'font-family', 'font-size', 'font-stretch', 'font-style', + 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', + 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', + 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', + 'keySplines', 'keyTimes', 'lang', 'marker-end', 'marker-mid', + 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', + 'mathematical', 'max', 'min', 'name', 'offset', 'opacity', 'orient', + 'origin', 'overline-position', 'overline-thickness', 'panose-1', + 'path', 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', + 'refY', 'repeatCount', 'repeatDur', 'requiredExtensions', + 'requiredFeatures', 'restart', 'rotate', 'rx', 'ry', 'slope', + 'stemh', 'stemv', 'stop-color', 'stop-opacity', + 'strikethrough-position', 'strikethrough-thickness', 'stroke', + 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', + 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', + 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', + 'transform', 'type', 'u1', 'u2', 'underline-position', + 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', + 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', + 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', + 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', + 'xlink:type', 'xml:base', 'xml:lang', 'xml:space', 'xmlns', + 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan'] + + attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', + 'xlink:href', 'xml:base'] + + acceptable_css_properties = ['azimuth', 'background-color', + 'border-bottom-color', 'border-collapse', 'border-color', + 'border-left-color', 'border-right-color', 'border-top-color', 'clear', + 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', + 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', + 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', + 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', + 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', + 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', + 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', + 'white-space', 'width'] + + acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', + 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', + 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', + 'lime', 'maroon', 'medium', 'none', 
'navy', 'normal', 'nowrap', 'olive', + 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', + 'transparent', 'underline', 'white', 'yellow'] + + acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule', + 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', + 'stroke-opacity'] + + acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc', + 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal', + 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', + 'ssh', 'sftp', 'rtsp', 'afs' ] + + # subclasses may define their own versions of these constants + allowed_elements = acceptable_elements + mathml_elements + svg_elements + allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes + allowed_css_properties = acceptable_css_properties + allowed_css_keywords = acceptable_css_keywords + allowed_svg_properties = acceptable_svg_properties + allowed_protocols = acceptable_protocols + + # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and + # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style + # attributes are parsed, and a restricted set, # specified by + # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through. + # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified + # in ALLOWED_PROTOCOLS are allowed. + # + # sanitize_html('<script> do_nasty_stuff() </script>') + # => <script> do_nasty_stuff() </script> + # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') + # => <a>Click here for $100</a> + def sanitize_token(self, token): + if token["type"] in ["StartTag", "EndTag", "EmptyTag"]: + if token["name"] in self.allowed_elements: + if token.has_key("data"): + attrs = dict([(name,val) for name,val in token["data"][::-1] if name in self.allowed_attributes]) + for attr in self.attr_val_is_uri: + if not attrs.has_key(attr): continue + val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '', unescape(attrs[attr])).lower() + if re.match("^[a-z0-9][-+.a-z0-9]*:",val_unescaped) and (val_unescaped.split(':')[0] not in self.allowed_protocols): + del attrs[attr] + if attrs.has_key('style'): + attrs['style'] = self.sanitize_css(attrs['style']) + token["data"] = [[name,val] for name,val in attrs.items()] + return token + else: + if token["type"] == "EndTag": + token["data"] = "</%s>" % token["name"] + elif token["data"]: + attrs = ''.join([' %s="%s"' % (k,escape(v)) for k,v in token["data"]]) + token["data"] = "<%s%s>" % (token["name"],attrs) + else: + token["data"] = "<%s>" % token["name"] + if token["type"] == "EmptyTag": + token["data"]=token["data"][:-1] + "/>" + token["type"] = "Characters" + del token["name"] + return token + elif token["type"] == "Comment": + pass + else: + return token + + def sanitize_css(self, style): + # disallow urls + style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) + + # gauntlet + if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' + if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return '' + + clean = [] + for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): + if not value: continue + if prop.lower() in self.allowed_css_properties: + clean.append(prop + ': ' + value + ';') + elif prop.split('-')[0].lower() in ['background','border','margin','padding']: + for keyword in value.split(): + if not keyword in self.acceptable_css_keywords and \ + not 
re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$",keyword): + break + else: + clean.append(prop + ': ' + value + ';') + elif prop.lower() in self.allowed_svg_properties: + clean.append(prop + ': ' + value + ';') + + return ' '.join(clean) + +class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin): + def __init__(self, stream, encoding=None, parseMeta=True, + lowercaseElementName=False, lowercaseAttrName=False): + #Change case matching defaults as we only output lowercase html anyway + #This solution doesn't seem ideal... + HTMLTokenizer.__init__(self, stream, encoding, parseMeta, + lowercaseElementName, lowercaseAttrName) + + def __iter__(self): + for token in HTMLTokenizer.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token diff --git a/lib/venus/planet/vendor/html5lib/serializer/__init__.py b/lib/venus/planet/vendor/html5lib/serializer/__init__.py new file mode 100644 index 0000000..c0030f2 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/serializer/__init__.py @@ -0,0 +1,3 @@ + +from htmlserializer import HTMLSerializer +from xhtmlserializer import XHTMLSerializer diff --git a/lib/venus/planet/vendor/html5lib/serializer/htmlserializer.py b/lib/venus/planet/vendor/html5lib/serializer/htmlserializer.py new file mode 100644 index 0000000..c5d6c51 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/serializer/htmlserializer.py @@ -0,0 +1,218 @@ +try: + frozenset +except NameError: + # Import from the sets module for python 2.3 + from sets import ImmutableSet as frozenset + +import gettext +_ = gettext.gettext + +from html5lib.constants import voidElements, booleanAttributes, spaceCharacters +from html5lib.constants import rcdataElements + +from xml.sax.saxutils import escape + +spaceCharacters = u"".join(spaceCharacters) + +try: + from codecs import register_error, xmlcharrefreplace_errors +except ImportError: + unicode_encode_errors = "strict" +else: + unicode_encode_errors = "htmlentityreplace" + + from html5lib.constants import entities + + encode_entity_map = {} + for k, v in entities.items(): + if v != "&" and encode_entity_map.get(v) != k.lower(): + # prefer < over < and similarly for &, >, etc. 
+ encode_entity_map[v] = k + + def htmlentityreplace_errors(exc): + if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): + res = [] + for c in exc.object[exc.start:exc.end]: + e = encode_entity_map.get(c) + if e: + res.append("&") + res.append(e) + if not e.endswith(";"): + res.append(";") + else: + res.append(c.encode(exc.encoding, "xmlcharrefreplace")) + return (u"".join(res), exc.end) + else: + return xmlcharrefreplace_errors(exc) + + register_error(unicode_encode_errors, htmlentityreplace_errors) + + del register_error + +def encode(text, encoding): + return text.encode(encoding, unicode_encode_errors) + +class HTMLSerializer(object): + + quote_attr_values = False + quote_char = '"' + use_best_quote_char = True + minimize_boolean_attributes = True + + use_trailing_solidus = False + space_before_trailing_solidus = True + escape_lt_in_attrs = False + escape_rcdata = False + + inject_meta_charset = True + strip_whitespace = False + sanitize = False + omit_optional_tags = True + + options = ("quote_attr_values", "quote_char", "use_best_quote_char", + "minimize_boolean_attributes", "use_trailing_solidus", + "space_before_trailing_solidus", "omit_optional_tags", + "strip_whitespace", "inject_meta_charset", "escape_lt_in_attrs", + "escape_rcdata", 'use_trailing_solidus', "sanitize") + + def __init__(self, **kwargs): + if kwargs.has_key('quote_char'): + self.use_best_quote_char = False + for attr in self.options: + setattr(self, attr, kwargs.get(attr, getattr(self, attr))) + self.errors = [] + self.strict = False + + def serialize(self, treewalker, encoding=None): + in_cdata = False + self.errors = [] + if encoding and self.inject_meta_charset: + from html5lib.filters.inject_meta_charset import Filter + treewalker = Filter(treewalker, encoding) + # XXX: WhitespaceFilter should be used before OptionalTagFilter + # for maximum efficiently of this latter filter + if self.strip_whitespace: + from html5lib.filters.whitespace import Filter + treewalker = Filter(treewalker) + if self.sanitize: + from html5lib.filters.sanitizer import Filter + treewalker = Filter(treewalker) + if self.omit_optional_tags: + from html5lib.filters.optionaltags import Filter + treewalker = Filter(treewalker) + for token in treewalker: + type = token["type"] + if type == "Doctype": + doctype = u"<!DOCTYPE %s>" % token["name"] + if encoding: + yield doctype.encode(encoding) + else: + yield doctype + + elif type in ("Characters", "SpaceCharacters"): + if type == "SpaceCharacters" or in_cdata: + if in_cdata and token["data"].find("</") >= 0: + self.serializeError(_("Unexpected </ in CDATA")) + if encoding: + yield token["data"].encode(encoding, "strict") + else: + yield token["data"] + elif encoding: + yield encode(escape(token["data"]), encoding) + else: + yield escape(token["data"]) + + elif type in ("StartTag", "EmptyTag"): + name = token["name"] + if name in rcdataElements and not self.escape_rcdata: + in_cdata = True + elif in_cdata: + self.serializeError(_("Unexpected child element of a CDATA element")) + attrs = token["data"] + if hasattr(attrs, "items"): + attrs = attrs.items() + attrs.sort() + attributes = [] + for k,v in attrs: + if encoding: + k = k.encode(encoding, "strict") + attributes.append(' ') + + attributes.append(k) + if not self.minimize_boolean_attributes or \ + (k not in booleanAttributes.get(name, tuple()) \ + and k not in booleanAttributes.get("", tuple())): + attributes.append("=") + if self.quote_attr_values or not v: + quote_attr = True + else: + quote_attr = reduce(lambda x,y: x or (y 
in v), + spaceCharacters + "<>\"'", False) + v = v.replace("&", "&") + if self.escape_lt_in_attrs: v = v.replace("<", "<") + if encoding: + v = encode(v, encoding) + if quote_attr: + quote_char = self.quote_char + if self.use_best_quote_char: + if "'" in v and '"' not in v: + quote_char = '"' + elif '"' in v and "'" not in v: + quote_char = "'" + if quote_char == "'": + v = v.replace("'", "'") + else: + v = v.replace('"', """) + attributes.append(quote_char) + attributes.append(v) + attributes.append(quote_char) + else: + attributes.append(v) + if name in voidElements and self.use_trailing_solidus: + if self.space_before_trailing_solidus: + attributes.append(" /") + else: + attributes.append("/") + if encoding: + yield "<%s%s>" % (name.encode(encoding, "strict"), "".join(attributes)) + else: + yield u"<%s%s>" % (name, u"".join(attributes)) + + elif type == "EndTag": + name = token["name"] + if name in rcdataElements: + in_cdata = False + elif in_cdata: + self.serializeError(_("Unexpected child element of a CDATA element")) + end_tag = u"</%s>" % name + if encoding: + end_tag = end_tag.encode(encoding, "strict") + yield end_tag + + elif type == "Comment": + data = token["data"] + if data.find("--") >= 0: + self.serializeError(_("Comment contains --")) + comment = u"<!--%s-->" % token["data"] + if encoding: + comment = comment.encode(encoding, unicode_encode_errors) + yield comment + + else: + self.serializeError(token["data"]) + + def render(self, treewalker, encoding=None): + if encoding: + return "".join(list(self.serialize(treewalker, encoding))) + else: + return u"".join(list(self.serialize(treewalker))) + + def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): + # XXX The idea is to make data mandatory. + self.errors.append(data) + if self.strict: + raise SerializeError + +def SerializeError(Exception): + """Error in serialized tree""" + pass diff --git a/lib/venus/planet/vendor/html5lib/serializer/xhtmlserializer.py b/lib/venus/planet/vendor/html5lib/serializer/xhtmlserializer.py new file mode 100644 index 0000000..7fdce47 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/serializer/xhtmlserializer.py @@ -0,0 +1,9 @@ +from htmlserializer import HTMLSerializer + +class XHTMLSerializer(HTMLSerializer): + quote_attr_values = True + minimize_boolean_attributes = False + use_trailing_solidus = True + escape_lt_in_attrs = True + omit_optional_tags = False + escape_rcdata = True diff --git a/lib/venus/planet/vendor/html5lib/tokenizer.py b/lib/venus/planet/vendor/html5lib/tokenizer.py new file mode 100644 index 0000000..31f8494 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/tokenizer.py @@ -0,0 +1,1009 @@ +try: + frozenset +except NameError: + # Import from the sets module for python 2.3 + from sets import Set as set + from sets import ImmutableSet as frozenset +import gettext +_ = gettext.gettext + +from constants import contentModelFlags, spaceCharacters +from constants import entitiesWindows1252, entities +from constants import asciiLowercase, asciiLetters, asciiUpper2Lower +from constants import digits, hexDigits, EOF + +from inputstream import HTMLInputStream + +class HTMLTokenizer(object): + """ This class takes care of tokenizing HTML. + + * self.currentToken + Holds the token that is currently being processed. + + * self.state + Holds a reference to the method to be invoked... XXX + + * self.states + Holds a mapping between states and methods that implement the state. + + * self.stream + Points to HTMLInputStream object. 
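HTMLSerializer and its XHTMLSerializer subclass above consume a treewalker token stream rather than a tree directly. A hedged sketch of typical use, assuming the vendored html5lib is importable; the tree-walker name and option values are illustrative:

import html5lib
from html5lib import treewalkers
from html5lib.serializer import HTMLSerializer

doc = html5lib.HTMLParser().parse("<p class=x>hi<br>")
walker = treewalkers.getTreeWalker("simpletree")   # the parser's default tree type
serializer = HTMLSerializer(quote_attr_values=True, omit_optional_tags=False)
html_text = serializer.render(walker(doc))
# html_text is a unicode string with every attribute quoted and no optional
# tags omitted, per the options chosen above.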
+ """ + + # XXX need to fix documentation + + def __init__(self, stream, encoding=None, parseMeta=True, + lowercaseElementName=True, lowercaseAttrName=True,): + self.stream = HTMLInputStream(stream, encoding, parseMeta) + + #Perform case conversions? + self.lowercaseElementName = lowercaseElementName + self.lowercaseAttrName = lowercaseAttrName + + self.states = { + "data":self.dataState, + "entityData":self.entityDataState, + "tagOpen":self.tagOpenState, + "closeTagOpen":self.closeTagOpenState, + "tagName":self.tagNameState, + "beforeAttributeName":self.beforeAttributeNameState, + "attributeName":self.attributeNameState, + "afterAttributeName":self.afterAttributeNameState, + "beforeAttributeValue":self.beforeAttributeValueState, + "attributeValueDoubleQuoted":self.attributeValueDoubleQuotedState, + "attributeValueSingleQuoted":self.attributeValueSingleQuotedState, + "attributeValueUnQuoted":self.attributeValueUnQuotedState, + "bogusComment":self.bogusCommentState, + "markupDeclarationOpen":self.markupDeclarationOpenState, + "commentStart":self.commentStartState, + "commentStartDash":self.commentStartDashState, + "comment":self.commentState, + "commentEndDash":self.commentEndDashState, + "commentEnd":self.commentEndState, + "doctype":self.doctypeState, + "beforeDoctypeName":self.beforeDoctypeNameState, + "doctypeName":self.doctypeNameState, + "afterDoctypeName":self.afterDoctypeNameState, + "beforeDoctypePublicIdentifier":self.beforeDoctypePublicIdentifierState, + "doctypePublicIdentifierDoubleQuoted":self.doctypePublicIdentifierDoubleQuotedState, + "doctypePublicIdentifierSingleQuoted":self.doctypePublicIdentifierSingleQuotedState, + "afterDoctypePublicIdentifier":self.afterDoctypePublicIdentifierState, + "beforeDoctypeSystemIdentifier":self.beforeDoctypeSystemIdentifierState, + "doctypeSystemIdentifierDoubleQuoted":self.doctypeSystemIdentifierDoubleQuotedState, + "doctypeSystemIdentifierSingleQuoted":self.doctypeSystemIdentifierSingleQuotedState, + "afterDoctypeSystemIdentifier":self.afterDoctypeSystemIdentifierState, + "bogusDoctype":self.bogusDoctypeState + } + + # Setup the initial tokenizer state + self.contentModelFlag = contentModelFlags["PCDATA"] + self.escapeFlag = False + self.lastFourChars = [] + self.state = self.states["data"] + + # The current token being created + self.currentToken = None + + # Tokens to be processed. + self.tokenQueue = [] + + def __iter__(self): + """ This is where the magic happens. + + We do our usually processing through the states and when we have a token + to return we yield the token which pauses processing until the next token + is requested. + """ + self.tokenQueue = [] + # Start processing. When EOF is reached self.state will return False + # instead of True and the loop will terminate. + while self.state(): + while self.stream.errors: + yield {"type": "ParseError", "data": self.stream.errors.pop(0)} + while self.tokenQueue: + yield self.tokenQueue.pop(0) + + # Below are various helper functions the tokenizer states use worked out. 
+ def processSolidusInTag(self): + """If the next character is a '>', convert the currentToken into + an EmptyTag + """ + + # We need to consume another character to make sure it's a ">" + data = self.stream.char() + + if self.currentToken["type"] == "StartTag" and data == u">": + self.currentToken["type"] = "EmptyTag" + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Solidus (/) incorrectly placed in tag.")}) + + # The character we just consumed need to be put back on the stack so it + # doesn't get lost... + self.stream.unget(data) + + def consumeNumberEntity(self, isHex): + """This function returns either U+FFFD or the character based on the + decimal or hexadecimal representation. It also discards ";" if present. + If not present self.tokenQueue.append({"type": "ParseError"}) is invoked. + """ + + # XXX More need to be done here. For instance, #13 should prolly be + # converted to #10 so we don't get \r (#13 is \r right?) in the DOM and + # such. Thoughts on this appreciated. + allowed = digits + radix = 10 + if isHex: + allowed = hexDigits + radix = 16 + + charStack = [] + + # Consume all the characters that are in range while making sure we + # don't hit an EOF. + c = self.stream.char() + while c in allowed and c is not EOF: + charStack.append(c) + c = self.stream.char() + + # Convert the set of characters consumed to an int. + charAsInt = int("".join(charStack), radix) + + if charAsInt == 13: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Incorrect CR newline entity. Replaced with LF.")}) + charAsInt = 10 + elif 127 < charAsInt < 160: + # If the integer is between 127 and 160 (so 128 and bigger and 159 + # and smaller) we need to do the "windows trick". + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Entity used with illegal number (windows-1252 reference).")}) + + charAsInt = entitiesWindows1252[charAsInt - 128] + + # 0 is not a good number, neither are illegal Unicode code points (higher than 0x10FFFF) or surrogate characters (in the range 0xD800 to 0xDFFF). + if 0 < charAsInt and charAsInt <= 1114111 and not (55296 <= charAsInt and charAsInt <= 57343): + try: + # XXX We should have a separate function that does "int" to + # "unicodestring" conversion since this doesn't always work + # according to hsivonen. Also, unichr has a limitation of 65535 + char = unichr(charAsInt) + except: + try: + char = eval("u'\\U%08x'" % charAsInt) + except: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Numeric entity couldn't be converted to character (codepoint: U+%08x).") % charAsInt}) + else: + char = u"\uFFFD" + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Numeric entity represents an illegal codepoint: U+%08x.") % charAsInt}) + + # Discard the ; if present. Otherwise, put it back on the queue and + # invoke parseError on parser. + if c != u";": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Numeric entity didn't end with ';'.")}) + self.stream.unget(c) + + return char + + def consumeEntity(self, fromAttribute=False): + char = None + charStack = [self.stream.char()] + if charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&"): + self.stream.unget(charStack) + elif charStack[0] == u"#": + # We might have a number entity here. 
+ charStack.extend([self.stream.char(), self.stream.char()]) + if EOF in charStack[:2]: + # If we reach the end of the file put everything up to EOF + # back in the queue + charStack = charStack[:charStack.index(EOF)] + self.stream.unget(charStack) + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Numeric entity expected. Got end of file instead.")}) + else: + if charStack[1].lower() == u"x" \ + and charStack[2] in hexDigits: + # Hexadecimal entity detected. + self.stream.unget(charStack[2]) + char = self.consumeNumberEntity(True) + elif charStack[1] in digits: + # Decimal entity detected. + self.stream.unget(charStack[1:]) + char = self.consumeNumberEntity(False) + else: + # No number entity detected. + self.stream.unget(charStack) + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Numeric entity expected but none found.")}) + else: + # At this point in the process might have named entity. Entities + # are stored in the global variable "entities". + # + # Consume characters and compare to these to a substring of the + # entity names in the list until the substring no longer matches. + filteredEntityList = [e for e in entities if \ + e.startswith(charStack[0])] + + def entitiesStartingWith(name): + return [e for e in filteredEntityList if e.startswith(name)] + + while charStack[-1] != EOF and\ + entitiesStartingWith("".join(charStack)): + charStack.append(self.stream.char()) + + # At this point we have a string that starts with some characters + # that may match an entity + entityName = None + + # Try to find the longest entity the string will match to take care + # of ¬i for instance. + for entityLength in xrange(len(charStack)-1,1,-1): + possibleEntityName = "".join(charStack[:entityLength]) + if possibleEntityName in entities: + entityName = possibleEntityName + break + + if entityName is not None: + if entityName[-1] != ";": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Named entity didn't end with ';'.")}) + if entityName[-1] != ";" and fromAttribute and \ + (charStack[entityLength] in asciiLetters + or charStack[entityLength] in digits): + self.stream.unget(charStack) + else: + char = entities[entityName] + self.stream.unget(charStack[entityLength:]) + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Named entity expected. Got none.")}) + self.stream.unget(charStack) + return char + + def processEntityInAttribute(self): + """This method replaces the need for "entityInAttributeValueState". + """ + entity = self.consumeEntity(True) + if entity: + self.currentToken["data"][-1][1] += entity + else: + self.currentToken["data"][-1][1] += u"&" + + def emitCurrentToken(self): + """This method is a generic handler for emitting the tags. It also sets + the state to "data" because that's what's needed after a token has been + emitted. + """ + token = self.currentToken + # Add token to the queue to be yielded + if (token["type"] in ("StartTag", "EndTag", "EmptyTag")): + if self.lowercaseElementName: + token["name"] = token["name"].translate(asciiUpper2Lower) + if token["type"] == "EndTag" and token["data"]: + self.tokenQueue.append({"type":"ParseError", + "data":_(u"End tag contains unexpected attributes.")}) + self.tokenQueue.append(token) + self.state = self.states["data"] + + + # Below are the various tokenizer states worked out. + + # XXX AT Perhaps we should have Hixie run some evaluation on billions of + # documents to figure out what the order of the various if and elif + # statements should be. 
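consumeNumberEntity above applies the "windows trick": numeric references in the 128-159 range are remapped through windows-1252 instead of becoming C1 control characters. A hedged standalone sketch of just that remapping (the helper name is not part of the patch; the lookup table is the one the tokenizer itself imports):

from html5lib.constants import entitiesWindows1252

def decode_numeric_reference(digits_text):
    code = int(digits_text)               # e.g. "153" from "&#153;"
    if 127 < code < 160:
        # same indexing as the tokenizer: offsets 128..159 map into the table
        code = entitiesWindows1252[code - 128]
    return unichr(code)

# decode_numeric_reference("153") == u"\u2122" (TRADE MARK SIGN),
# not the C1 control character U+0099.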
+ + def dataState(self): + data = self.stream.char() + + # Keep a charbuffer to handle the escapeFlag + if self.contentModelFlag in\ + (contentModelFlags["CDATA"], contentModelFlags["RCDATA"]): + if len(self.lastFourChars) == 4: + self.lastFourChars.pop(0) + self.lastFourChars.append(data) + + # The rest of the logic + if data == "&" and self.contentModelFlag in\ + (contentModelFlags["PCDATA"], contentModelFlags["RCDATA"]) and not\ + self.escapeFlag: + self.state = self.states["entityData"] + elif data == "-" and self.contentModelFlag in\ + (contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and not\ + self.escapeFlag and "".join(self.lastFourChars) == "<!--": + self.escapeFlag = True + self.tokenQueue.append({"type": "Characters", "data":data}) + elif data == "<" and (self.contentModelFlag ==\ + contentModelFlags["PCDATA"] or (self.contentModelFlag in + (contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and\ + self.escapeFlag == False)): + self.state = self.states["tagOpen"] + elif data == ">" and self.contentModelFlag in\ + (contentModelFlags["CDATA"], contentModelFlags["RCDATA"]) and\ + self.escapeFlag and "".join(self.lastFourChars)[1:] == "-->": + self.escapeFlag = False + self.tokenQueue.append({"type": "Characters", "data":data}) + elif data == EOF: + # Tokenization ends. + return False + elif data in spaceCharacters: + # Directly after emitting a token you switch back to the "data + # state". At that point spaceCharacters are important so they are + # emitted separately. + self.tokenQueue.append({"type": "SpaceCharacters", "data": + data + self.stream.charsUntil(spaceCharacters, True)}) + else: + self.tokenQueue.append({"type": "Characters", "data": + data + self.stream.charsUntil(("&", "<", ">", "-"))}) + return True + + def entityDataState(self): + entity = self.consumeEntity() + if entity: + self.tokenQueue.append({"type": "Characters", "data": entity}) + else: + self.tokenQueue.append({"type": "Characters", "data": u"&"}) + self.state = self.states["data"] + return True + + def tagOpenState(self): + data = self.stream.char() + if self.contentModelFlag == contentModelFlags["PCDATA"]: + if data == u"!": + self.state = self.states["markupDeclarationOpen"] + elif data == u"/": + self.state = self.states["closeTagOpen"] + elif data in asciiLetters: + self.currentToken =\ + {"type": "StartTag", "name": data, "data": []} + self.state = self.states["tagName"] + elif data == u">": + # XXX In theory it could be something besides a tag name. But + # do we really care? + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected tag name. Got '>' instead.")}) + self.tokenQueue.append({"type": "Characters", "data": u"<>"}) + self.state = self.states["data"] + elif data == u"?": + # XXX In theory it could be something besides a tag name. But + # do we really care? + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected tag name. Got '?' instead (HTML doesn't " + "support processing instructions).")}) + self.stream.unget(data) + self.state = self.states["bogusComment"] + else: + # XXX + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected tag name. Got something else instead")}) + self.tokenQueue.append({"type": "Characters", "data": u"<"}) + self.stream.unget(data) + self.state = self.states["data"] + else: + # We know the content model flag is set to either RCDATA or CDATA + # now because this state can never be entered with the PLAINTEXT + # flag. 
+ if data == u"/": + self.state = self.states["closeTagOpen"] + else: + self.tokenQueue.append({"type": "Characters", "data": u"<"}) + self.stream.unget(data) + self.state = self.states["data"] + return True + + def closeTagOpenState(self): + if (self.contentModelFlag in (contentModelFlags["RCDATA"], + contentModelFlags["CDATA"])): + if self.currentToken: + charStack = [] + + # So far we know that "</" has been consumed. We now need to know + # whether the next few characters match the name of last emitted + # start tag which also happens to be the currentToken. We also need + # to have the character directly after the characters that could + # match the start tag name. + for x in xrange(len(self.currentToken["name"]) + 1): + charStack.append(self.stream.char()) + # Make sure we don't get hit by EOF + if charStack[-1] == EOF: + break + + # Since this is just for checking. We put the characters back on + # the stack. + self.stream.unget(charStack) + + if self.currentToken \ + and self.currentToken["name"].lower() == "".join(charStack[:-1]).lower() \ + and charStack[-1] in (spaceCharacters | + frozenset((u">", u"/", u"<", EOF))): + # Because the characters are correct we can safely switch to + # PCDATA mode now. This also means we don't have to do it when + # emitting the end tag token. + self.contentModelFlag = contentModelFlags["PCDATA"] + else: + self.tokenQueue.append({"type": "Characters", "data": u"</"}) + self.state = self.states["data"] + + # Need to return here since we don't want the rest of the + # method to be walked through. + return True + + data = self.stream.char() + if data in asciiLetters: + self.currentToken = {"type":"EndTag", "name":data, "data":[]} + self.state = self.states["tagName"] + elif data == u">": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected closing tag. Got '>' instead. Ignoring '</>'.")}) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected closing tag. Unexpected end of file.")}) + self.tokenQueue.append({"type": "Characters", "data": u"</"}) + self.state = self.states["data"] + else: + # XXX data can be _'_... + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected closing tag. Unexpected character '%s' found.") % (data,)}) + self.stream.unget(data) + self.state = self.states["bogusComment"] + return True + + def tagNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.states["beforeAttributeName"] + elif data in asciiLetters: + self.currentToken["name"] += data +\ + self.stream.charsUntil(asciiLetters, True) + elif data == u">": + self.emitCurrentToken() + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in the tag name.")}) + self.emitCurrentToken() + elif data == u"/": + self.processSolidusInTag() + self.state = self.states["beforeAttributeName"] + else: + self.currentToken["name"] += data + return True + + def beforeAttributeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data in asciiLetters: + self.currentToken["data"].append([data, ""]) + self.state = self.states["attributeName"] + elif data == u">": + self.emitCurrentToken() + elif data == u"/": + self.processSolidusInTag() + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file. 
Expected attribute name instead.")}) + self.emitCurrentToken() + else: + self.currentToken["data"].append([data, ""]) + self.state = self.states["attributeName"] + return True + + def attributeNameState(self): + data = self.stream.char() + leavingThisState = True + emitToken = False + if data == u"=": + self.state = self.states["beforeAttributeValue"] + elif data in asciiLetters: + self.currentToken["data"][-1][0] += data +\ + self.stream.charsUntil(asciiLetters, True) + leavingThisState = False + elif data == u">": + # XXX If we emit here the attributes are converted to a dict + # without being checked and when the code below runs we error + # because data is a dict not a list + emitToken = True + elif data in spaceCharacters: + self.state = self.states["afterAttributeName"] + elif data == u"/": + self.processSolidusInTag() + self.state = self.states["beforeAttributeName"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in attribute name.")}) + self.state = self.states["data"] + emitToken = True + else: + self.currentToken["data"][-1][0] += data + leavingThisState = False + + if leavingThisState: + # Attributes are not dropped at this stage. That happens when the + # start tag token is emitted so values can still be safely appended + # to attributes, but we do want to report the parse error in time. + if self.lowercaseAttrName: + self.currentToken["data"][-1][0] = ( + self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) + for name, value in self.currentToken["data"][:-1]: + if self.currentToken["data"][-1][0] == name: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Dropped duplicate attribute on tag.")}) + break + # XXX Fix for above XXX + if emitToken: + self.emitCurrentToken() + return True + + def afterAttributeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data == u"=": + self.state = self.states["beforeAttributeValue"] + elif data == u">": + self.emitCurrentToken() + elif data in asciiLetters: + self.currentToken["data"].append([data, ""]) + self.state = self.states["attributeName"] + elif data == u"/": + self.processSolidusInTag() + self.state = self.states["beforeAttributeName"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file. Expected = or end of tag.")}) + self.emitCurrentToken() + else: + self.currentToken["data"].append([data, ""]) + self.state = self.states["attributeName"] + return True + + def beforeAttributeValueState(self): + data = self.stream.char() + if data in spaceCharacters: + self.stream.charsUntil(spaceCharacters, True) + elif data == u"\"": + self.state = self.states["attributeValueDoubleQuoted"] + elif data == u"&": + self.state = self.states["attributeValueUnQuoted"] + self.stream.unget(data); + elif data == u"'": + self.state = self.states["attributeValueSingleQuoted"] + elif data == u">": + self.emitCurrentToken() + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file. 
Expected attribute value.")}) + self.emitCurrentToken() + else: + self.currentToken["data"][-1][1] += data + self.state = self.states["attributeValueUnQuoted"] + return True + + def attributeValueDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.states["beforeAttributeName"] + elif data == u"&": + self.processEntityInAttribute() + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in attribute value (\").")}) + self.emitCurrentToken() + else: + self.currentToken["data"][-1][1] += data +\ + self.stream.charsUntil(("\"", u"&")) + return True + + def attributeValueSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.states["beforeAttributeName"] + elif data == u"&": + self.processEntityInAttribute() + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in attribute value (').")}) + self.emitCurrentToken() + else: + self.currentToken["data"][-1][1] += data +\ + self.stream.charsUntil(("'", u"&")) + return True + + def attributeValueUnQuotedState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.states["beforeAttributeName"] + elif data == u"&": + self.processEntityInAttribute() + elif data == u">": + self.emitCurrentToken() + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in attribute value.")}) + self.emitCurrentToken() + else: + self.currentToken["data"][-1][1] += data + self.stream.charsUntil( \ + frozenset(("&", ">","<")) | spaceCharacters) + return True + + def bogusCommentState(self): + # Make a new comment token and give it as value all the characters + # until the first > or EOF (charsUntil checks for EOF automatically) + # and emit it. + self.tokenQueue.append( + {"type": "Comment", "data": self.stream.charsUntil((u">"))}) + + # Eat the character directly after the bogus comment which is either a + # ">" or an EOF. + self.stream.char() + self.state = self.states["data"] + return True + + def markupDeclarationOpenState(self): + charStack = [self.stream.char(), self.stream.char()] + if charStack == [u"-", u"-"]: + self.currentToken = {"type": "Comment", "data": u""} + self.state = self.states["commentStart"] + else: + for x in xrange(5): + charStack.append(self.stream.char()) + # Put in explicit EOF check + if (not EOF in charStack and + "".join(charStack).upper() == u"DOCTYPE"): + self.currentToken = {"type":"Doctype", "name":u"", + "publicId":None, "systemId":None, "correct":True} + self.state = self.states["doctype"] + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected '--' or 'DOCTYPE'. 
Not found.")}) + self.stream.unget(charStack) + self.state = self.states["bogusComment"] + return True + + def commentStartState(self): + data = self.stream.char() + if data == "-": + self.state = self.states["commentStartDash"] + elif data == ">": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Incorrect comment.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in comment.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["data"] += data + self.stream.charsUntil(u"-") + self.state = self.states["comment"] + return True + + def commentStartDashState(self): + data = self.stream.char() + if data == "-": + self.state = self.states["commentEnd"] + elif data == ">": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Incorrect comment.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in comment.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["data"] += "-" + data + self.stream.charsUntil(u"-") + self.state = self.states["comment"] + return True + + + def commentState(self): + data = self.stream.char() + if data == u"-": + self.state = self.states["commentEndDash"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in comment.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["data"] += data + self.stream.charsUntil(u"-") + return True + + def commentEndDashState(self): + data = self.stream.char() + if data == u"-": + self.state = self.states["commentEnd"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in comment (-)")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["data"] += u"-" + data +\ + self.stream.charsUntil(u"-") + # Consume the next character which is either a "-" or an EOF as + # well so if there's a "-" directly after the "-" we go nicely to + # the "comment end state" without emitting a ParseError() there. 
+ self.stream.char() + return True + + def commentEndState(self): + data = self.stream.char() + if data == u">": + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == u"-": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected '-' after '--' found in comment.")}) + self.currentToken["data"] += data + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in comment (--).")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + # XXX + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected character in comment found.")}) + self.currentToken["data"] += u"--" + data + self.state = self.states["comment"] + return True + + def doctypeState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.states["beforeDoctypeName"] + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"No space after literal string 'DOCTYPE'.")}) + self.stream.unget(data) + self.state = self.states["beforeDoctypeName"] + return True + + def beforeDoctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == u">": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected > character. Expected DOCTYPE name.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file. Expected DOCTYPE name.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["name"] = data + self.state = self.states["doctypeName"] + return True + + def doctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + self.state = self.states["afterDoctypeName"] + elif data == u">": + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE name.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["name"] += data + return True + + def afterDoctypeNameState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == u">": + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.currentToken["correct"] = False + self.stream.unget(data) + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + charStack = [data] + for x in xrange(5): + charStack.append(self.stream.char()) + if EOF not in charStack and\ + "".join(charStack).translate(asciiUpper2Lower) == "public": + self.state = self.states["beforeDoctypePublicIdentifier"] + elif EOF not in charStack and\ + "".join(charStack).translate(asciiUpper2Lower) == "system": + self.state = self.states["beforeDoctypeSystemIdentifier"] + else: + self.stream.unget(charStack) + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Expected space or '>'. 
Got '%s'") % (data,)}) + self.state = self.states["bogusDoctype"] + return True + + def beforeDoctypePublicIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["publicId"] = u"" + self.state = self.states["doctypePublicIdentifierDoubleQuoted"] + elif data == "'": + self.currentToken["publicId"] = u"" + self.state = self.states["doctypePublicIdentifierSingleQuoted"] + elif data == ">": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected character in DOCTYPE.")}) + self.state = self.states["bogusDoctype"] + return True + + def doctypePublicIdentifierDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.states["afterDoctypePublicIdentifier"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["publicId"] += data + return True + + def doctypePublicIdentifierSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.states["afterDoctypePublicIdentifier"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["publicId"] += data + return True + + def afterDoctypePublicIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["systemId"] = u"" + self.state = self.states["doctypeSystemIdentifierDoubleQuoted"] + elif data == "'": + self.currentToken["systemId"] = u"" + self.state = self.states["doctypeSystemIdentifierSingleQuoted"] + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected character in DOCTYPE.")}) + self.state = self.states["bogusDoctype"] + return True + + def beforeDoctypeSystemIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == "\"": + self.currentToken["systemId"] = u"" + self.state = self.states["doctypeSystemIdentifierDoubleQuoted"] + elif data == "'": + self.currentToken["systemId"] = u"" + self.state = self.states["doctypeSystemIdentifierSingleQuoted"] + elif data == ">": + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected character in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + 
_(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected character in DOCTYPE.")}) + self.state = self.states["bogusDoctype"] + return True + + def doctypeSystemIdentifierDoubleQuotedState(self): + data = self.stream.char() + if data == "\"": + self.state = self.states["afterDoctypeSystemIdentifier"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["systemId"] += data + return True + + def doctypeSystemIdentifierSingleQuotedState(self): + data = self.stream.char() + if data == "'": + self.state = self.states["afterDoctypeSystemIdentifier"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.currentToken["systemId"] += data + return True + + def afterDoctypeSystemIdentifierState(self): + data = self.stream.char() + if data in spaceCharacters: + pass + elif data == ">": + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in DOCTYPE.")}) + self.currentToken["correct"] = False + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected character in DOCTYPE.")}) + self.state = self.states["bogusDoctype"] + return True + + def bogusDoctypeState(self): + data = self.stream.char() + self.currentToken["correct"] = False + if data == u">": + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + elif data == EOF: + # XXX EMIT + self.stream.unget(data) + self.tokenQueue.append({"type": "ParseError", "data": + _(u"Unexpected end of file in bogus doctype.")}) + self.tokenQueue.append(self.currentToken) + self.state = self.states["data"] + else: + pass + return True diff --git a/lib/venus/planet/vendor/html5lib/treebuilders/__init__.py b/lib/venus/planet/vendor/html5lib/treebuilders/__init__.py new file mode 100755 index 0000000..7a421b8 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treebuilders/__init__.py @@ -0,0 +1,65 @@ +"""A collection of modules for building different kinds of tree from +HTML documents. + +To create a treebuilder for a new type of tree, you need to do +implement several things: + +1) A set of classes for various types of elements: Document, Doctype, +Comment, Element. These must implement the interface of +_base.treebuilders.Node (although comment nodes have a different +signature for their constructor, see treebuilders.simpletree.Comment) +Textual content may also be implemented as another node type, or not, as +your tree implementation requires. + +2) A treebuilder object (called TreeBuilder by convention) that +inherits from treebuilders._base.TreeBuilder. 
This has 4 required attributes: +documentClass - the class to use for the bottommost node of a document +elementClass - the class to use for HTML Elements +commentClass - the class to use for comments +doctypeClass - the class to use for doctypes +It also has one required method: +getDocument - Returns the root node of the complete document tree + +3) If you wish to run the unit tests, you must also create a +testSerializer method on your treebuilder which accepts a node and +returns a string containing Node and its children serialized according +to the format used in the unittests + +The supplied simpletree module provides a python-only implementation +of a full treebuilder and is a useful reference for the semantics of +the various methods. +""" + +treeBuilderCache = {} + +def getTreeBuilder(treeType, implementation=None, **kwargs): + """Get a TreeBuilder class for various types of tree with built-in support + + treeType - the name of the tree type required (case-insensitive). Supported + values are "simpletree", "dom", "etree" and "beautifulsoup" + + "simpletree" - a built-in DOM-ish tree type with support for some + more pythonic idioms. + "dom" - The xml.dom.minidom DOM implementation + "etree" - A generic builder for tree implementations exposing an + elementtree-like interface (known to work with + ElementTree, cElementTree and lxml.etree). + "beautifulsoup" - Beautiful soup (if installed) + + implementation - (Currently applies to the "etree" tree type only). A module + implementing the tree type e.g. xml.etree.ElementTree or + lxml.etree.""" + + treeType = treeType.lower() + if treeType not in treeBuilderCache: + if treeType in ("dom", "simpletree"): + mod = __import__(treeType, globals()) + treeBuilderCache[treeType] = mod.TreeBuilder + elif treeType == "beautifulsoup": + import soup + treeBuilderCache[treeType] = soup.TreeBuilder + elif treeType == "etree": + import etree + # XXX: NEVER cache here, caching is done in the etree submodule + return etree.getETreeModule(implementation, **kwargs).TreeBuilder + return treeBuilderCache.get(treeType) diff --git a/lib/venus/planet/vendor/html5lib/treebuilders/_base.py b/lib/venus/planet/vendor/html5lib/treebuilders/_base.py new file mode 100755 index 0000000..a5ae31d --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treebuilders/_base.py @@ -0,0 +1,330 @@ +from html5lib.constants import scopingElements, tableInsertModeElements +try: + frozenset +except NameError: + # Import from the sets module for python 2.3 + from sets import Set as set + from sets import ImmutableSet as frozenset + +# The scope markers are inserted when entering buttons, object elements, +# marquees, table cells, and table captions, and are used to prevent formatting +# from "leaking" into tables, buttons, object elements, and marquees. +Marker = None + +#XXX - TODO; make the default interface more ElementTree-like +# rather than DOM-like + +class Node(object): + def __init__(self, name): + """Node representing an item in the tree. + name - The tag name associated with the node + parent - The parent of the current node (or None for the document node) + value - The value of the current node (applies to text nodes and + comments + attributes - a dict holding name, value pairs for attributes of the node + childNodes - a list of child nodes of the current node. 
This must + include all elements but not necessarily other node types + _flags - A list of miscellaneous flags that can be set on the node + """ + self.name = name + self.parent = None + self.value = None + self.attributes = {} + self.childNodes = [] + self._flags = [] + + def __unicode__(self): + attributesStr = " ".join(["%s=\"%s\""%(name, value) + for name, value in + self.attributes.iteritems()]) + if attributesStr: + return "<%s %s>"%(self.name,attributesStr) + else: + return "<%s>"%(self.name) + + def __repr__(self): + return "<%s %s>" % (self.__class__, self.name) + + def appendChild(self, node): + """Insert node as a child of the current node + """ + raise NotImplementedError + + def insertText(self, data, insertBefore=None): + """Insert data as text in the current node, positioned before the + start of node insertBefore or to the end of the node's text. + """ + raise NotImplementedError + + def insertBefore(self, node, refNode): + """Insert node as a child of the current node, before refNode in the + list of child nodes. Raises ValueError if refNode is not a child of + the current node""" + raise NotImplementedError + + def removeChild(self, node): + """Remove node from the children of the current node + """ + raise NotImplementedError + + def reparentChildren(self, newParent): + """Move all the children of the current node to newParent. + This is needed so that trees that don't store text as nodes move the + text in the correct way + """ + #XXX - should this method be made more general? + for child in self.childNodes: + newParent.appendChild(child) + self.childNodes = [] + + def cloneNode(self): + """Return a shallow copy of the current node i.e. a node with the same + name and attributes but with no parent or child nodes + """ + raise NotImplementedError + + + def hasContent(self): + """Return true if the node has children or text, false otherwise + """ + raise NotImplementedError + +class TreeBuilder(object): + """Base treebuilder implementation + documentClass - the class to use for the bottommost node of a document + elementClass - the class to use for HTML Elements + commentClass - the class to use for comments + doctypeClass - the class to use for doctypes + """ + + #Document class + documentClass = None + + #The class to use for creating a node + elementClass = None + + #The class to use for creating comments + commentClass = None + + #The class to use for creating doctypes + doctypeClass = None + + #Fragment class + fragmentClass = None + + def __init__(self): + self.reset() + + def reset(self): + self.openElements = [] + self.activeFormattingElements = [] + + #XXX - rename these to headElement, formElement + self.headPointer = None + self.formPointer = None + + self.insertFromTable = False + + self.document = self.documentClass() + + def elementInScope(self, target, tableVariant=False): + # Exit early when possible. + if self.openElements[-1].name == target: + return True + + # AT Use reverse instead of [::-1] when we can rely on Python 2.4 + # AT How about while True and simply set node to [-1] and set it to + # [-2] at the end... 
+ for node in self.openElements[::-1]: + if node.name == target: + return True + elif node.name == "table": + return False + elif not tableVariant and node.name in scopingElements: + return False + elif node.name == "html": + return False + assert False # We should never reach this point + + def reconstructActiveFormattingElements(self): + # Within this algorithm the order of steps described in the + # specification is not quite the same as the order of steps in the + # code. It should still do the same though. + + # Step 1: stop the algorithm when there's nothing to do. + if not self.activeFormattingElements: + return + + # Step 2 and step 3: we start with the last element. So i is -1. + i = -1 + entry = self.activeFormattingElements[i] + if entry == Marker or entry in self.openElements: + return + + # Step 6 + while entry != Marker and entry not in self.openElements: + # Step 5: let entry be one earlier in the list. + i -= 1 + try: + entry = self.activeFormattingElements[i] + except: + # Step 4: at this point we need to jump to step 8. By not doing + # i += 1 which is also done in step 7 we achieve that. + break + while True: + # Step 7 + i += 1 + + # Step 8 + clone = self.activeFormattingElements[i].cloneNode() + + # Step 9 + element = self.insertElement(clone.name, clone.attributes) + + # Step 10 + self.activeFormattingElements[i] = element + + # Step 11 + if element == self.activeFormattingElements[-1]: + break + + def clearActiveFormattingElements(self): + entry = self.activeFormattingElements.pop() + while self.activeFormattingElements and entry != Marker: + entry = self.activeFormattingElements.pop() + + def elementInActiveFormattingElements(self, name): + """Check if an element exists between the end of the active + formatting elements and the last marker. If it does, return it, else + return false""" + + for item in self.activeFormattingElements[::-1]: + # Check for Marker first because if it's a Marker it doesn't have a + # name attribute. 
+ if item == Marker: + break + elif item.name == name: + return item + return False + + def insertDoctype(self, name, publicId, systemId): + doctype = self.doctypeClass(name) + doctype.publicId = publicId + doctype.systemId = systemId + self.document.appendChild(doctype) + + def insertComment(self, data, parent=None): + if parent is None: + parent = self.openElements[-1] + parent.appendChild(self.commentClass(data)) + + def createElement(self, name, attributes): + """Create an element but don't insert it anywhere""" + element = self.elementClass(name) + element.attributes = attributes + return element + + def _getInsertFromTable(self): + return self._insertFromTable + + def _setInsertFromTable(self, value): + """Switch the function used to insert an element from the + normal one to the misnested table one and back again""" + self._insertFromTable = value + if value: + self.insertElement = self.insertElementTable + else: + self.insertElement = self.insertElementNormal + + insertFromTable = property(_getInsertFromTable, _setInsertFromTable) + + def insertElementNormal(self, name, attributes): + element = self.elementClass(name) + element.attributes = attributes + self.openElements[-1].appendChild(element) + self.openElements.append(element) + return element + + def insertElementTable(self, name, attributes): + """Create an element and insert it into the tree""" + element = self.elementClass(name) + element.attributes = attributes + if self.openElements[-1].name not in tableInsertModeElements: + return self.insertElementNormal(name, attributes) + else: + #We should be in the InTable mode. This means we want to do + #special magic element rearranging + parent, insertBefore = self.getTableMisnestedNodePosition() + if insertBefore is None: + parent.appendChild(element) + else: + parent.insertBefore(element, insertBefore) + self.openElements.append(element) + return element + + def insertText(self, data, parent=None): + """Insert text data.""" + if parent is None: + parent = self.openElements[-1] + + if (not(self.insertFromTable) or (self.insertFromTable and + self.openElements[-1].name not in + tableInsertModeElements)): + parent.insertText(data) + else: + #We should be in the InTable mode. This means we want to do + #special magic element rearranging + parent, insertBefore = self.getTableMisnestedNodePosition() + parent.insertText(data, insertBefore) + + def getTableMisnestedNodePosition(self): + """Get the foster parent element, and sibling to insert before + (or None) when inserting a misnested table node""" + #The foster parent element is the one which comes before the most + #recently opened table element + #XXX - this is really inelegant + lastTable=None + fosterParent = None + insertBefore = None + for elm in self.openElements[::-1]: + if elm.name == u"table": + lastTable = elm + break + if lastTable: + #XXX - we should really check that this parent is actually a + #node here + if lastTable.parent: + fosterParent = lastTable.parent + insertBefore = lastTable + else: + fosterParent = self.openElements[ + self.openElements.index(lastTable) - 1] + else: + fosterParent = self.openElements[0] + return fosterParent, insertBefore + + def generateImpliedEndTags(self, exclude=None): + name = self.openElements[-1].name + # XXX td, th and tr are not actually needed + if (name in frozenset(("dd", "dt", "li", "p", "td", "th", "tr")) + and name != exclude): + self.openElements.pop() + # XXX This is not entirely what the specification says. We should + # investigate it more closely. 
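The insertFromTable switch and getTableMisnestedNodePosition above implement foster parenting: content that cannot legally sit inside a <table> is inserted before the most recently opened table instead. A hedged end-to-end sketch (the sample markup is arbitrary and the described tree shape is only indicative):

import html5lib

doc = html5lib.HTMLParser().parse("<table>oops<tr><td>cell</td></tr></table>")
# The character data "oops" is foster-parented: in the resulting tree it sits
# in <body> just before the <table> element, while <tr>/<td> stay inside the
# (implied) <tbody>.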
+ self.generateImpliedEndTags(exclude) + + def getDocument(self): + "Return the final tree" + return self.document + + def getFragment(self): + "Return the final fragment" + #assert self.innerHTML + fragment = self.fragmentClass() + self.openElements[0].reparentChildren(fragment) + return fragment + + def testSerializer(self, node): + """Serialize the subtree of node in the format required by unit tests + node - the node from which to start serializing""" + raise NotImplementedError diff --git a/lib/venus/planet/vendor/html5lib/treebuilders/dom.py b/lib/venus/planet/vendor/html5lib/treebuilders/dom.py new file mode 100644 index 0000000..1259a24 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treebuilders/dom.py @@ -0,0 +1,203 @@ +import _base +from xml.dom import minidom, Node, XML_NAMESPACE, XMLNS_NAMESPACE + +import re +illegal_xml_chars = re.compile("[\x01-\x08\x0B\x0C\x0E-\x1F]") + +class AttrList: + def __init__(self, element): + self.element = element + def __iter__(self): + return self.element.attributes.items().__iter__() + def __setitem__(self, name, value): + value=illegal_xml_chars.sub(u'\uFFFD',value) + self.element.setAttribute(name, value) + def items(self): + return self.element.attributes.items() + def keys(self): + return self.element.attributes.keys() + def __getitem__(self, name): + return self.element.getAttribute(name) + +class NodeBuilder(_base.Node): + def __init__(self, element): + _base.Node.__init__(self, element.nodeName) + self.element = element + + def appendChild(self, node): + node.parent = self + self.element.appendChild(node.element) + + def insertText(self, data, insertBefore=None): + data=illegal_xml_chars.sub(u'\uFFFD',data) + text = self.element.ownerDocument.createTextNode(data) + if insertBefore: + self.element.insertBefore(text, insertBefore.element) + else: + self.element.appendChild(text) + + def insertBefore(self, node, refNode): + self.element.insertBefore(node.element, refNode.element) + node.parent = self + + def removeChild(self, node): + if node.element.parentNode == self.element: + self.element.removeChild(node.element) + node.parent = None + + def reparentChildren(self, newParent): + while self.element.hasChildNodes(): + child = self.element.firstChild + self.element.removeChild(child) + newParent.element.appendChild(child) + self.childNodes = [] + + def getAttributes(self): + return AttrList(self.element) + + def setAttributes(self, attributes): + if attributes: + for name, value in attributes.items(): + value=illegal_xml_chars.sub(u'\uFFFD',value) + self.element.setAttribute(name, value) + + attributes = property(getAttributes, setAttributes) + + def cloneNode(self): + return NodeBuilder(self.element.cloneNode(False)) + + def hasContent(self): + return self.element.hasChildNodes() + +class TreeBuilder(_base.TreeBuilder): + def documentClass(self): + self.dom = minidom.getDOMImplementation().createDocument(None,None,None) + return self + + def insertDoctype(self, name, publicId, systemId): + domimpl = minidom.getDOMImplementation() + doctype = domimpl.createDocumentType(name, publicId, systemId) + self.document.appendChild(NodeBuilder(doctype)) + doctype.ownerDocument = self.dom + + def elementClass(self, name): + return NodeBuilder(self.dom.createElement(name)) + + def commentClass(self, data): + return NodeBuilder(self.dom.createComment(data)) + + def fragmentClass(self): + return NodeBuilder(self.dom.createDocumentFragment()) + + def appendChild(self, node): + self.dom.appendChild(node.element) + + def testSerializer(self, 
element): + return testSerializer(element) + + def getDocument(self): + return self.dom + + def getFragment(self): + return _base.TreeBuilder.getFragment(self).element + + def insertText(self, data, parent=None): + data=illegal_xml_chars.sub(u'\uFFFD',data) + if parent <> self: + _base.TreeBuilder.insertText(self, data, parent) + else: + # HACK: allow text nodes as children of the document node + if hasattr(self.dom, '_child_node_types'): + if not Node.TEXT_NODE in self.dom._child_node_types: + self.dom._child_node_types=list(self.dom._child_node_types) + self.dom._child_node_types.append(Node.TEXT_NODE) + self.dom.appendChild(self.dom.createTextNode(data)) + + name = None + +def testSerializer(element): + element.normalize() + rv = [] + def serializeElement(element, indent=0): + if element.nodeType == Node.DOCUMENT_TYPE_NODE: + if element.name: + rv.append("|%s<!DOCTYPE %s>"%(' '*indent, element.name)) + else: + rv.append("|%s<!DOCTYPE >"%(' '*indent,)) + elif element.nodeType == Node.DOCUMENT_NODE: + rv.append("#document") + elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: + rv.append("#document-fragment") + elif element.nodeType == Node.COMMENT_NODE: + rv.append("|%s<!-- %s -->"%(' '*indent, element.nodeValue)) + elif element.nodeType == Node.TEXT_NODE: + rv.append("|%s\"%s\"" %(' '*indent, element.nodeValue)) + else: + rv.append("|%s<%s>"%(' '*indent, element.nodeName)) + if element.hasAttributes(): + for name, value in element.attributes.items(): + rv.append('|%s%s="%s"' % (' '*(indent+2), name, value)) + indent += 2 + for child in element.childNodes: + serializeElement(child, indent) + serializeElement(element, 0) + + return "\n".join(rv) + +def dom2sax(node, handler, nsmap={'xml':XML_NAMESPACE}): + if node.nodeType == Node.ELEMENT_NODE: + if not nsmap: + handler.startElement(node.nodeName, node.attributes) + for child in node.childNodes: dom2sax(child, handler, nsmap) + handler.endElement(node.nodeName) + else: + attributes = dict(node.attributes.itemsNS()) + + # gather namespace declarations + prefixes = [] + for attrname in node.attributes.keys(): + attr = node.getAttributeNode(attrname) + if (attr.namespaceURI == XMLNS_NAMESPACE or + (attr.namespaceURI == None and attr.nodeName.startswith('xmlns'))): + prefix = (attr.localName != 'xmlns' and attr.localName or None) + handler.startPrefixMapping(prefix, attr.nodeValue) + prefixes.append(prefix) + nsmap = nsmap.copy() + nsmap[prefix] = attr.nodeValue + del attributes[(attr.namespaceURI, attr.localName)] + + # apply namespace declarations + for attrname in node.attributes.keys(): + attr = node.getAttributeNode(attrname) + if attr.namespaceURI == None and ':' in attr.nodeName: + prefix = attr.nodeName.split(':')[0] + if nsmap.has_key(prefix): + del attributes[(attr.namespaceURI, attr.localName)] + attributes[(nsmap[prefix],attr.localName)]=attr.nodeValue + + # SAX events + ns = node.namespaceURI or nsmap.get(None,None) + handler.startElementNS((ns,node.nodeName), node.nodeName, attributes) + for child in node.childNodes: dom2sax(child, handler, nsmap) + handler.endElementNS((ns, node.nodeName), node.nodeName) + for prefix in prefixes: handler.endPrefixMapping(prefix) + + elif node.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]: + handler.characters(node.nodeValue) + + elif node.nodeType == Node.DOCUMENT_NODE: + handler.startDocument() + for child in node.childNodes: dom2sax(child, handler, nsmap) + handler.endDocument() + + elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE: + for child in node.childNodes: dom2sax(child, 
handler, nsmap) + + else: + # ATTRIBUTE_NODE + # ENTITY_NODE + # PROCESSING_INSTRUCTION_NODE + # COMMENT_NODE + # DOCUMENT_TYPE_NODE + # NOTATION_NODE + pass diff --git a/lib/venus/planet/vendor/html5lib/treebuilders/etree.py b/lib/venus/planet/vendor/html5lib/treebuilders/etree.py new file mode 100755 index 0000000..f78762b --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treebuilders/etree.py @@ -0,0 +1,266 @@ +import _base +import new + +moduleCache = {} + +def getETreeModule(ElementTreeImplementation, fullTree=False): + name = "_" + ElementTreeImplementation.__name__+"builder" + if name in moduleCache: + return moduleCache[name] + else: + mod = new.module("_" + ElementTreeImplementation.__name__+"builder") + objs = getETreeBuilder(ElementTreeImplementation, fullTree) + mod.__dict__.update(objs) + moduleCache[name] = mod + return mod + +def getETreeBuilder(ElementTreeImplementation, fullTree=False): + ElementTree = ElementTreeImplementation + class Element(_base.Node): + def __init__(self, name): + self._element = ElementTree.Element(name) + self.name = name + self.parent = None + self._childNodes = [] + self._flags = [] + + def _setName(self, name): + self._element.tag = name + + def _getName(self): + return self._element.tag + + name = property(_getName, _setName) + + def _getAttributes(self): + return self._element.attrib + + def _setAttributes(self, attributes): + #Delete existing attributes first + #XXX - there may be a better way to do this... + for key in self._element.attrib.keys(): + del self._element.attrib[key] + for key, value in attributes.iteritems(): + self._element.set(key, value) + + attributes = property(_getAttributes, _setAttributes) + + def _getChildNodes(self): + return self._childNodes + + def _setChildNodes(self, value): + del self._element[:] + self._childNodes = [] + for element in value: + self.insertChild(element) + + childNodes = property(_getChildNodes, _setChildNodes) + + def hasContent(self): + """Return true if the node has children or text""" + return bool(self._element.text or self._element.getchildren()) + + def appendChild(self, node): + self._childNodes.append(node) + self._element.append(node._element) + node.parent = self + + def insertBefore(self, node, refNode): + index = self._element.getchildren().index(refNode._element) + self._element.insert(index, node._element) + node.parent = self + + def removeChild(self, node): + self._element.remove(node._element) + node.parent=None + + def insertText(self, data, insertBefore=None): + if not(len(self._element)): + if not self._element.text: + self._element.text = "" + self._element.text += data + elif insertBefore is None: + #Insert the text as the tail of the last child element + if not self._element[-1].tail: + self._element[-1].tail = "" + self._element[-1].tail += data + else: + #Insert the text before the specified node + children = self._element.getchildren() + index = children.index(insertBefore._element) + if index > 0: + if not self._element[index-1].tail: + self._element[index-1].tail = "" + self._element[index-1].tail += data + else: + if not self._element.text: + self._element.text = "" + self._element.text += data + + def cloneNode(self): + element = Element(self.name) + for name, value in self.attributes.iteritems(): + element.attributes[name] = value + return element + + def reparentChildren(self, newParent): + if newParent.childNodes: + newParent.childNodes[-1]._element.tail += self._element.text + else: + if not newParent._element.text: + newParent._element.text = "" + if 
self._element.text is not None: + newParent._element.text += self._element.text + self._element.text = "" + _base.Node.reparentChildren(self, newParent) + + class Comment(Element): + def __init__(self, data): + #Use the superclass constructor to set all properties on the + #wrapper element + self._element = ElementTree.Comment(data) + self.parent = None + self._childNodes = [] + self._flags = [] + + def _getData(self): + return self._element.text + + def _setData(self, value): + self._element.text = value + + data = property(_getData, _setData) + + class DocumentType(Element): + def __init__(self, name): + Element.__init__(self, "<!DOCTYPE>") + self._element.text = name + + def _getPublicId(self): + return self._element.get(u"publicId", None) + + def _setPublicId(self, value): + if value is not None: + self._element.set(u"publicId", value) + + publicId = property(_getPublicId, _setPublicId) + + def _getSystemId(self): + return self._element.get(u"systemId", None) + + def _setSystemId(self, value): + if value is not None: + self._element.set(u"systemId", value) + + systemId = property(_getSystemId, _setSystemId) + + class Document(Element): + def __init__(self): + Element.__init__(self, "<DOCUMENT_ROOT>") + + class DocumentFragment(Element): + def __init__(self): + Element.__init__(self, "<DOCUMENT_FRAGMENT>") + + def testSerializer(element): + rv = [] + finalText = None + def serializeElement(element, indent=0): + if not(hasattr(element, "tag")): + element = element.getroot() + if element.tag == "<!DOCTYPE>": + rv.append("|%s<!DOCTYPE %s>"%(' '*indent, element.text)) + elif element.tag == "<DOCUMENT_ROOT>": + rv.append("#document") + if element.text: + rv.append("|%s\"%s\""%(' '*(indent+2), element.text)) + if element.tail: + finalText = element.tail + elif type(element.tag) == type(ElementTree.Comment): + rv.append("|%s<!-- %s -->"%(' '*indent, element.text)) + else: + rv.append("|%s<%s>"%(' '*indent, element.tag)) + if hasattr(element, "attrib"): + for name, value in element.attrib.iteritems(): + rv.append('|%s%s="%s"' % (' '*(indent+2), name, value)) + if element.text: + rv.append("|%s\"%s\"" %(' '*(indent+2), element.text)) + indent += 2 + for child in element.getchildren(): + serializeElement(child, indent) + if element.tail: + rv.append("|%s\"%s\"" %(' '*(indent-2), element.tail)) + serializeElement(element, 0) + + if finalText is not None: + rv.append("|%s\"%s\""%(' '*2, finalText)) + + return "\n".join(rv) + + def tostring(element): + """Serialize an element and its child nodes to a string""" + rv = [] + finalText = None + def serializeElement(element): + if type(element) == type(ElementTree.ElementTree): + element = element.getroot() + + if element.tag == "<!DOCTYPE>": + rv.append("<!DOCTYPE %s>"%(element.text,)) + elif element.tag == "<DOCUMENT_ROOT>": + if element.text: + rv.append(element.text) + if element.tail: + finalText = element.tail + + for child in element.getchildren(): + serializeElement(child) + + elif type(element.tag) == type(ElementTree.Comment): + rv.append("<!--%s-->"%(element.text,)) + else: + #This is assumed to be an ordinary element + if not element.attrib: + rv.append("<%s>"%(element.tag,)) + else: + attr = " ".join(["%s=\"%s\""%(name, value) + for name, value in element.attrib.iteritems()]) + rv.append("<%s %s>"%(element.tag, attr)) + if element.text: + rv.append(element.text) + + for child in element.getchildren(): + serializeElement(child) + + rv.append("</%s>"%(element.tag,)) + + if element.tail: + rv.append(element.tail) + + serializeElement(element) + 
+ if finalText is not None: + rv.append("%s\""%(' '*2, finalText)) + + return "".join(rv) + + class TreeBuilder(_base.TreeBuilder): + documentClass = Document + doctypeClass = DocumentType + elementClass = Element + commentClass = Comment + fragmentClass = DocumentFragment + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + if fullTree: + return self.document._element + else: + return self.document._element.find("html") + + def getFragment(self): + return _base.TreeBuilder.getFragment(self)._element + + return locals() diff --git a/lib/venus/planet/vendor/html5lib/treebuilders/simpletree.py b/lib/venus/planet/vendor/html5lib/treebuilders/simpletree.py new file mode 100755 index 0000000..225cb3e --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treebuilders/simpletree.py @@ -0,0 +1,205 @@ +import _base +from html5lib.constants import voidElements +from xml.sax.saxutils import escape + +# Really crappy basic implementation of a DOM-core like thing +class Node(_base.Node): + type = -1 + def __init__(self, name): + self.name = name + self.parent = None + self.value = None + self.childNodes = [] + self._flags = [] + + def __iter__(self): + for node in self.childNodes: + yield node + for item in node: + yield item + + def __unicode__(self): + return self.name + + def toxml(self): + raise NotImplementedError + + def printTree(self, indent=0): + tree = '\n|%s%s' % (' '* indent, unicode(self)) + for child in self.childNodes: + tree += child.printTree(indent + 2) + return tree + + def appendChild(self, node): + if (isinstance(node, TextNode) and self.childNodes and + isinstance(self.childNodes[-1], TextNode)): + self.childNodes[-1].value += node.value + else: + self.childNodes.append(node) + node.parent = self + + def insertText(self, data, insertBefore=None): + if insertBefore is None: + self.appendChild(TextNode(data)) + else: + self.insertBefore(TextNode(data), insertBefore) + + def insertBefore(self, node, refNode): + index = self.childNodes.index(refNode) + if (isinstance(node, TextNode) and index > 0 and + isinstance(self.childNodes[index - 1], TextNode)): + self.childNodes[index - 1].value += node.value + else: + self.childNodes.insert(index, node) + node.parent = self + + def removeChild(self, node): + try: + self.childNodes.remove(node) + except: + # XXX + raise + node.parent = None + + def cloneNode(self): + newNode = type(self)(self.name) + if hasattr(self, 'attributes'): + for attr, value in self.attributes.iteritems(): + newNode.attributes[attr] = value + newNode.value = self.value + return newNode + + def hasContent(self): + """Return true if the node has children or text""" + return bool(self.childNodes) + +class Document(Node): + type = 1 + def __init__(self): + Node.__init__(self, None) + + def __unicode__(self): + return "#document" + + def toxml(self, encoding="utf=8"): + result = "" + for child in self.childNodes: + result += child.toxml() + return result.encode(encoding) + + def hilite(self, encoding="utf-8"): + result = "<pre>" + for child in self.childNodes: + result += child.hilite() + return result.encode(encoding) + "</pre>" + + def printTree(self): + tree = unicode(self) + for child in self.childNodes: + tree += child.printTree(2) + return tree + +class DocumentFragment(Document): + type = 2 + def __unicode__(self): + return "#document-fragment" + +class DocumentType(Node): + type = 3 + def __init__(self, name): + Node.__init__(self, name) + self.publicId = u"" + self.systemId = u"" + + def __unicode__(self): + return 
u"<!DOCTYPE %s>" % self.name + + toxml = __unicode__ + + def hilite(self): + return '<code class="markup doctype"><!DOCTYPE %s></code>' % self.name + +class TextNode(Node): + type = 4 + def __init__(self, value): + Node.__init__(self, None) + self.value = value + + def __unicode__(self): + return u"\"%s\"" % self.value + + def toxml(self): + return escape(self.value) + + hilite = toxml + +class Element(Node): + type = 5 + def __init__(self, name): + Node.__init__(self, name) + self.attributes = {} + + def __unicode__(self): + return u"<%s>" % self.name + + def toxml(self): + result = '<' + self.name + if self.attributes: + for name,value in self.attributes.iteritems(): + result += u' %s="%s"' % (name, escape(value,{'"':'"'})) + if self.childNodes: + result += '>' + for child in self.childNodes: + result += child.toxml() + result += u'</%s>' % self.name + else: + result += u'/>' + return result + + def hilite(self): + result = '<<code class="markup element-name">%s</code>' % self.name + if self.attributes: + for name, value in self.attributes.iteritems(): + result += ' <code class="markup attribute-name">%s</code>=<code class="markup attribute-value">"%s"</code>' % (name, escape(value, {'"':'"'})) + if self.childNodes: + result += ">" + for child in self.childNodes: + result += child.hilite() + elif self.name in voidElements: + return result + ">" + return result + '</<code class="markup element-name">%s</code>>' % self.name + + def printTree(self, indent): + tree = '\n|%s%s' % (' '*indent, unicode(self)) + indent += 2 + if self.attributes: + for name, value in self.attributes.iteritems(): + tree += '\n|%s%s="%s"' % (' ' * indent, name, value) + for child in self.childNodes: + tree += child.printTree(indent) + return tree + +class CommentNode(Node): + type = 6 + def __init__(self, data): + Node.__init__(self, None) + self.data = data + + def __unicode__(self): + return "<!-- %s -->" % self.data + + def toxml(self): + return "<!--%s-->" % self.data + + def hilite(self): + return '<code class="markup comment"><!--%s--></code>' % escape(self.data) + +class TreeBuilder(_base.TreeBuilder): + documentClass = Document + doctypeClass = DocumentType + elementClass = Element + commentClass = CommentNode + fragmentClass = DocumentFragment + + def testSerializer(self, node): + return node.printTree() diff --git a/lib/venus/planet/vendor/html5lib/treebuilders/soup.py b/lib/venus/planet/vendor/html5lib/treebuilders/soup.py new file mode 100644 index 0000000..9708d42 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treebuilders/soup.py @@ -0,0 +1,158 @@ +from BeautifulSoup import BeautifulSoup, Tag, NavigableString, Comment, Declaration + +import _base + +class AttrList(object): + def __init__(self, element): + self.element = element + self.attrs = dict(self.element.attrs) + def __iter__(self): + return self.attrs.items().__iter__() + def __setitem__(self, name, value): + "set attr", name, value + self.element[name] = value + def items(self): + return self.attrs.items() + def keys(self): + return self.attrs.keys() + def __getitem__(self, name): + return self.attrs[name] + def __contains__(self, name): + return name in self.attrs.keys() + + +class Element(_base.Node): + def __init__(self, element, soup): + _base.Node.__init__(self, element.name) + self.element = element + self.soup=soup + + def appendChild(self, node): + if (node.element.__class__ == NavigableString and self.element.contents + and self.element.contents[-1].__class__ == NavigableString): + newNode = TextNode(NavigableString( + 
self.element.contents[-1]+node.element), self.soup) + self.element.contents[-1].extract() + self.appendChild(newNode) + else: + self.element.insert(len(self.element.contents), node.element) + node.parent = self + + def getAttributes(self): + return AttrList(self.element) + + def setAttributes(self, attributes): + if attributes: + for name, value in attributes.items(): + self.element[name] = value + + attributes = property(getAttributes, setAttributes) + + def insertText(self, data, insertBefore=None): + text = TextNode(NavigableString(data), self.soup) + if insertBefore: + self.insertBefore(text, insertBefore) + else: + self.appendChild(text) + + def insertBefore(self, node, refNode): + index = self.element.contents.index(refNode.element) + if (node.element.__class__ == NavigableString and self.element.contents + and self.element.contents[index-1].__class__ == NavigableString): + newNode = TextNode(NavigableString( + self.element.contents[index-1]+node.element), self.soup) + self.element.contents[index-1].extract() + self.insertBefore(newNode, refNode) + else: + self.element.insert(index, node.element) + node.parent = self + + def removeChild(self, node): + node.element.extract() + node.parent = None + + def reparentChildren(self, newParent): + while self.element.contents: + child = self.element.contents[0] + child.extract() + if isinstance(child, Tag): + newParent.appendChild(Element(child, self.soup)) + else: + newParent.appendChild(TextNode(child, self.soup)) + + def cloneNode(self): + node = Element(Tag(self.soup, self.element.name), self.soup) + for key,value in self.attributes: + node.attributes[key] = value + return node + + def hasContent(self): + return self.element.contents + +class TextNode(Element): + def __init__(self, element, soup): + _base.Node.__init__(self, None) + self.element = element + self.soup=soup + + def cloneNode(self): + raise NotImplementedError + +class TreeBuilder(_base.TreeBuilder): + def documentClass(self): + self.soup = BeautifulSoup("") + return Element(self.soup, self.soup) + + def insertDoctype(self, name, publicId, systemId): + self.soup.insert(0, Declaration(name)) + + def elementClass(self, name): + return Element(Tag(self.soup, name), self.soup) + + def commentClass(self, data): + return TextNode(Comment(data), self.soup) + + def fragmentClass(self): + self.soup = BeautifulSoup("") + self.soup.name = "[document_fragment]" + return Element(self.soup, self.soup) + + def appendChild(self, node): + self.soup.insert(len(self.soup.contents), node.element) + + def testSerializer(self, element): + return testSerializer(element) + + def getDocument(self): + return self.soup + + def getFragment(self): + return _base.TreeBuilder.getFragment(self).element + +def testSerializer(element): + rv = [] + def serializeElement(element, indent=0): + if isinstance(element, Declaration): + rv.append("|%s<!DOCTYPE %s>"%(' '*indent, element.string)) + elif isinstance(element, BeautifulSoup): + if element.name == "[document_fragment]": + rv.append("#document-fragment") + else: + rv.append("#document") + + elif isinstance(element, Comment): + rv.append("|%s<!-- %s -->"%(' '*indent, element.string)) + elif isinstance(element, unicode): + rv.append("|%s\"%s\"" %(' '*indent, element)) + else: + rv.append("|%s<%s>"%(' '*indent, element.name)) + if element.attrs: + for name, value in element.attrs: + rv.append('|%s%s="%s"' % (' '*(indent+2), name, value)) + indent += 2 + if hasattr(element, "contents"): + for child in element.contents: + serializeElement(child, indent) + 
serializeElement(element, 0) + + return "\n".join(rv) diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/__init__.py b/lib/venus/planet/vendor/html5lib/treewalkers/__init__.py new file mode 100644 index 0000000..3a606a8 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/__init__.py @@ -0,0 +1,52 @@ +"""A collection of modules for iterating through different kinds of +tree, generating tokens identical to those produced by the tokenizer +module. + +To create a tree walker for a new type of tree, you need to do +implement a tree walker object (called TreeWalker by convention) that +implements a 'serialize' method taking a tree as sole argument and +returning an iterator generating tokens. +""" + +treeWalkerCache = {} + +def getTreeWalker(treeType, implementation=None, **kwargs): + """Get a TreeWalker class for various types of tree with built-in support + + treeType - the name of the tree type required (case-insensitive). Supported + values are "simpletree", "dom", "etree" and "beautifulsoup" + + "simpletree" - a built-in DOM-ish tree type with support for some + more pythonic idioms. + "dom" - The xml.dom.minidom DOM implementation + "pulldom" - The xml.dom.pulldom event stream + "etree" - A generic walker for tree implementations exposing an + elementtree-like interface (known to work with + ElementTree, cElementTree and lxml.etree). + "lxml" - Optimized walker for lxml.etree + "beautifulsoup" - Beautiful soup (if installed) + "genshi" - a Genshi stream + + implementation - (Currently applies to the "etree" tree type only). A module + implementing the tree type e.g. xml.etree.ElementTree or + cElementTree.""" + + treeType = treeType.lower() + if treeType not in treeWalkerCache: + if treeType in ("dom", "pulldom", "simpletree"): + mod = __import__(treeType, globals()) + treeWalkerCache[treeType] = mod.TreeWalker + elif treeType == "genshi": + import genshistream + treeWalkerCache[treeType] = genshistream.TreeWalker + elif treeType == "beautifulsoup": + import soup + treeWalkerCache[treeType] = soup.TreeWalker + elif treeType == "lxml": + import lxmletree + treeWalkerCache[treeType] = lxmletree.TreeWalker + elif treeType == "etree": + import etree + # XXX: NEVER cache here, caching is done in the etree submodule + return etree.getETreeModule(implementation, **kwargs).TreeWalker + return treeWalkerCache.get(treeType) diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/_base.py b/lib/venus/planet/vendor/html5lib/treewalkers/_base.py new file mode 100644 index 0000000..fd12d58 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/_base.py @@ -0,0 +1,154 @@ +import gettext +_ = gettext.gettext + +from html5lib.constants import voidElements, spaceCharacters +spaceCharacters = u"".join(spaceCharacters) + +class TreeWalker(object): + def __init__(self, tree): + self.tree = tree + + def __iter__(self): + raise NotImplementedError + + def error(self, msg): + return {"type": "SerializeError", "data": msg} + + def normalizeAttrs(self, attrs): + if not attrs: + attrs = [] + elif hasattr(attrs, 'items'): + attrs = attrs.items() + return [(unicode(name),unicode(value)) for name,value in attrs] + + def emptyTag(self, name, attrs, hasChildren=False): + yield {"type": "EmptyTag", "name": unicode(name), \ + "data": self.normalizeAttrs(attrs)} + if hasChildren: + yield self.error(_("Void element has children")) + + def startTag(self, name, attrs): + return {"type": "StartTag", "name": unicode(name), \ + "data": self.normalizeAttrs(attrs)} + + def endTag(self, name): + 
return {"type": "EndTag", "name": unicode(name), "data": []} + + def text(self, data): + data = unicode(data) + middle = data.lstrip(spaceCharacters) + left = data[:len(data)-len(middle)] + if left: + yield {"type": "SpaceCharacters", "data": left} + data = middle + middle = data.rstrip(spaceCharacters) + right = data[len(middle):] + if middle: + yield {"type": "Characters", "data": middle} + if right: + yield {"type": "SpaceCharacters", "data": right} + + def comment(self, data): + return {"type": "Comment", "data": unicode(data)} + + def doctype(self, name, publicId=None, systemId=None, correct=True): + return {"type": "Doctype", + "name": name is not None and unicode(name) or u"", + "publicId": publicId, "systemId": systemId, + "correct": correct} + + def unknown(self, nodeType): + return self.error(_("Unknown node type: ") + nodeType) + +class RecursiveTreeWalker(TreeWalker): + def walkChildren(self, node): + raise NodeImplementedError + + def element(self, node, name, attrs, hasChildren): + if name in voidElements: + for token in self.emptyTag(name, attrs, hasChildren): + yield token + else: + yield self.startTag(name, attrs) + if hasChildren: + for token in self.walkChildren(node): + yield token + yield self.endTag(name) + +from xml.dom import Node + +DOCUMENT = Node.DOCUMENT_NODE +DOCTYPE = Node.DOCUMENT_TYPE_NODE +TEXT = Node.TEXT_NODE +ELEMENT = Node.ELEMENT_NODE +COMMENT = Node.COMMENT_NODE +UNKNOWN = "<#UNKNOWN#>" + +class NonRecursiveTreeWalker(TreeWalker): + def getNodeDetails(self, node): + raise NotImplementedError + + def getFirstChild(self, node): + raise NotImplementedError + + def getNextSibling(self, node): + raise NotImplementedError + + def getParentNode(self, node): + raise NotImplementedError + + def __iter__(self): + currentNode = self.tree + while currentNode is not None: + details = self.getNodeDetails(currentNode) + type, details = details[0], details[1:] + hasChildren = False + + if type == DOCTYPE: + yield self.doctype(*details) + + elif type == TEXT: + for token in self.text(*details): + yield token + + elif type == ELEMENT: + name, attributes, hasChildren = details + if name in voidElements: + for token in self.emptyTag(name, attributes, hasChildren): + yield token + hasChildren = False + else: + yield self.startTag(name, attributes) + + elif type == COMMENT: + yield self.comment(details[0]) + + elif type == DOCUMENT: + hasChildren = True + + else: + yield self.unknown(details[0]) + + if hasChildren: + firstChild = self.getFirstChild(currentNode) + else: + firstChild = None + + if firstChild is not None: + currentNode = firstChild + else: + while currentNode is not None: + details = self.getNodeDetails(currentNode) + type, details = details[0], details[1:] + if type == ELEMENT: + name, attributes, hasChildren = details + if name not in voidElements: + yield self.endTag(name) + nextSibling = self.getNextSibling(currentNode) + if nextSibling is not None: + currentNode = nextSibling + break + if self.tree is currentNode: + currentNode = None + else: + currentNode = self.getParentNode(currentNode) diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/dom.py b/lib/venus/planet/vendor/html5lib/treewalkers/dom.py new file mode 100644 index 0000000..1ed2aed --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/dom.py @@ -0,0 +1,37 @@ +from xml.dom import Node + +import gettext +_ = gettext.gettext + +import _base + +from html5lib.constants import voidElements + +class TreeWalker(_base.NonRecursiveTreeWalker): + def getNodeDetails(self, node): + if 
node.nodeType == Node.DOCUMENT_TYPE_NODE: + return _base.DOCTYPE, node.name, node.publicId, node.systemId + + elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): + return _base.TEXT, node.nodeValue + + elif node.nodeType == Node.ELEMENT_NODE: + return _base.ELEMENT, node.nodeName, node.attributes.items(), node.hasChildNodes + + elif node.nodeType == Node.COMMENT_NODE: + return _base.COMMENT, node.nodeValue + + elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): + return (_base.DOCUMENT,) + + else: + return _base.UNKNOWN, node.nodeType + + def getFirstChild(self, node): + return node.firstChild + + def getNextSibling(self, node): + return node.nextSibling + + def getParentNode(self, node): + return node.parentNode diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/etree.py b/lib/venus/planet/vendor/html5lib/treewalkers/etree.py new file mode 100644 index 0000000..976411b --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/etree.py @@ -0,0 +1,112 @@ +import gettext +_ = gettext.gettext + +import new +import copy + +import _base +from html5lib.constants import voidElements + +moduleCache = {} + +def getETreeModule(ElementTreeImplementation): + name = "_" + ElementTreeImplementation.__name__+"builder" + if name in moduleCache: + return moduleCache[name] + else: + mod = new.module("_" + ElementTreeImplementation.__name__+"builder") + objs = getETreeBuilder(ElementTreeImplementation) + mod.__dict__.update(objs) + moduleCache[name] = mod + return mod + +def getETreeBuilder(ElementTreeImplementation): + ElementTree = ElementTreeImplementation + + class TreeWalker(_base.NonRecursiveTreeWalker): + """Given the particular ElementTree representation, this implementation, + to avoid using recursion, returns "nodes" as tuples with the following + content: + + 1. An Element node serving as *context* (it cannot be called the parent + node due to the particular ``tail`` text nodes. + + 2. Either the string literals ``"text"`` or ``"tail"`` or a child index + + 3. A list used as a stack of all ancestor *context nodes*. It is a + pair tuple whose first item is an Element and second item is a child + index. 
+ """ + + def getNodeDetails(self, node): + if isinstance(node, tuple): # It might be the root Element + elt, key, parents = node + if key in ("text", "tail"): + return _base.TEXT, getattr(elt, key) + else: + node = elt[int(key)] + + if not(hasattr(node, "tag")): + node = node.getroot() + + if node.tag in ("<DOCUMENT_ROOT>", "<DOCUMENT_FRAGMENT>"): + return (_base.DOCUMENT,) + + elif node.tag == "<!DOCTYPE>": + return _base.DOCTYPE, node.text + + elif type(node.tag) == type(ElementTree.Comment): + return _base.COMMENT, node.text + + else: + #This is assumed to be an ordinary element + return _base.ELEMENT, node.tag, node.attrib.items(), len(node) or node.text + + def getFirstChild(self, node): + if isinstance(node, tuple): # It might be the root Element + elt, key, parents = node + assert key not in ("text", "tail"), "Text nodes have no children" + parents.append((elt, int(key))) + node = elt[int(key)] + else: + parents = [] + + assert len(node) or node.text, "Node has no children" + if node.text: + return (node, "text", parents) + else: + return (node, 0, parents) + + def getNextSibling(self, node): + assert isinstance(node, tuple), "Node is not a tuple: " + str(node) + + elt, key, parents = node + if key == "text": + key = -1 + elif key == "tail": + elt, key = parents.pop() + else: + # Look for "tail" of the "revisited" node + child = elt[key] + if child.tail: + parents.append((elt, key)) + return (child, "tail", parents) + + # case where key were "text" or "tail" or elt[key] had a tail + key += 1 + if len(elt) > key: + return (elt, key, parents) + else: + return None + + def getParentNode(self, node): + assert isinstance(node, tuple) + elt, key, parents = node + if parents: + elt, key = parents.pop() + return elt, key, parents + else: + # HACK: We could return ``elt`` but None will stop the algorithm the same way + return None + + return locals() diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/genshistream.py b/lib/venus/planet/vendor/html5lib/treewalkers/genshistream.py new file mode 100644 index 0000000..ecc7a0b --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/genshistream.py @@ -0,0 +1,67 @@ +from genshi.core import START, END, XML_DECL, DOCTYPE, TEXT, \ + START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT +from genshi.output import NamespaceFlattener + +import _base + +from html5lib.constants import voidElements + +class TreeWalker(_base.TreeWalker): + def __iter__(self): + depth = 0 + ignore_until = None + previous = None + for event in NamespaceFlattener(prefixes={ + 'http://www.w3.org/1999/xhtml': '' + })(self.tree): + if previous is not None: + if previous[0] == START: + depth += 1 + if ignore_until <= depth: + ignore_until = None + if ignore_until is None: + for token in self.tokens(previous, event): + yield token + if token["type"] == "EmptyTag": + ignore_until = depth + if previous[0] == END: + depth -= 1 + previous = event + if previous is not None: + if ignore_until is None or ignore_until <= depth: + for token in self.tokens(previous, None): + yield token + elif ignore_until is not None: + raise ValueError("Illformed DOM event stream: void element without END_ELEMENT") + + def tokens(self, event, next): + kind, data, pos = event + if kind == START: + tag, attrib = data + if tag in voidElements: + for token in self.emptyTag(tag, list(attrib), \ + not next or next[0] != END or next[1] != tag): + yield token + else: + yield self.startTag(tag, list(attrib)) + + elif kind == END: + if data not in voidElements: + yield self.endTag(data) + + elif kind 
== COMMENT: + yield self.comment(data) + + elif kind == TEXT: + for token in self.text(data): + yield token + + elif kind == DOCTYPE: + yield self.doctype(*data) + + elif kind in (XML_DECL, DOCTYPE, START_NS, END_NS, \ + START_CDATA, END_CDATA, PI): + pass + + else: + yield self.unknown(kind) diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/pulldom.py b/lib/venus/planet/vendor/html5lib/treewalkers/pulldom.py new file mode 100644 index 0000000..4a96aed --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/pulldom.py @@ -0,0 +1,52 @@ +from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \ + COMMENT, IGNORABLE_WHITESPACE, CHARACTERS + +import _base + +from html5lib.constants import voidElements + +class TreeWalker(_base.TreeWalker): + def __iter__(self): + ignore_until = None + previous = None + for event in self.tree: + if previous is not None and \ + (ignore_until is None or previous[1] is ignore_until): + if previous[1] is ignore_until: + ignore_until = None + for token in self.tokens(previous, event): + yield token + if token["type"] == "EmptyTag": + ignore_until = previous[1] + previous = event + if ignore_until is None or previous[1] is ignore_until: + for token in self.tokens(previous, None): + yield token + elif ignore_until is not None: + raise ValueError("Illformed DOM event stream: void element without END_ELEMENT") + + def tokens(self, event, next): + type, node = event + if type == START_ELEMENT: + name = node.nodeName + if name in voidElements: + for token in self.emptyTag(name, \ + node.attributes.items(), not next or next[1] is not node): + yield token + else: + yield self.startTag(name, node.attributes.items()) + + elif type == END_ELEMENT: + name = node.nodeName + if name not in voidElements: + yield self.endTag(name) + + elif type == COMMENT: + yield self.comment(node.nodeValue) + + elif type in (IGNORABLE_WHITESPACE, CHARACTERS): + for token in self.text(node.nodeValue): + yield token + + else: + yield self.unknown(type) diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/simpletree.py b/lib/venus/planet/vendor/html5lib/treewalkers/simpletree.py new file mode 100644 index 0000000..9dac6c8 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/simpletree.py @@ -0,0 +1,72 @@ +import gettext +_ = gettext.gettext + +import _base + +class TreeWalker(_base.NonRecursiveTreeWalker): + """Given that simpletree has no performant way of getting a node's + next sibling, this implementation returns "nodes" as tuples with the + following content: + + 1. The parent Node (Element, Document or DocumentFragment) + + 2. The child index of the current node in its parent's children list + + 3. A list used as a stack of all ancestors. It is a pair tuple whose + first item is a parent Node and second item is a child index. 
+ """ + + def getNodeDetails(self, node): + if isinstance(node, tuple): # It might be the root Node + parent, idx, parents = node + node = parent.childNodes[idx] + + # testing node.type allows us not to import treebuilders.simpletree + if node.type in (1, 2): # Document or DocumentFragment + return (_base.DOCUMENT,) + + elif node.type == 3: # DocumentType + return _base.DOCTYPE, node.name, node.publicId, node.systemId + + elif node.type == 4: # TextNode + return _base.TEXT, node.value + + elif node.type == 5: # Element + return _base.ELEMENT, node.name, \ + node.attributes.items(), node.hasContent() + + elif node.type == 6: # CommentNode + return _base.COMMENT, node.data + + else: + return _node.UNKNOWN, node.type + + def getFirstChild(self, node): + if isinstance(node, tuple): # It might be the root Node + parent, idx, parents = node + parents.append((parent, idx)) + node = parent.childNodes[idx] + else: + parents = [] + + assert node.hasContent(), "Node has no children" + return (node, 0, parents) + + def getNextSibling(self, node): + assert isinstance(node, tuple), "Node is not a tuple: " + str(node) + parent, idx, parents = node + idx += 1 + if len(parent.childNodes) > idx: + return (parent, idx, parents) + else: + return None + + def getParentNode(self, node): + assert isinstance(node, tuple) + parent, idx, parents = node + if parents: + parent, idx = parents.pop() + return parent, idx, parents + else: + # HACK: We could return ``parent`` but None will stop the algorithm the same way + return None diff --git a/lib/venus/planet/vendor/html5lib/treewalkers/soup.py b/lib/venus/planet/vendor/html5lib/treewalkers/soup.py new file mode 100644 index 0000000..1d52ca0 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/treewalkers/soup.py @@ -0,0 +1,36 @@ +import gettext +_ = gettext.gettext + +from BeautifulSoup import BeautifulSoup, Declaration, Comment, Tag + +import _base + +class TreeWalker(_base.NonRecursiveTreeWalker): + def getNodeDetails(self, node): + if isinstance(node, BeautifulSoup): # Document or DocumentFragment + return (_base.DOCUMENT,) + + elif isinstance(node, Declaration): # DocumentType + #Slice needed to remove markup added during unicode conversion + return _base.DOCTYPE, unicode(node.string)[2:-1] + + elif isinstance(node, Comment): + return _base.COMMENT, unicode(node.string)[4:-3] + + elif isinstance(node, unicode): # TextNode + return _base.TEXT, node + + elif isinstance(node, Tag): # Element + return _base.ELEMENT, node.name, \ + dict(node.attrs).items(), node.contents + else: + return _base.UNKNOWN, node.__class__.__name__ + + def getFirstChild(self, node): + return node.contents[0] + + def getNextSibling(self, node): + return node.nextSibling + + def getParentNode(self, node): + return node.parent diff --git a/lib/venus/planet/vendor/html5lib/utils.py b/lib/venus/planet/vendor/html5lib/utils.py new file mode 100644 index 0000000..c71e864 --- /dev/null +++ b/lib/venus/planet/vendor/html5lib/utils.py @@ -0,0 +1,36 @@ +try: + frozenset +except NameError: + #Import from the sets module for python 2.3 + from sets import Set as set + from sets import ImmutableSet as frozenset + +class MethodDispatcher(dict): + """Dict with 2 special properties: + + On initiation, keys that are lists, sets or tuples are converted to + multiple keys so accessing any one of the items in the original + list-like object returns the matching value + + md = MethodDispatcher({("foo", "bar"):"baz"}) + md["foo"] == "baz" + + A default value which can be set through the default attribute. 
+ """ + + def __init__(self, items=()): + # Using _dictEntries instead of directly assigning to self is about + # twice as fast. Please do careful performance testing before changing + # anything here. + _dictEntries = [] + for name,value in items: + if type(name) in (list, tuple, frozenset, set): + for item in name: + _dictEntries.append((item, value)) + else: + _dictEntries.append((name, value)) + dict.__init__(self, _dictEntries) + self.default = None + + def __getitem__(self, key): + return dict.get(self, key, self.default) diff --git a/lib/venus/planet/vendor/htmltmpl.py b/lib/venus/planet/vendor/htmltmpl.py new file mode 100644 index 0000000..d4fce5f --- /dev/null +++ b/lib/venus/planet/vendor/htmltmpl.py @@ -0,0 +1,1421 @@ + +""" A templating engine for separation of code and HTML. + + The documentation of this templating engine is separated to two parts: + + 1. Description of the templating language. + + 2. Documentation of classes and API of this module that provides + a Python implementation of the templating language. + + All the documentation can be found in 'doc' directory of the + distribution tarball or at the homepage of the engine. + Latest versions of this module are also available at that website. + + You can use and redistribute this module under conditions of the + GNU General Public License that can be found either at + [ http://www.gnu.org/ ] or in file "LICENSE" contained in the + distribution tarball of this module. + + Copyright (c) 2001 Tomas Styblo, tripie@cpan.org + + @name htmltmpl + @version 1.22 + @author-name Tomas Styblo + @author-email tripie@cpan.org + @website http://htmltmpl.sourceforge.net/ + @license-name GNU GPL + @license-url http://www.gnu.org/licenses/gpl.html +""" + +__version__ = 1.22 +__author__ = "Tomas Styblo (tripie@cpan.org)" + +# All imported modules are part of the standard Python library. + +from types import * +import re +import os +import os.path +import pprint # only for debugging +import sys +import copy +import cgi # for HTML escaping of variables +import urllib # for URL escaping of variables +import cPickle # for template compilation +import gettext +import portalocker # for locking + +INCLUDE_DIR = "inc" + +# Total number of possible parameters. +# Increment if adding a parameter to any statement. +PARAMS_NUMBER = 3 + +# Relative positions of parameters in TemplateCompiler.tokenize(). +PARAM_NAME = 1 +PARAM_ESCAPE = 2 +PARAM_GLOBAL = 3 +PARAM_GETTEXT_STRING = 1 + +############################################## +# CLASS: TemplateManager # +############################################## + +class TemplateManager: + """ Class that manages compilation and precompilation of templates. + + You should use this class whenever you work with templates + that are stored in a file. The class can create a compiled + template and transparently manage its precompilation. It also + keeps the precompiled templates up-to-date by modification times + comparisons. + """ + + def __init__(self, include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0): + """ Constructor. + + @header + __init__(include=1, max_include=5, precompile=1, comments=1, + gettext=0, debug=0) + + @param include Enable or disable included templates. + This optional parameter can be used to enable or disable + <em>TMPL_INCLUDE</em> inclusion of templates. Disabling of + inclusion can improve performance a bit. The inclusion is + enabled by default. + + @param max_include Maximum depth of nested inclusions. 
+ This optional parameter can be used to specify maximum depth of + nested <em>TMPL_INCLUDE</em> inclusions. It defaults to 5. + This setting prevents infinite recursive inclusions. + + @param precompile Enable or disable precompilation of templates. + This optional parameter can be used to enable or disable + creation and usage of precompiled templates. + + A precompiled template is saved to the same directory in + which the main template file is located. You need write + permissions to that directory. + + Precompilation provides a significant performance boost because + it's not necessary to parse the templates over and over again. + The boost is especially noticeable when templates that include + other templates are used. + + Comparison of modification times of the main template and all + included templates is used to ensure that the precompiled + templates are up-to-date. Templates are also recompiled if the + htmltmpl module is updated. + + The <em>TemplateError</em>exception is raised when the precompiled + template cannot be saved. Precompilation is enabled by default. + + @param comments Enable or disable template comments. + This optional parameter can be used to enable or disable + template comments. + Disabling of the comments can improve performance a bit. + Comments are enabled by default. + + @param gettext Enable or disable gettext support. + + @param debug Enable or disable debugging messages. + This optional parameter is a flag that can be used to enable + or disable debugging messages which are printed to the standard + error output. The debugging messages are disabled by default. + """ + # Save the optional parameters. + # These values are not modified by any method. + self._include = include + self._max_include = max_include + self._precompile = precompile + self._comments = comments + self._gettext = gettext + self._debug = debug + + self.DEB("INIT DONE") + + def prepare(self, file): + """ Preprocess, parse, tokenize and compile the template. + + If precompilation is enabled then this method tries to load + a precompiled form of the template from the same directory + in which the template source file is located. If it succeeds, + then it compares modification times stored in the precompiled + form to modification times of source files of the template, + including source files of all templates included via the + <em>TMPL_INCLUDE</em> statements. If any of the modification times + differs, then the template is recompiled and the precompiled + form updated. + + If precompilation is disabled, then this method parses and + compiles the template. + + @header prepare(file) + + @return Compiled template. + The methods returns an instance of the <em>Template</em> class + which is a compiled form of the template. This instance can be + used as input for the <em>TemplateProcessor</em>. + + @param file Path to the template file to prepare. + The method looks for the template file in current directory + if the parameter is a relative path. All included templates must + be placed in subdirectory <strong>'inc'</strong> of the + directory in which the main template file is located. 
+ """ + compiled = None + if self._precompile: + if self.is_precompiled(file): + try: + precompiled = self.load_precompiled(file) + except PrecompiledError, template: + print >> sys.stderr, "Htmltmpl: bad precompiled "\ + "template '%s' removed" % template + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + precompiled.debug(self._debug) + compile_params = (self._include, self._max_include, + self._comments, self._gettext) + if precompiled.is_uptodate(compile_params): + self.DEB("PRECOMPILED: UPTODATE") + compiled = precompiled + else: + self.DEB("PRECOMPILED: NOT UPTODATE") + compiled = self.update(precompiled) + else: + self.DEB("PRECOMPILED: NOT PRECOMPILED") + compiled = self.compile(file) + self.save_precompiled(compiled) + else: + self.DEB("PRECOMPILATION DISABLED") + compiled = self.compile(file) + return compiled + + def update(self, template): + """ Update (recompile) a compiled template. + + This method recompiles a template compiled from a file. + If precompilation is enabled then the precompiled form saved on + disk is also updated. + + @header update(template) + + @return Recompiled template. + It's ensured that the returned template is up-to-date. + + @param template A compiled template. + This parameter should be an instance of the <em>Template</em> + class, created either by the <em>TemplateManager</em> or by the + <em>TemplateCompiler</em>. The instance must represent a template + compiled from a file on disk. + """ + self.DEB("UPDATE") + updated = self.compile(template.file()) + if self._precompile: + self.save_precompiled(updated) + return updated + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def compile(self, file): + """ Compile the template. + @hidden + """ + return TemplateCompiler(self._include, self._max_include, + self._comments, self._gettext, + self._debug).compile(file) + + def is_precompiled(self, file): + """ Return true if the template is already precompiled on the disk. + This method doesn't check whether the compiled template is + uptodate. + @hidden + """ + filename = file + "c" # "template.tmplc" + if os.path.isfile(filename): + return 1 + else: + return 0 + + def load_precompiled(self, file): + """ Load precompiled template from disk. + + Remove the precompiled template file and recompile it + if the file contains corrupted or unpicklable data. + + @hidden + """ + filename = file + "c" # "template.tmplc" + self.DEB("LOADING PRECOMPILED") + try: + remove_bad = 0 + file = None + try: + file = open(filename, "rb") + portalocker.lock(file, portalocker.LOCK_SH) + precompiled = cPickle.load(file) + except IOError, (errno, errstr): + raise TemplateError, "IO error in load precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.UnpicklingError: + remove_bad = 1 + raise PrecompiledError, filename + except: + remove_bad = 1 + raise + else: + return precompiled + finally: + if file: + portalocker.unlock(file) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + def save_precompiled(self, template): + """ Save compiled template to disk in precompiled form. + + Associated metadata is also saved. 
It includes: filename of the + main template file, modification time of the main template file, + modification times of all included templates and version of the + htmltmpl module which compiled the template. + + The method removes a file which is saved only partially because + of some error. + + @hidden + """ + filename = template.file() + "c" # creates "template.tmplc" + # Check if we have write permission to the template's directory. + template_dir = os.path.dirname(os.path.abspath(filename)) + if not os.access(template_dir, os.W_OK): + raise TemplateError, "Cannot save precompiled templates "\ + "to '%s': write permission denied."\ + % template_dir + try: + remove_bad = 0 + file = None + try: + file = open(filename, "wb") # may truncate existing file + portalocker.lock(file, portalocker.LOCK_EX) + BINARY = 1 + READABLE = 0 + if self._debug: + cPickle.dump(template, file, READABLE) + else: + cPickle.dump(template, file, BINARY) + except IOError, (errno, errstr): + remove_bad = 1 + raise TemplateError, "IO error while saving precompiled "\ + "template '%s': (%d) %s"\ + % (filename, errno, errstr) + except cPickle.PicklingError, error: + remove_bad = 1 + raise TemplateError, "Pickling error while saving "\ + "precompiled template '%s': %s"\ + % (filename, error) + except: + remove_bad = 1 + raise + else: + self.DEB("SAVING PRECOMPILED") + finally: + if file: + portalocker.unlock(file) + file.close() + if remove_bad and os.path.isfile(filename): + # X: We may lose the original exception here, raising OSError. + os.remove(filename) + + +############################################## +# CLASS: TemplateProcessor # +############################################## + +class TemplateProcessor: + """ Fill the template with data and process it. + + This class provides actual processing of a compiled template. + Use it to set template variables and loops and then obtain + result of the processing. + """ + + def __init__(self, html_escape=1, magic_vars=1, global_vars=0, debug=0): + """ Constructor. + + @header __init__(html_escape=1, magic_vars=1, global_vars=0, + debug=0) + + @param html_escape Enable or disable HTML escaping of variables. + This optional parameter is a flag that can be used to enable or + disable automatic HTML escaping of variables. + All variables are by default automatically HTML escaped. + The escaping process substitutes HTML brackets, ampersands and + double quotes with appropriate HTML entities. + + @param magic_vars Enable or disable loop magic variables. + This parameter can be used to enable or disable + "magic" context variables, that are automatically defined inside + loops. Magic variables are enabled by default. + + Refer to the language specification for description of these + magic variables. + + @param global_vars Globally activate global lookup of variables. + This optional parameter is a flag that can be used to specify + whether variables which cannot be found in the current scope + should be automatically looked up in enclosing scopes. + + Automatic global lookup is disabled by default. Global lookup + can be overriden on a per-variable basis by the + <strong>GLOBAL</strong> parameter of a <strong>TMPL_VAR</strong> + statement. + + @param debug Enable or disable debugging messages. + """ + self._html_escape = html_escape + self._magic_vars = magic_vars + self._global_vars = global_vars + self._debug = debug + + # Data structure containing variables and loops set by the + # application. 
Use debug=1, process some template and + # then check stderr to see how the structure looks. + # It's modified only by set() and reset() methods. + self._vars = {} + + # Following variables are for multipart templates. + self._current_part = 1 + self._current_pos = 0 + + def set(self, var, value): + """ Associate a value with top-level template variable or loop. + + A template identifier can represent either an ordinary variable + (string) or a loop. + + To assign a value to a string identifier pass a scalar + as the 'value' parameter. This scalar will be automatically + converted to string. + + To assign a value to a loop identifier pass a list of mappings as + the 'value' parameter. The engine iterates over this list and + assigns values from the mappings to variables in a template loop + block if a key in the mapping corresponds to a name of a variable + in the loop block. The number of mappings contained in this list + is equal to number of times the loop block is repeated in the + output. + + @header set(var, value) + @return No return value. + + @param var Name of template variable or loop. + @param value The value to associate. + + """ + # The correctness of character case is verified only for top-level + # variables. + if self.is_ordinary_var(value): + # template top-level ordinary variable + if not var.islower(): + raise TemplateError, "Invalid variable name '%s'." % var + elif type(value) == ListType: + # template top-level loop + if var != var.capitalize(): + raise TemplateError, "Invalid loop name '%s'." % var + else: + raise TemplateError, "Value of toplevel variable '%s' must "\ + "be either a scalar or a list." % var + self._vars[var] = value + self.DEB("VALUE SET: " + str(var)) + + def reset(self, keep_data=0): + """ Reset the template data. + + This method resets the data contained in the template processor + instance. The template processor instance can be used to process + any number of templates, but this method must be called after + a template is processed to reuse the instance, + + @header reset(keep_data=0) + @return No return value. + + @param keep_data Do not reset the template data. + Use this flag if you do not want the template data to be erased. + This way you can reuse the data contained in the instance of + the <em>TemplateProcessor</em>. + """ + self._current_part = 1 + self._current_pos = 0 + if not keep_data: + self._vars.clear() + self.DEB("RESET") + + def process(self, template, part=None): + """ Process a compiled template. Return the result as string. + + This method actually processes a template and returns + the result. + + @header process(template, part=None) + @return Result of the processing as string. + + @param template A compiled template. + Value of this parameter must be an instance of the + <em>Template</em> class created either by the + <em>TemplateManager</em> or by the <em>TemplateCompiler</em>. + + @param part The part of a multipart template to process. + This parameter can be used only together with a multipart + template. It specifies the number of the part to process. + It must be greater than zero, because the parts are numbered + from one. + + The parts must be processed in the right order. You + cannot process a part which precedes an already processed part. + + If this parameter is not specified, then the whole template + is processed, or all remaining parts are processed. 
+ """ + self.DEB("APP INPUT:") + if self._debug: pprint.pprint(self._vars, sys.stderr) + if part != None and (part == 0 or part < self._current_part): + raise TemplateError, "process() - invalid part number" + + # This flag means "jump behind the end of current statement" or + # "skip the parameters of current statement". + # Even parameters that actually are not present in the template + # do appear in the list of tokens as empty items ! + skip_params = 0 + + # Stack for enabling or disabling output in response to TMPL_IF, + # TMPL_UNLESS, TMPL_ELSE and TMPL_LOOPs with no passes. + output_control = [] + ENABLE_OUTPUT = 1 + DISABLE_OUTPUT = 0 + + # Stacks for data related to loops. + loop_name = [] # name of a loop + loop_pass = [] # current pass of a loop (counted from zero) + loop_start = [] # index of loop start in token list + loop_total = [] # total number of passes in a loop + + tokens = template.tokens() + len_tokens = len(tokens) + out = "" # buffer for processed output + + # Recover position at which we ended after processing of last part. + i = self._current_pos + + # Process the list of tokens. + while 1: + if i == len_tokens: break + if skip_params: + # Skip the parameters following a statement. + skip_params = 0 + i += PARAMS_NUMBER + continue + + token = tokens[i] + if token.startswith("<TMPL_") or \ + token.startswith("</TMPL_"): + if token == "<TMPL_VAR": + # TMPL_VARs should be first. They are the most common. + var = tokens[i + PARAM_NAME] + if not var: + raise TemplateError, "No identifier in <TMPL_VAR>." + escape = tokens[i + PARAM_ESCAPE] + globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + + # If output of current block is not disabled then append + # the substitued and escaped variable to the output. + if DISABLE_OUTPUT not in output_control: + value = str(self.find_value(var, loop_name, loop_pass, + loop_total, globalp)) + out += self.escape(value, escape) + self.DEB("VAR: " + str(var)) + + elif token == "<TMPL_LOOP": + var = tokens[i + PARAM_NAME] + if not var: + raise TemplateError, "No identifier in <TMPL_LOOP>." + skip_params = 1 + + # Find total number of passes in this loop. + passtotal = self.find_value(var, loop_name, loop_pass, + loop_total) + if not passtotal: passtotal = 0 + # Push data for this loop on the stack. + loop_total.append(passtotal) + loop_start.append(i) + loop_pass.append(0) + loop_name.append(var) + + # Disable output of loop block if the number of passes + # in this loop is zero. + if passtotal == 0: + # This loop is empty. + output_control.append(DISABLE_OUTPUT) + self.DEB("LOOP: DISABLE: " + str(var)) + else: + output_control.append(ENABLE_OUTPUT) + self.DEB("LOOP: FIRST PASS: %s TOTAL: %d"\ + % (var, passtotal)) + + elif token == "<TMPL_IF": + var = tokens[i + PARAM_NAME] + if not var: + raise TemplateError, "No identifier in <TMPL_IF>." + globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + if self.find_value(var, loop_name, loop_pass, + loop_total, globalp): + output_control.append(ENABLE_OUTPUT) + self.DEB("IF: ENABLE: " + str(var)) + else: + output_control.append(DISABLE_OUTPUT) + self.DEB("IF: DISABLE: " + str(var)) + + elif token == "<TMPL_UNLESS": + var = tokens[i + PARAM_NAME] + if not var: + raise TemplateError, "No identifier in <TMPL_UNLESS>." 
+ globalp = tokens[i + PARAM_GLOBAL] + skip_params = 1 + if self.find_value(var, loop_name, loop_pass, + loop_total, globalp): + output_control.append(DISABLE_OUTPUT) + self.DEB("UNLESS: DISABLE: " + str(var)) + else: + output_control.append(ENABLE_OUTPUT) + self.DEB("UNLESS: ENABLE: " + str(var)) + + elif token == "</TMPL_LOOP": + skip_params = 1 + if not loop_name: + raise TemplateError, "Unmatched </TMPL_LOOP>." + + # If this loop was not disabled, then record the pass. + if loop_total[-1] > 0: loop_pass[-1] += 1 + + if loop_pass[-1] == loop_total[-1]: + # There are no more passes in this loop. Pop + # the loop from stack. + loop_pass.pop() + loop_name.pop() + loop_start.pop() + loop_total.pop() + output_control.pop() + self.DEB("LOOP: END") + else: + # Jump to the beggining of this loop block + # to process next pass of the loop. + i = loop_start[-1] + self.DEB("LOOP: NEXT PASS") + + elif token == "</TMPL_IF": + skip_params = 1 + if not output_control: + raise TemplateError, "Unmatched </TMPL_IF>." + output_control.pop() + self.DEB("IF: END") + + elif token == "</TMPL_UNLESS": + skip_params = 1 + if not output_control: + raise TemplateError, "Unmatched </TMPL_UNLESS>." + output_control.pop() + self.DEB("UNLESS: END") + + elif token == "<TMPL_ELSE": + skip_params = 1 + if not output_control: + raise TemplateError, "Unmatched <TMPL_ELSE>." + if output_control[-1] == DISABLE_OUTPUT: + # Condition was false, activate the ELSE block. + output_control[-1] = ENABLE_OUTPUT + self.DEB("ELSE: ENABLE") + elif output_control[-1] == ENABLE_OUTPUT: + # Condition was true, deactivate the ELSE block. + output_control[-1] = DISABLE_OUTPUT + self.DEB("ELSE: DISABLE") + else: + raise TemplateError, "BUG: ELSE: INVALID FLAG" + + elif token == "<TMPL_BOUNDARY": + if part and part == self._current_part: + self.DEB("BOUNDARY ON") + self._current_part += 1 + self._current_pos = i + 1 + PARAMS_NUMBER + break + else: + skip_params = 1 + self.DEB("BOUNDARY OFF") + self._current_part += 1 + + elif token == "<TMPL_INCLUDE": + # TMPL_INCLUDE is left in the compiled template only + # when it was not replaced by the parser. + skip_params = 1 + filename = tokens[i + PARAM_NAME] + out += """ + <br /> + <p> + <strong>HTMLTMPL WARNING:</strong><br /> + Cannot include template: <strong>%s</strong> + </p> + <br /> + """ % filename + self.DEB("CANNOT INCLUDE WARNING") + + elif token == "<TMPL_GETTEXT": + skip_params = 1 + if DISABLE_OUTPUT not in output_control: + text = tokens[i + PARAM_GETTEXT_STRING] + out += gettext.gettext(text) + self.DEB("GETTEXT: " + text) + + else: + # Unknown processing directive. + raise TemplateError, "Invalid statement %s>." % token + + elif DISABLE_OUTPUT not in output_control: + # Raw textual template data. + # If output of current block is not disabled, then + # append template data to the output buffer. + out += token + + i += 1 + # end of the big while loop + + # Check whether all opening statements were closed. + if loop_name: raise TemplateError, "Missing </TMPL_LOOP>." + if output_control: raise TemplateError, "Missing </TMPL_IF> or </TMPL_UNLESS>" + return out + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. 
+ @hidden + """ + if self._debug: print >> sys.stderr, str + + def find_value(self, var, loop_name, loop_pass, loop_total, + global_override=None): + """ Search the self._vars data structure to find variable var + located in currently processed pass of a loop which + is currently being processed. If the variable is an ordinary + variable, then return it. + + If the variable is an identificator of a loop, then + return the total number of times this loop will + be executed. + + Return an empty string, if the variable is not + found at all. + + @hidden + """ + # Search for the requested variable in magic vars if the name + # of the variable starts with "__" and if we are inside a loop. + if self._magic_vars and var.startswith("__") and loop_name: + return self.magic_var(var, loop_pass[-1], loop_total[-1]) + + # Search for an ordinary variable or for a loop. + # Recursively search in self._vars for the requested variable. + scope = self._vars + globals = [] + for i in range(len(loop_name)): + # If global lookup is on then push the value on the stack. + if ((self._global_vars and global_override != "0") or \ + global_override == "1") and scope.has_key(var) and \ + self.is_ordinary_var(scope[var]): + globals.append(scope[var]) + + # Descent deeper into the hierarchy. + if scope.has_key(loop_name[i]) and scope[loop_name[i]]: + scope = scope[loop_name[i]][loop_pass[i]] + else: + return "" + + if scope.has_key(var): + # Value exists in current loop. + if type(scope[var]) == ListType: + # The requested value is a loop. + # Return total number of its passes. + return len(scope[var]) + else: + return scope[var] + elif globals and \ + ((self._global_vars and global_override != "0") or \ + global_override == "1"): + # Return globally looked up value. + return globals.pop() + else: + # No value found. + if var[0].isupper(): + # This is a loop name. + # Return zero, because the user wants to know number + # of its passes. + return 0 + else: + return "" + + def magic_var(self, var, loop_pass, loop_total): + """ Resolve and return value of a magic variable. + Raise an exception if the magic variable is not recognized. + + @hidden + """ + self.DEB("MAGIC: '%s', PASS: %d, TOTAL: %d"\ + % (var, loop_pass, loop_total)) + if var == "__FIRST__": + if loop_pass == 0: + return 1 + else: + return 0 + elif var == "__LAST__": + if loop_pass == loop_total - 1: + return 1 + else: + return 0 + elif var == "__INNER__": + # If this is neither the first nor the last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + return 1 + else: + return 0 + elif var == "__PASS__": + # Magic variable __PASS__ counts passes from one. + return loop_pass + 1 + elif var == "__PASSTOTAL__": + return loop_total + elif var == "__ODD__": + # Internally pass numbers stored in loop_pass are counted from + # zero. But the template language presents them counted from one. + # Therefore we must add one to the actual loop_pass value to get + # the value we present to the user. + if (loop_pass + 1) % 2 != 0: + return 1 + else: + return 0 + elif var.startswith("__EVERY__"): + # Magic variable __EVERY__x is never true in first or last pass. + if loop_pass != 0 and loop_pass != loop_total - 1: + # Check if an integer follows the variable name. + try: + every = int(var[9:]) # nine is length of "__EVERY__" + except ValueError: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Invalid pass number." + else: + if not every: + raise TemplateError, "Magic variable __EVERY__x: "\ + "Pass number cannot be zero." 
+ elif (loop_pass + 1) % every == 0: + self.DEB("MAGIC: EVERY: " + str(every)) + return 1 + else: + return 0 + else: + return 0 + else: + raise TemplateError, "Invalid magic variable '%s'." % var + + def escape(self, str, override=""): + """ Escape a string either by HTML escaping or by URL escaping. + @hidden + """ + ESCAPE_QUOTES = 1 + if (self._html_escape and override != "NONE" and override != "0" and \ + override != "URL") or override == "HTML" or override == "1": + return cgi.escape(str, ESCAPE_QUOTES) + elif override == "URL": + return urllib.quote_plus(str) + else: + return str + + def is_ordinary_var(self, var): + """ Return true if var is a scalar. (not a reference to loop) + @hidden + """ + if type(var) == StringType or type(var) == IntType or \ + type(var) == LongType or type(var) == FloatType: + return 1 + else: + return 0 + + +############################################## +# CLASS: TemplateCompiler # +############################################## + +class TemplateCompiler: + """ Preprocess, parse, tokenize and compile the template. + + This class parses the template and produces a 'compiled' form + of it. This compiled form is an instance of the <em>Template</em> + class. The compiled form is used as input for the TemplateProcessor + which uses it to actually process the template. + + This class should be used direcly only when you need to compile + a template from a string. If your template is in a file, then you + should use the <em>TemplateManager</em> class which provides + a higher level interface to this class and also can save the + compiled template to disk in a precompiled form. + """ + + def __init__(self, include=1, max_include=5, comments=1, gettext=0, + debug=0): + """ Constructor. + + @header __init__(include=1, max_include=5, comments=1, gettext=0, + debug=0) + + @param include Enable or disable included templates. + @param max_include Maximum depth of nested inclusions. + @param comments Enable or disable template comments. + @param gettext Enable or disable gettext support. + @param debug Enable or disable debugging messages. + """ + + self._include = include + self._max_include = max_include + self._comments = comments + self._gettext = gettext + self._debug = debug + + # This is a list of filenames of all included templates. + # It's modified by the include_templates() method. + self._include_files = [] + + # This is a counter of current inclusion depth. It's used to prevent + # infinite recursive includes. + self._include_level = 0 + + def compile(self, file): + """ Compile template from a file. + + @header compile(file) + @return Compiled template. + The return value is an instance of the <em>Template</em> + class. + + @param file Filename of the template. + See the <em>prepare()</em> method of the <em>TemplateManager</em> + class for exaplanation of this parameter. + """ + + self.DEB("COMPILING FROM FILE: " + file) + self._include_path = os.path.join(os.path.dirname(file), INCLUDE_DIR) + tokens = self.parse(self.read(file)) + compile_params = (self._include, self._max_include, self._comments, + self._gettext) + return Template(__version__, file, self._include_files, + tokens, compile_params, self._debug) + + def compile_string(self, data): + """ Compile template from a string. + + This method compiles a template from a string. The + template cannot include any templates. + <strong>TMPL_INCLUDE</strong> statements are turned into warnings. + + @header compile_string(data) + @return Compiled template. 
+ The return value is an instance of the <em>Template</em> + class. + + @param data String containing the template data. + """ + self.DEB("COMPILING FROM STRING") + self._include = 0 + tokens = self.parse(data) + compile_params = (self._include, self._max_include, self._comments, + self._gettext) + return Template(__version__, None, None, tokens, compile_params, + self._debug) + + ############################################## + # PRIVATE METHODS # + ############################################## + + def DEB(self, str): + """ Print debugging message to stderr if debugging is enabled. + @hidden + """ + if self._debug: print >> sys.stderr, str + + def read(self, filename): + """ Read content of file and return it. Raise an error if a problem + occurs. + @hidden + """ + self.DEB("READING: " + filename) + try: + f = None + try: + f = open(filename, "r") + data = f.read() + except IOError, (errno, errstr): + raise TemplateError, "IO error while reading template '%s': "\ + "(%d) %s" % (filename, errno, errstr) + else: + return data + finally: + if f: f.close() + + def parse(self, template_data): + """ Parse the template. This method is recursively called from + within the include_templates() method. + + @return List of processing tokens. + @hidden + """ + if self._comments: + self.DEB("PREPROCESS: COMMENTS") + template_data = self.remove_comments(template_data) + tokens = self.tokenize(template_data) + if self._include: + self.DEB("PREPROCESS: INCLUDES") + self.include_templates(tokens) + return tokens + + def remove_comments(self, template_data): + """ Remove comments from the template data. + @hidden + """ + pattern = r"### .*" + return re.sub(pattern, "", template_data) + + def include_templates(self, tokens): + """ Process TMPL_INCLUDE statements. Use the include_level counter + to prevent infinite recursion. Record paths to all included + templates to self._include_files. + @hidden + """ + i = 0 + out = "" # buffer for output + skip_params = 0 + + # Process the list of tokens. + while 1: + if i == len(tokens): break + if skip_params: + skip_params = 0 + i += PARAMS_NUMBER + continue + + token = tokens[i] + if token == "<TMPL_INCLUDE": + filename = tokens[i + PARAM_NAME] + if not filename: + raise TemplateError, "No filename in <TMPL_INCLUDE>." + self._include_level += 1 + if self._include_level > self._max_include: + # Do not include the template. + # Protection against infinite recursive includes. + skip_params = 1 + self.DEB("INCLUDE: LIMIT REACHED: " + filename) + else: + # Include the template. + skip_params = 0 + include_file = os.path.join(self._include_path, filename) + self._include_files.append(include_file) + include_data = self.read(include_file) + include_tokens = self.parse(include_data) + + # Append the tokens from the included template to actual + # position in the tokens list, replacing the TMPL_INCLUDE + # token and its parameters. + tokens[i:i+PARAMS_NUMBER+1] = include_tokens + i = i + len(include_tokens) + self.DEB("INCLUDED: " + filename) + continue # Do not increment 'i' below. + i += 1 + # end of the main while loop + + if self._include_level > 0: self._include_level -= 1 + return out + + def tokenize(self, template_data): + """ Split the template into tokens separated by template statements. + The statements itself and associated parameters are also + separately included in the resulting list of tokens. + Return list of the tokens. 
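+
+            For example, "a <TMPL_VAR title> b" roughly becomes
+            ['a ', '<TMPL_VAR', 'title', None, None, ' b']: every statement
+            is followed by its NAME, ESCAPE and GLOBAL parameter slots.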
+ + @hidden + """ + self.DEB("TOKENIZING TEMPLATE") + # NOTE: The TWO double quotes in character class in the regexp below + # are there only to prevent confusion of syntax highlighter in Emacs. + pattern = r""" + (?:^[ \t]+)? # eat spaces, tabs (opt.) + (< + (?:!--[ ])? # comment start + space (opt.) + /?TMPL_[A-Z]+ # closing slash / (opt.) + statement + [ a-zA-Z0-9""/.=:_\\-]* # this spans also comments ending (--) + >) + [%s]? # eat trailing newline (opt.) + """ % os.linesep + rc = re.compile(pattern, re.VERBOSE | re.MULTILINE) + split = rc.split(template_data) + tokens = [] + for statement in split: + if statement.startswith("<TMPL_") or \ + statement.startswith("</TMPL_") or \ + statement.startswith("<!-- TMPL_") or \ + statement.startswith("<!-- /TMPL_"): + # Processing statement. + statement = self.strip_brackets(statement) + params = re.split(r"\s+", statement) + tokens.append(self.find_directive(params)) + tokens.append(self.find_name(params)) + tokens.append(self.find_param("ESCAPE", params)) + tokens.append(self.find_param("GLOBAL", params)) + else: + # "Normal" template data. + if self._gettext: + self.DEB("PARSING GETTEXT STRINGS") + self.gettext_tokens(tokens, statement) + else: + tokens.append(statement) + return tokens + + def gettext_tokens(self, tokens, str): + """ Find gettext strings and return appropriate array of + processing tokens. + @hidden + """ + escaped = 0 + gt_mode = 0 + i = 0 + buf = "" + while(1): + if i == len(str): break + if str[i] == "\\": + escaped = 0 + if str[i+1] == "\\": + buf += "\\" + i += 2 + continue + elif str[i+1] == "[" or str[i+1] == "]": + escaped = 1 + else: + buf += "\\" + elif str[i] == "[" and str[i+1] == "[": + if gt_mode: + if escaped: + escaped = 0 + buf += "[" + else: + buf += "[" + else: + if escaped: + escaped = 0 + buf += "[" + else: + tokens.append(buf) + buf = "" + gt_mode = 1 + i += 2 + continue + elif str[i] == "]" and str[i+1] == "]": + if gt_mode: + if escaped: + escaped = 0 + buf += "]" + else: + self.add_gettext_token(tokens, buf) + buf = "" + gt_mode = 0 + i += 2 + continue + else: + if escaped: + escaped = 0 + buf += "]" + else: + buf += "]" + else: + escaped = 0 + buf += str[i] + i += 1 + # end of the loop + + if buf: + tokens.append(buf) + + def add_gettext_token(self, tokens, str): + """ Append a gettext token and gettext string to the tokens array. + @hidden + """ + self.DEB("GETTEXT PARSER: TOKEN: " + str) + tokens.append("<TMPL_GETTEXT") + tokens.append(str) + tokens.append(None) + tokens.append(None) + + def strip_brackets(self, statement): + """ Strip HTML brackets (with optional HTML comments) from the + beggining and from the end of a statement. + @hidden + """ + if statement.startswith("<!-- TMPL_") or \ + statement.startswith("<!-- /TMPL_"): + return statement[5:-4] + else: + return statement[1:-1] + + def find_directive(self, params): + """ Extract processing directive (TMPL_*) from a statement. + @hidden + """ + directive = params[0] + del params[0] + self.DEB("TOKENIZER: DIRECTIVE: " + directive) + return "<" + directive + + def find_name(self, params): + """ Extract identifier from a statement. The identifier can be + specified both implicitely or explicitely as a 'NAME' parameter. 
+ @hidden + """ + if len(params) > 0 and '=' not in params[0]: + # implicit identifier + name = params[0] + del params[0] + else: + # explicit identifier as a 'NAME' parameter + name = self.find_param("NAME", params) + self.DEB("TOKENIZER: NAME: " + str(name)) + return name + + def find_param(self, param, params): + """ Extract value of parameter from a statement. + @hidden + """ + for pair in params: + name, value = pair.split("=") + if not name or not value: + raise TemplateError, "Syntax error in template." + if name == param: + if value[0] == '"': + # The value is in double quotes. + ret_value = value[1:-1] + else: + # The value is without double quotes. + ret_value = value + self.DEB("TOKENIZER: PARAM: '%s' => '%s'" % (param, ret_value)) + return ret_value + else: + self.DEB("TOKENIZER: PARAM: '%s' => NOT DEFINED" % param) + return None + + +############################################## +# CLASS: Template # +############################################## + +class Template: + """ This class represents a compiled template. + + This class provides storage and methods for the compiled template + and associated metadata. It's serialized by pickle if we need to + save the compiled template to disk in a precompiled form. + + You should never instantiate this class directly. Always use the + <em>TemplateManager</em> or <em>TemplateCompiler</em> classes to + create the instances of this class. + + The only method which you can directly use is the <em>is_uptodate</em> + method. + """ + + def __init__(self, version, file, include_files, tokens, compile_params, + debug=0): + """ Constructor. + @hidden + """ + self._version = version + self._file = file + self._tokens = tokens + self._compile_params = compile_params + self._debug = debug + self._mtime = None + self._include_mtimes = {} + + if not file: + self.DEB("TEMPLATE WAS COMPILED FROM A STRING") + return + + # Save modifitcation time of the main template file. + if os.path.isfile(file): + self._mtime = os.path.getmtime(file) + else: + raise TemplateError, "Template: file does not exist: '%s'" % file + + # Save modificaton times of all included template files. + for inc_file in include_files: + if os.path.isfile(inc_file): + self._include_mtimes[inc_file] = os.path.getmtime(inc_file) + else: + raise TemplateError, "Template: file does not exist: '%s'"\ + % inc_file + + self.DEB("NEW TEMPLATE CREATED") + + def is_uptodate(self, compile_params=None): + """ Check whether the compiled template is uptodate. + + Return true if this compiled template is uptodate. + Return false, if the template source file was changed on the + disk since it was compiled. + Works by comparison of modification times. + Also takes modification times of all included templates + into account. + + @header is_uptodate(compile_params=None) + @return True if the template is uptodate, false otherwise. + + @param compile_params Only for internal use. + Do not use this optional parameter. It's intended only for + internal use by the <em>TemplateManager</em>. + """ + if not self._file: + self.DEB("TEMPLATE COMPILED FROM A STRING") + return 0 + + if self._version != __version__: + self.DEB("TEMPLATE: VERSION NOT UPTODATE") + return 0 + + if compile_params != None and compile_params != self._compile_params: + self.DEB("TEMPLATE: DIFFERENT COMPILATION PARAMS") + return 0 + + # Check modification times of the main template and all included + # templates. If the included template no longer exists, then + # the problem will be resolved when the template is recompiled. 
+ + # Main template file. + if not (os.path.isfile(self._file) and \ + self._mtime == os.path.getmtime(self._file)): + self.DEB("TEMPLATE: NOT UPTODATE: " + self._file) + return 0 + + # Included templates. + for inc_file in self._include_mtimes.keys(): + if not (os.path.isfile(inc_file) and \ + self._include_mtimes[inc_file] == \ + os.path.getmtime(inc_file)): + self.DEB("TEMPLATE: NOT UPTODATE: " + inc_file) + return 0 + else: + self.DEB("TEMPLATE: UPTODATE") + return 1 + + def tokens(self): + """ Get tokens of this template. + @hidden + """ + return self._tokens + + def file(self): + """ Get filename of the main file of this template. + @hidden + """ + return self._file + + def debug(self, debug): + """ Get debugging state. + @hidden + """ + self._debug = debug + + ############################################## + # PRIVATE METHODS # + ############################################## + + def __getstate__(self): + """ Used by pickle when the class is serialized. + Remove the 'debug' attribute before serialization. + @hidden + """ + dict = copy.copy(self.__dict__) + del dict["_debug"] + return dict + + def __setstate__(self, dict): + """ Used by pickle when the class is unserialized. + Add the 'debug' attribute. + @hidden + """ + dict["_debug"] = 0 + self.__dict__ = dict + + + def DEB(self, str): + """ Print debugging message to stderr. + @hidden + """ + if self._debug: print >> sys.stderr, str + + +############################################## +# EXCEPTIONS # +############################################## + +class TemplateError(Exception): + """ Fatal exception. Raised on runtime or template syntax errors. + + This exception is raised when a runtime error occurs or when a syntax + error in the template is found. It has one parameter which always + is a string containing a description of the error. + + All potential IOError exceptions are handled by the module and are + converted to TemplateError exceptions. That means you should catch the + TemplateError exception if there is a possibility that for example + the template file will not be accesssible. + + The exception can be raised by constructors or by any method of any + class. + + The instance is no longer usable when this exception is raised. + """ + + def __init__(self, error): + """ Constructor. + @hidden + """ + Exception.__init__(self, "Htmltmpl error: " + error) + + +class PrecompiledError(Exception): + """ This exception is _PRIVATE_ and non fatal. + @hidden + """ + + def __init__(self, template): + """ Constructor. + @hidden + """ + Exception.__init__(self, template) + diff --git a/lib/venus/planet/vendor/httplib2/__init__.py b/lib/venus/planet/vendor/httplib2/__init__.py new file mode 100644 index 0000000..69bf4ec --- /dev/null +++ b/lib/venus/planet/vendor/httplib2/__init__.py @@ -0,0 +1,917 @@ +from __future__ import generators +""" +httplib2 + +A caching http interface that supports ETags and gzip +to conserve bandwidth. 
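+
+Minimal usage sketch (the cache directory and URL are illustrative):
+
+    h = Http(".cache")
+    (resp, content) = h.request("http://example.org/", "GET")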
+ +Requires Python 2.3 or later + +""" + +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", + "James Antill", + "Xavier Verges Farrero", + "Jonathan Feinberg", + "Blair Zajac", + "Sam Ruby", + "Louis Nyffenegger"] +__license__ = "MIT" +__version__ = "$Rev: 227 $" + +import re +import sys +import md5 +import email +import email.Utils +import email.Message +import StringIO +import gzip +import zlib +import httplib +import urlparse +import base64 +import os +import copy +import calendar +import time +import random +import sha +import hmac +from gettext import gettext as _ +from socket import gaierror + +if sys.version_info >= (2,3): + from iri2uri import iri2uri +else: + def iri2uri(uri): + return uri + +__all__ = ['Http', 'Response', 'HttpLib2Error', + 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', + 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', + 'debuglevel'] + + +# The httplib debug level, set to a non-zero value to get debug output +debuglevel = 0 + +# Python 2.3 support +if sys.version_info < (2,4): + def sorted(seq): + seq.sort() + return seq + +# Python 2.3 support +def HTTPResponse__getheaders(self): + """Return list of (header, value) tuples.""" + if self.msg is None: + raise httplib.ResponseNotReady() + return self.msg.items() + +if not hasattr(httplib.HTTPResponse, 'getheaders'): + httplib.HTTPResponse.getheaders = HTTPResponse__getheaders + +# All exceptions raised here derive from HttpLib2Error +class HttpLib2Error(Exception): pass + +class RedirectMissingLocation(HttpLib2Error): pass +class RedirectLimit(HttpLib2Error): pass +class FailedToDecompressContent(HttpLib2Error): pass +class UnimplementedDigestAuthOptionError(HttpLib2Error): pass +class UnimplementedHmacDigestAuthOptionError(HttpLib2Error): pass +class RelativeURIError(HttpLib2Error): pass +class ServerNotFoundError(HttpLib2Error): pass + +# Open Items: +# ----------- +# Proxy support + +# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) + +# Pluggable cache storage (supports storing the cache in +# flat files by default. We need a plug-in architecture +# that can support Berkeley DB and Squid) + +# == Known Issues == +# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. +# Does not handle Cache-Control: max-stale +# Does not use Age: headers when calculating cache freshness. + + +# The number of redirections to follow before giving up. +# Note that only GET redirects are automatically followed. +# Will also honor 301 requests by saving that info and never +# requesting that URI again. +DEFAULT_MAX_REDIRECTS = 5 + +# Which headers are hop-by-hop headers by default +HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] + +def _get_end2end_headers(response): + hopbyhop = list(HOP_BY_HOP) + hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) + return [header for header in response.keys() if header not in hopbyhop] + +URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") + +def parse_uri(uri): + """Parses a URI using the regex given in Appendix B of RFC 3986. 
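+
+    For example (illustrative value):
+
+        parse_uri("http://example.org/a?b=1#c")
+        == ('http', 'example.org', '/a', 'b=1', 'c')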
+ + (scheme, authority, path, query, fragment) = parse_uri(uri) + """ + groups = URI.match(uri).groups() + return (groups[1], groups[3], groups[4], groups[6], groups[8]) + +def urlnorm(uri): + (scheme, authority, path, query, fragment) = parse_uri(uri) + if not scheme or not authority: + raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) + authority = authority.lower() + scheme = scheme.lower() + if not path: + path = "/" + # Could do syntax based normalization of the URI before + # computing the digest. See Section 6.2.2 of Std 66. + request_uri = query and "?".join([path, query]) or path + scheme = scheme.lower() + defrag_uri = scheme + "://" + authority + request_uri + return scheme, authority, request_uri, defrag_uri + + +# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) +re_url_scheme = re.compile(r'^\w+://') +re_slash = re.compile(r'[?/:|]+') + +def safename(filename): + """Return a filename suitable for the cache. + + Strips dangerous and common characters to create a filename we + can use to store the cache in. + """ + + try: + if re_url_scheme.match(filename): + if isinstance(filename,str): + filename = filename.decode('utf-8') + filename = filename.encode('idna') + else: + filename = filename.encode('idna') + except: + pass + if isinstance(filename,unicode): + filename=filename.encode('utf-8') + filemd5 = md5.new(filename).hexdigest() + filename = re_url_scheme.sub("", filename) + filename = re_slash.sub(",", filename) + + # limit length of filename + if len(filename)>200: + filename=filename[:200] + return ",".join((filename, filemd5)) + +NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') +def _normalize_headers(headers): + return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) + +def _parse_cache_control(headers): + retval = {} + if headers.has_key('cache-control'): + parts = headers['cache-control'].split(',') + parts_with_args = [tuple([x.strip() for x in part.split("=")]) for part in parts if -1 != part.find("=")] + parts_wo_args = [(name.strip(), 1) for name in parts if -1 == name.find("=")] + retval = dict(parts_with_args + parts_wo_args) + return retval + +# Whether to use a strict mode to parse WWW-Authenticate headers +# Might lead to bad results in case of ill-formed header value, +# so disabled by default, falling back to relaxed parsing. +# Set to true to turn on, usefull for testing servers. +USE_WWW_AUTH_STRICT_PARSING = 0 + +# In regex below: +# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP +# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space +# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: +# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? 
+WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") +WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") +UNQUOTE_PAIRS = re.compile(r'\\(.)') +def _parse_www_authenticate(headers, headername='www-authenticate'): + """Returns a dictionary of dictionaries, one dict + per auth_scheme.""" + retval = {} + if headers.has_key(headername): + authenticate = headers[headername].strip() + www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED + while authenticate: + # Break off the scheme at the beginning of the line + if headername == 'authentication-info': + (auth_scheme, the_rest) = ('digest', authenticate) + else: + (auth_scheme, the_rest) = authenticate.split(" ", 1) + # Now loop over all the key value pairs that come after the scheme, + # being careful not to roll into the next scheme + match = www_auth.search(the_rest) + auth_params = {} + while match: + if match and len(match.groups()) == 3: + (key, value, the_rest) = match.groups() + auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) + match = www_auth.search(the_rest) + retval[auth_scheme.lower()] = auth_params + authenticate = the_rest.strip() + return retval + + +def _entry_disposition(response_headers, request_headers): + """Determine freshness from the Date, Expires and Cache-Control headers. + + We don't handle the following: + + 1. Cache-Control: max-stale + 2. Age: headers are not used in the calculations. + + Not that this algorithm is simpler than you might think + because we are operating as a private (non-shared) cache. + This lets us ignore 's-maxage'. We can also ignore + 'proxy-invalidate' since we aren't a proxy. + We will never return a stale document as + fresh as a design decision, and thus the non-implementation + of 'max-stale'. This also lets us safely ignore 'must-revalidate' + since we operate as if every server has sent 'must-revalidate'. + Since we are private we get to ignore both 'public' and + 'private' parameters. We also ignore 'no-transform' since + we don't do any transformations. + The 'no-store' parameter is handled at a higher level. 
+ So the only Cache-Control parameters we look at are: + + no-cache + only-if-cached + max-age + min-fresh + """ + + retval = "STALE" + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + + if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1: + retval = "TRANSPARENT" + if 'cache-control' not in request_headers: + request_headers['cache-control'] = 'no-cache' + elif cc.has_key('no-cache'): + retval = "TRANSPARENT" + elif cc_response.has_key('no-cache'): + retval = "STALE" + elif cc.has_key('only-if-cached'): + retval = "FRESH" + elif response_headers.has_key('date'): + date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date'])) + now = time.time() + current_age = max(0, now - date) + if cc_response.has_key('max-age'): + try: + freshness_lifetime = int(cc_response['max-age']) + except: + freshness_lifetime = 0 + elif response_headers.has_key('expires'): + expires = email.Utils.parsedate_tz(response_headers['expires']) + freshness_lifetime = max(0, calendar.timegm(expires) - date) + else: + freshness_lifetime = 0 + if cc.has_key('max-age'): + try: + freshness_lifetime = int(cc['max-age']) + except: + freshness_lifetime = 0 + if cc.has_key('min-fresh'): + try: + min_fresh = int(cc['min-fresh']) + except: + min_fresh = 0 + current_age += min_fresh + if freshness_lifetime > current_age: + retval = "FRESH" + return retval + +def _decompressContent(response, new_content): + content = new_content + try: + encoding = response.get('content-encoding', None) + if encoding in ['gzip', 'deflate']: + if encoding == 'gzip': + content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() + if encoding == 'deflate': + content = zlib.decompress(content) + response['content-length'] = str(len(content)) + del response['content-encoding'] + except: + content = "" + raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding')) + return content + +def _updateCache(request_headers, response_headers, content, cache, cachekey): + if cachekey: + cc = _parse_cache_control(request_headers) + cc_response = _parse_cache_control(response_headers) + if cc.has_key('no-store') or cc_response.has_key('no-store'): + cache.delete(cachekey) + else: + info = email.Message.Message() + for key, value in response_headers.iteritems(): + if key not in ['status','content-encoding','transfer-encoding']: + info[key] = value + + status = response_headers.status + if status == 304: + status = 200 + + status_header = 'status: %d\r\n' % response_headers.status + + header_str = info.as_string() + + header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) + text = "".join([status_header, header_str, content]) + + cache.set(cachekey, text) + +def _cnonce(): + dig = md5.new("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest() + return dig[:16] + +def _wsse_username_token(cnonce, iso_now, password): + return base64.encodestring(sha.new("%s%s%s" % (cnonce, iso_now, password)).digest()).strip() + + +# For credentials we need two things, first +# a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) +# Then we also need a list of URIs that have already demanded authentication +# That list is tricky since sub-URIs can take the same auth, or the +# auth scheme may change as you descend the tree. +# So we also need each Auth instance to be able to tell us +# how close to the 'top' it is. 
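+#
+# Rough flow (a sketch): credentials registered with Http.add_credentials()
+# are tried when a request comes back 401; _auth_from_challenge() picks a
+# scheme the server offered, in AUTH_SCHEME_ORDER, and the matching class
+# below rewrites the headers for the retried request.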
+ +class Authentication: + def __init__(self, credentials, host, request_uri, headers, response, content, http): + (scheme, authority, path, query, fragment) = parse_uri(request_uri) + self.path = path + self.host = host + self.credentials = credentials + self.http = http + + def depth(self, request_uri): + (scheme, authority, path, query, fragment) = parse_uri(request_uri) + return request_uri[len(self.path):].count("/") + + def inscope(self, host, request_uri): + # XXX Should we normalize the request_uri? + (scheme, authority, path, query, fragment) = parse_uri(request_uri) + return (host == self.host) and path.startswith(self.path) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header. Over-rise this in sub-classes.""" + pass + + def response(self, response, content): + """Gives us a chance to update with new nonces + or such returned from the last authorized response. + Over-rise this in sub-classes if necessary. + + Return TRUE is the request is to be retried, for + example Digest may return stale=true. + """ + return False + + + +class BasicAuthentication(Authentication): + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['authorization'] = 'Basic ' + base64.encodestring("%s:%s" % self.credentials).strip() + + +class DigestAuthentication(Authentication): + """Only do qop='auth' and MD5, since that + is all Apache currently implements""" + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + self.challenge = challenge['digest'] + qop = self.challenge.get('qop') + self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None + if self.challenge['qop'] is None: + raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop)) + self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5') + if self.challenge['algorithm'] != 'MD5': + raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." 
% self.challenge['algorithm'])) + self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) + self.challenge['nc'] = 1 + + def request(self, method, request_uri, headers, content, cnonce = None): + """Modify the request headers""" + H = lambda x: md5.new(x).hexdigest() + KD = lambda s, d: H("%s:%s" % (s, d)) + A2 = "".join([method, ":", request_uri]) + self.challenge['cnonce'] = cnonce or _cnonce() + request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'], + '%08x' % self.challenge['nc'], + self.challenge['cnonce'], + self.challenge['qop'], H(A2) + )) + headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( + self.credentials[0], + self.challenge['realm'], + self.challenge['nonce'], + request_uri, + self.challenge['algorithm'], + request_digest, + self.challenge['qop'], + self.challenge['nc'], + self.challenge['cnonce'], + ) + self.challenge['nc'] += 1 + + def response(self, response, content): + if not response.has_key('authentication-info'): + challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) + if 'true' == challenge.get('stale'): + self.challenge['nonce'] = challenge['nonce'] + self.challenge['nc'] = 1 + return True + else: + updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) + + if updated_challenge.has_key('nextnonce'): + self.challenge['nonce'] = updated_challenge['nextnonce'] + self.challenge['nc'] = 1 + return False + + +class HmacDigestAuthentication(Authentication): + """Adapted from Robert Sayre's code and DigestAuthentication above.""" + __author__ = "Thomas Broyer (t.broyer@ltgt.net)" + + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + challenge = _parse_www_authenticate(response, 'www-authenticate') + self.challenge = challenge['hmacdigest'] + # TODO: self.challenge['domain'] + self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') + if self.challenge['reason'] not in ['unauthorized', 'integrity']: + self.challenge['reason'] = 'unauthorized' + self.challenge['salt'] = self.challenge.get('salt', '') + if not self.challenge.get('snonce'): + raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) + self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') + if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: + raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) + self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') + if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: + raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." 
% self.challenge['pw-algorithm'])) + if self.challenge['algorithm'] == 'HMAC-MD5': + self.hashmod = md5 + else: + self.hashmod = sha + if self.challenge['pw-algorithm'] == 'MD5': + self.pwhashmod = md5 + else: + self.pwhashmod = sha + self.key = "".join([self.credentials[0], ":", + self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), + ":", self.challenge['realm'] + ]) + self.key = self.pwhashmod.new(self.key).hexdigest().lower() + + def request(self, method, request_uri, headers, content): + """Modify the request headers""" + keys = _get_end2end_headers(headers) + keylist = "".join(["%s " % k for k in keys]) + headers_val = "".join([headers[k] for k in keys]) + created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) + cnonce = _cnonce() + request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) + request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() + headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( + self.credentials[0], + self.challenge['realm'], + self.challenge['snonce'], + cnonce, + request_uri, + created, + request_digest, + keylist, + ) + + def response(self, response, content): + challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) + if challenge.get('reason') in ['integrity', 'stale']: + return True + return False + + +class WsseAuthentication(Authentication): + """This is thinly tested and should not be relied upon. + At this time there isn't any third party server to test against. + Blogger and TypePad implemented this algorithm at one point + but Blogger has since switched to Basic over HTTPS and + TypePad has implemented it wrong, by never issuing a 401 + challenge but instead requiring your client to telepathically know that + their endpoint is expecting WSSE profile="UsernameToken".""" + def __init__(self, credentials, host, request_uri, headers, response, content, http): + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the appropriate + Authorization header.""" + headers['Authorization'] = 'WSSE profile="UsernameToken"' + iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + cnonce = _cnonce() + password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) + headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( + self.credentials[0], + password_digest, + cnonce, + iso_now) + +class GoogleLoginAuthentication(Authentication): + def __init__(self, credentials, host, request_uri, headers, response, content, http): + from urllib import urlencode + Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) + + auth = dict(Email=credentials[0], Passwd=credentials[1], service='cl', source=headers['user-agent']) + resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) + lines = content.split('\n') + d = dict([tuple(line.split("=", 1)) for line in lines if line]) + if resp.status == 403: + self.Auth = "" + else: + self.Auth = d['Auth'] + + def request(self, method, request_uri, headers, content): + """Modify the request headers to add the 
appropriate + Authorization header.""" + headers['authorization'] = 'GoogleLogin Auth=' + self.Auth + + +AUTH_SCHEME_CLASSES = { + "basic": BasicAuthentication, + "wsse": WsseAuthentication, + "digest": DigestAuthentication, + "hmacdigest": HmacDigestAuthentication, + "googlelogin": GoogleLoginAuthentication +} + +AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] + +def _md5(s): + return + +class FileCache: + """Uses a local directory as a store for cached files. + Not really safe to use if multiple threads or processes are going to + be running on the same cache. + """ + def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior + self.cache = cache + self.safe = safe + if not os.path.exists(cache): + os.makedirs(self.cache) + + def get(self, key): + retval = None + cacheFullPath = os.path.join(self.cache, self.safe(key)) + try: + f = file(cacheFullPath, "r") + retval = f.read() + f.close() + except: + pass + return retval + + def set(self, key, value): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + f = file(cacheFullPath, "w") + f.write(value) + f.close() + + def delete(self, key): + cacheFullPath = os.path.join(self.cache, self.safe(key)) + if os.path.exists(cacheFullPath): + os.remove(cacheFullPath) + +class Http: + """An HTTP client that handles all + methods, caching, ETags, compression, + HTTPS, Basic, Digest, WSSE, etc. + """ + def __init__(self, cache=None): + # Map domain name to an httplib connection + self.connections = {} + # The location of the cache, for now a directory + # where cached responses are held. + if cache and isinstance(cache, str): + self.cache = FileCache(cache) + else: + self.cache = cache + + # tuples of name, password + self.credentials = [] + + # authorization objects + self.authorizations = [] + + self.follow_all_redirects = False + + self.ignore_etag = False + + def _auth_from_challenge(self, host, request_uri, headers, response, content): + """A generator that creates Authorization objects + that can be applied to requests. 
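+        Schemes are tried per credential in the order given by
+        AUTH_SCHEME_ORDER.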
+ """ + challenges = _parse_www_authenticate(response, 'www-authenticate') + for cred in self.credentials: + for scheme in AUTH_SCHEME_ORDER: + if challenges.has_key(scheme): + yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) + + def add_credentials(self, name, password): + """Add a name and password that will be used + any time a request requires authentication.""" + self.credentials.append((name, password)) + + def clear_credentials(self): + """Remove all the names and passwords + that are used for authentication""" + self.credentials = [] + self.authorizations = [] + + def _conn_request(self, conn, request_uri, method, body, headers): + for i in range(2): + try: + conn.request(method, request_uri, body, headers) + response = conn.getresponse() + except gaierror: + raise ServerNotFoundError("Unable to find the server at %s" % request_uri) + except: + if i == 0: + conn.close() + conn.connect() + continue + else: + raise + else: + content = response.read() + response = Response(response) + content = _decompressContent(response, content) + + break; + return (response, content) + + + def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): + """Do the actual request using the connection object + and also follow one level of redirects if necessary""" + + auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] + auth = auths and sorted(auths)[0][1] or None + if auth: + auth.request(method, request_uri, headers, body) + + (response, content) = self._conn_request(conn, request_uri, method, body, headers) + + if auth: + if auth.response(response, body): + auth.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers ) + response._stale_digest = 1 + + if response.status == 401: + for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): + authorization.request(method, request_uri, headers, body) + (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) + if response.status != 401: + self.authorizations.append(authorization) + authorization.response(response, body) + break + + if (self.follow_all_redirects or method in ["GET", "HEAD"]) or response.status == 303: + if response.status in [300, 301, 302, 303, 307]: + # Pick out the location header and basically start from the beginning + # remembering first to strip the ETag header and decrement our 'depth' + if redirections: + if not response.has_key('location') and response.status != 300: + raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header.")) + # Fix-up relative redirects (which violate an RFC 2616 MUST) + if response.has_key('location'): + location = response['location'] + (scheme, authority, path, query, fragment) = parse_uri(location) + if authority == None: + response['location'] = urlparse.urljoin(absolute_uri, location) + if response.status == 301 and method in ["GET", "HEAD"]: + response['-x-permanent-redirect-url'] = response['location'] + if not response.has_key('content-location'): + response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + if headers.has_key('if-none-match'): + del headers['if-none-match'] + if headers.has_key('if-modified-since'): + del headers['if-modified-since'] + if response.has_key('location'): + location = response['location'] + old_response = 
copy.deepcopy(response) + if not old_response.has_key('content-location'): + old_response['content-location'] = absolute_uri + redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method + (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1) + response.previous = old_response + else: + raise RedirectLimit( _("Redirected more times than rediection_limit allows.")) + elif response.status in [200, 203] and method == "GET": + # Don't cache 206's since we aren't going to handle byte range requests + if not response.has_key('content-location'): + response['content-location'] = absolute_uri + _updateCache(headers, response, content, self.cache, cachekey) + + return (response, content) + + def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS): + """ Performs a single HTTP request. +The 'uri' is the URI of the HTTP resource and can begin +with either 'http' or 'https'. The value of 'uri' must be an absolute URI. + +The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. +There is no restriction on the methods allowed. + +The 'body' is the entity body to be sent with the request. It is a string +object. + +Any extra headers that are to be sent with the request should be provided in the +'headers' dictionary. + +The maximum number of redirect to follow before raising an +exception is 'redirections. The default is 5. + +The return value is a tuple of (response, content), the first +being and instance of the 'Response' class, the second being +a string that contains the response entity body. + """ + if headers is None: + headers = {} + else: + headers = _normalize_headers(headers) + + if not headers.has_key('user-agent'): + headers['user-agent'] = "Python-httplib2/%s" % __version__ + + uri = iri2uri(uri) + + (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) + + if not self.connections.has_key(scheme+":"+authority): + connection_type = (scheme == 'https') and httplib.HTTPSConnection or httplib.HTTPConnection + conn = self.connections[scheme+":"+authority] = connection_type(authority) + conn.set_debuglevel(debuglevel) + else: + conn = self.connections[scheme+":"+authority] + + if method in ["GET", "HEAD"] and 'range' not in headers: + headers['accept-encoding'] = 'compress, gzip' + + info = email.Message.Message() + cached_value = None + if self.cache: + cachekey = defrag_uri + cached_value = self.cache.get(cachekey) + if cached_value: + try: + info = email.message_from_string(cached_value) + content = cached_value.split('\r\n\r\n', 1)[1] + except Exception, e: + self.cache.delete(cachekey) + cachekey = None + cached_value = None + else: + cachekey = None + + if method in ["PUT"] and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers: + # http://www.w3.org/1999/04/Editing/ + headers['if-match'] = info['etag'] + + if method not in ["GET", "HEAD"] and self.cache and cachekey: + # RFC 2616 Section 13.10 + self.cache.delete(cachekey) + + if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: + if info.has_key('-x-permanent-redirect-url'): + # Should cached permanent redirects be counted in our redirection count? For now, yes. 
+ (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1) + response.previous = Response(info) + response.previous.fromcache = True + else: + # Determine our course of action: + # Is the cached entry fresh or stale? + # Has the client requested a non-cached response? + # + # There seems to be three possible answers: + # 1. [FRESH] Return the cache entry w/o doing a GET + # 2. [STALE] Do the GET (but add in cache validators if available) + # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request + entry_disposition = _entry_disposition(info, headers) + + if entry_disposition == "FRESH": + if not cached_value: + info['status'] = '504' + content = "" + response = Response(info) + if cached_value: + response.fromcache = True + return (response, content) + + if entry_disposition == "STALE": + if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: + headers['if-none-match'] = info['etag'] + if info.has_key('last-modified') and not 'last-modified' in headers: + headers['if-modified-since'] = info['last-modified'] + elif entry_disposition == "TRANSPARENT": + pass + + (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + + if response.status == 304 and method == "GET": + # Rewrite the cache entry with the new end-to-end headers + # Take all headers that are in response + # and overwrite their values in info. + # unless they are hop-by-hop, or are listed in the connection header. + + for key in _get_end2end_headers(response): + info[key] = response[key] + merged_response = Response(info) + if hasattr(response, "_stale_digest"): + merged_response._stale_digest = response._stale_digest + try: + _updateCache(headers, merged_response, content, self.cache, cachekey) + except: + print locals() + raise + response = merged_response + response.status = 200 + response.fromcache = True + + elif response.status == 200: + content = new_content + else: + self.cache.delete(cachekey) + content = new_content + else: + (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) + return (response, content) + + + +class Response(dict): + """An object more like email.Message than httplib.HTTPResponse.""" + + """Is this response from our local cache""" + fromcache = False + + """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ + version = 11 + + "Status code returned by server. " + status = 200 + + """Reason phrase returned by server.""" + reason = "Ok" + + previous = None + + def __init__(self, info): + # info is either an email.Message or + # an httplib.HTTPResponse object. + if isinstance(info, httplib.HTTPResponse): + for key, value in info.getheaders(): + self[key] = value + self.status = info.status + self['status'] = str(self.status) + self.reason = info.reason + self.version = info.version + elif isinstance(info, email.Message.Message): + for key, value in info.items(): + self[key] = value + self.status = int(self['status']) + + def __getattr__(self, name): + if name == 'dict': + return self + else: + raise AttributeError, name + + diff --git a/lib/venus/planet/vendor/httplib2/iri2uri.py b/lib/venus/planet/vendor/httplib2/iri2uri.py new file mode 100644 index 0000000..70667ed --- /dev/null +++ b/lib/venus/planet/vendor/httplib2/iri2uri.py @@ -0,0 +1,110 @@ +""" +iri2uri + +Converts an IRI to a URI. 
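As an editorial aside before the module body: the conversion IDNA-encodes the authority and percent-encodes the UTF-8 octets of any 'ucschar' or 'iprivate' characters elsewhere in the reference. A small Python 2 sketch of the result (the import assumes planet/vendor is on sys.path):

    from httplib2.iri2uri import iri2uri

    print iri2uri(u"http://example.org/caf\xe9?q=\u2603")
    # -> http://example.org/caf%C3%A9?q=%E2%98%83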
+ +""" +__author__ = "Joe Gregorio (joe@bitworking.org)" +__copyright__ = "Copyright 2006, Joe Gregorio" +__contributors__ = [] +__version__ = "1.0.0" +__license__ = "MIT" +__history__ = """ +""" + +import urlparse + + +# Convert an IRI to a URI following the rules in RFC 3987 +# +# The characters we need to encode and escape are defined in the spec: +# +# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD +# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF +# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD +# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD +# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD +# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD +# / %xD0000-DFFFD / %xE1000-EFFFD + +escape_range = [ + (0xA0, 0xD7FF ), + (0xE000, 0xF8FF ), + (0xF900, 0xFDCF ), + (0xFDF0, 0xFFEF), + (0x10000, 0x1FFFD ), + (0x20000, 0x2FFFD ), + (0x30000, 0x3FFFD), + (0x40000, 0x4FFFD ), + (0x50000, 0x5FFFD ), + (0x60000, 0x6FFFD), + (0x70000, 0x7FFFD ), + (0x80000, 0x8FFFD ), + (0x90000, 0x9FFFD), + (0xA0000, 0xAFFFD ), + (0xB0000, 0xBFFFD ), + (0xC0000, 0xCFFFD), + (0xD0000, 0xDFFFD ), + (0xE1000, 0xEFFFD), + (0xF0000, 0xFFFFD ), + (0x100000, 0x10FFFD) +] + +def encode(c): + retval = c + i = ord(c) + for low, high in escape_range: + if i < low: + break + if i >= low and i <= high: + retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) + break + return retval + + +def iri2uri(uri): + """Convert an IRI to a URI. Note that IRIs must be + passed in as unicode strings. That is, do not utf-8 encode + the IRI before passing it into the function.""" + if isinstance(uri ,unicode): + (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) + authority = authority.encode('idna') + # For each character in 'ucschar' or 'iprivate' + # 1. encode as utf-8 + # 2. 
then %-encode each octet of that utf-8 + uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) + uri = "".join([encode(c) for c in uri]) + return uri + +if __name__ == "__main__": + import unittest + + class Test(unittest.TestCase): + + def test_uris(self): + """Test that URIs are invariant under the transformation.""" + invariant = [ + u"ftp://ftp.is.co.za/rfc/rfc1808.txt", + u"http://www.ietf.org/rfc/rfc2396.txt", + u"ldap://[2001:db8::7]/c=GB?objectClass?one", + u"mailto:John.Doe@example.com", + u"news:comp.infosystems.www.servers.unix", + u"tel:+1-816-555-1212", + u"telnet://192.0.2.16:80/", + u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] + for uri in invariant: + self.assertEqual(uri, iri2uri(uri)) + + def test_iri(self): + """ Test that the right type of escaping is done for each part of the URI.""" + self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}")) + self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}")) + self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}")) + self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) + self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) + self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) + + unittest.main() + + diff --git a/lib/venus/planet/vendor/portalocker.py b/lib/venus/planet/vendor/portalocker.py new file mode 100644 index 0000000..12592a3 --- /dev/null +++ b/lib/venus/planet/vendor/portalocker.py @@ -0,0 +1,93 @@ +# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. +# Requires python 1.5.2 or better. +# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203/index_txt +# Except where otherwise noted, recipes in the Python Cookbook are +# published under the Python license. + +"""Cross-platform (posix/nt) API for flock-style file locking. + +Synopsis: + + import portalocker + file = open("somefile", "r+") + portalocker.lock(file, portalocker.LOCK_EX) + file.seek(12) + file.write("foo") + file.close() + +If you know what you're doing, you may choose to + + portalocker.unlock(file) + +before closing the file, but why? + +Methods: + + lock( file, flags ) + unlock( file ) + +Constants: + + LOCK_EX + LOCK_SH + LOCK_NB + +I learned the win32 technique for locking files from sample code +provided by John Nielsen <nielsenjf@my-deja.com> in the documentation +that accompanies the win32 modules. + +Author: Jonathan Feinberg <jdf@pobox.com> +Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $ +""" + +import os + +if os.name == 'nt': + import win32con + import win32file + import pywintypes + LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK + LOCK_SH = 0 # the default + LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY + # is there any reason not to reuse the following structure? 
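Before the per-platform lock()/unlock() definitions that follow, one thing the synopsis above does not show is LOCK_NB, which turns a blocking lock into a try-and-give-up. A hedged Python 2 sketch for the posix case (the lock file path is illustrative; on nt the failure surfaces as a win32 error rather than IOError):

    import portalocker

    lockfile = open("cache/planet.lock", "a+")
    try:
        portalocker.lock(lockfile, portalocker.LOCK_EX | portalocker.LOCK_NB)
    except IOError:
        # fcntl.flock() reports an already-held lock as EWOULDBLOCK/EACCES
        print "another run already holds the lock"
    else:
        try:
            pass  # ... exclusive work goes here ...
        finally:
            portalocker.unlock(lockfile)
            lockfile.close()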
+ __overlapped = pywintypes.OVERLAPPED() +elif os.name == 'posix': + import fcntl + LOCK_EX = fcntl.LOCK_EX + LOCK_SH = fcntl.LOCK_SH + LOCK_NB = fcntl.LOCK_NB +else: + raise RuntimeError("PortaLocker only defined for nt and posix platforms") + +if os.name == 'nt': + def lock(file, flags): + hfile = win32file._get_osfhandle(file.fileno()) + win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped) + + def unlock(file): + hfile = win32file._get_osfhandle(file.fileno()) + win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped) + +elif os.name =='posix': + def lock(file, flags): + fcntl.flock(file.fileno(), flags) + + def unlock(file): + fcntl.flock(file.fileno(), fcntl.LOCK_UN) + +if __name__ == '__main__': + from time import time, strftime, localtime + import sys + import portalocker + + log = open('log.txt', "a+") + portalocker.lock(log, portalocker.LOCK_EX) + + timestamp = strftime("%m/%d/%Y %H:%M:%S\n", localtime(time())) + log.write( timestamp ) + + print "Wrote lines. Hit enter to release lock." + dummy = sys.stdin.readline() + + log.close() + diff --git a/lib/venus/planet/vendor/timeoutsocket.py b/lib/venus/planet/vendor/timeoutsocket.py new file mode 100644 index 0000000..b698df0 --- /dev/null +++ b/lib/venus/planet/vendor/timeoutsocket.py @@ -0,0 +1,424 @@ + +#### +# Copyright 2000,2001 by Timothy O'Malley <timo@alum.mit.edu> +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software +# and its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Timothy O'Malley not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR +# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. +# +#### + +"""Timeout Socket + +This module enables a timeout mechanism on all TCP connections. It +does this by inserting a shim into the socket module. After this module +has been imported, all socket creation goes through this shim. As a +result, every TCP connection will support a timeout. + +The beauty of this method is that it immediately and transparently +enables the entire python library to support timeouts on TCP sockets. +As an example, if you wanted to SMTP connections to have a 20 second +timeout: + + import timeoutsocket + import smtplib + timeoutsocket.setDefaultSocketTimeout(20) + + +The timeout applies to the socket functions that normally block on +execution: read, write, connect, and accept. If any of these +operations exceeds the specified timeout, the exception Timeout +will be raised. + +The default timeout value is set to None. As a result, importing +this module does not change the default behavior of a socket. The +timeout mechanism only activates when the timeout has been set to +a numeric value. (This behavior mimics the behavior of the +select.select() function.) + +This module implements two classes: TimeoutSocket and TimeoutFile. 
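Both classes are described next. To make the "transparently enables the entire python library" claim above concrete, a Python 2 sketch of what the shim buys an ordinary urllib caller (the URL is illustrative):

    import timeoutsocket
    timeoutsocket.setDefaultSocketTimeout(30)

    import urllib
    try:
        data = urllib.urlopen("http://feeds.example.org/planet.atom").read()
    except timeoutsocket.Timeout:
        data = None   # a stalled feed no longer hangs the whole aggregation run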
+ +The TimeoutSocket class defines a socket-like object that attempts to +avoid the condition where a socket may block indefinitely. The +TimeoutSocket class raises a Timeout exception whenever the +current operation delays too long. + +The TimeoutFile class defines a file-like object that uses the TimeoutSocket +class. When the makefile() method of TimeoutSocket is called, it returns +an instance of a TimeoutFile. + +Each of these objects adds two methods to manage the timeout value: + + get_timeout() --> returns the timeout of the socket or file + set_timeout() --> sets the timeout of the socket or file + + +As an example, one might use the timeout feature to create httplib +connections that will timeout after 30 seconds: + + import timeoutsocket + import httplib + H = httplib.HTTP("www.python.org") + H.sock.set_timeout(30) + +Note: When used in this manner, the connect() routine may still +block because it happens before the timeout is set. To avoid +this, use the 'timeoutsocket.setDefaultSocketTimeout()' function. + +Good Luck! + +""" + +__version__ = "$Revision: 1.1.1.1 $" +__author__ = "Timothy O'Malley <timo@alum.mit.edu>" + +# +# Imports +# +import select, string +import socket +if not hasattr(socket, "_no_timeoutsocket"): + _socket = socket.socket +else: + _socket = socket._no_timeoutsocket + + +# +# Set up constants to test for Connected and Blocking operations. +# We delete 'os' and 'errno' to keep our namespace clean(er). +# Thanks to Alex Martelli and G. Li for the Windows error codes. +# +import os +if os.name == "nt": + _IsConnected = ( 10022, 10056 ) + _ConnectBusy = ( 10035, ) + _AcceptBusy = ( 10035, ) +else: + import errno + _IsConnected = ( errno.EISCONN, ) + _ConnectBusy = ( errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK ) + _AcceptBusy = ( errno.EAGAIN, errno.EWOULDBLOCK ) + del errno +del os + + +# +# Default timeout value for ALL TimeoutSockets +# +_DefaultTimeout = None +def setDefaultSocketTimeout(timeout): + global _DefaultTimeout + _DefaultTimeout = timeout +def getDefaultSocketTimeout(): + return _DefaultTimeout + +# +# Exceptions for socket errors and timeouts +# +Error = socket.error +class Timeout(Exception): + pass + + +# +# Factory function +# +from socket import AF_INET, SOCK_STREAM +def timeoutsocket(family=AF_INET, type=SOCK_STREAM, proto=None): + if family != AF_INET or type != SOCK_STREAM: + if proto: + return _socket(family, type, proto) + else: + return _socket(family, type) + return TimeoutSocket( _socket(family, type), _DefaultTimeout ) +# end timeoutsocket + +# +# The TimeoutSocket class definition +# +class TimeoutSocket: + """TimeoutSocket object + Implements a socket-like object that raises Timeout whenever + an operation takes too long. + The definition of 'too long' can be changed using the + set_timeout() method. 
+ """ + + _copies = 0 + _blocking = 1 + + def __init__(self, sock, timeout): + self._sock = sock + self._timeout = timeout + # end __init__ + + def __getattr__(self, key): + return getattr(self._sock, key) + # end __getattr__ + + def get_timeout(self): + return self._timeout + # end set_timeout + + def set_timeout(self, timeout=None): + self._timeout = timeout + # end set_timeout + + def setblocking(self, blocking): + self._blocking = blocking + return self._sock.setblocking(blocking) + # end set_timeout + + def connect_ex(self, addr): + errcode = 0 + try: + self.connect(addr) + except Error, why: + errcode = why[0] + return errcode + # end connect_ex + + def connect(self, addr, port=None, dumbhack=None): + # In case we were called as connect(host, port) + if port != None: addr = (addr, port) + + # Shortcuts + sock = self._sock + timeout = self._timeout + blocking = self._blocking + + # First, make a non-blocking call to connect + try: + sock.setblocking(0) + sock.connect(addr) + sock.setblocking(blocking) + return + except Error, why: + # Set the socket's blocking mode back + sock.setblocking(blocking) + + # If we are not blocking, re-raise + if not blocking: + raise + + # If we are already connected, then return success. + # If we got a genuine error, re-raise it. + errcode = why[0] + if dumbhack and errcode in _IsConnected: + return + elif errcode not in _ConnectBusy: + raise + + # Now, wait for the connect to happen + # ONLY if dumbhack indicates this is pass number one. + # If select raises an error, we pass it on. + # Is this the right behavior? + if not dumbhack: + r,w,e = select.select([], [sock], [], timeout) + if w: + return self.connect(addr, dumbhack=1) + + # If we get here, then we should raise Timeout + raise Timeout("Attempted connect to %s timed out." % str(addr) ) + # end connect + + def accept(self, dumbhack=None): + # Shortcuts + sock = self._sock + timeout = self._timeout + blocking = self._blocking + + # First, make a non-blocking call to accept + # If we get a valid result, then convert the + # accept'ed socket into a TimeoutSocket. + # Be carefult about the blocking mode of ourselves. + try: + sock.setblocking(0) + newsock, addr = sock.accept() + sock.setblocking(blocking) + timeoutnewsock = self.__class__(newsock, timeout) + timeoutnewsock.setblocking(blocking) + return (timeoutnewsock, addr) + except Error, why: + # Set the socket's blocking mode back + sock.setblocking(blocking) + + # If we are not supposed to block, then re-raise + if not blocking: + raise + + # If we got a genuine error, re-raise it. + errcode = why[0] + if errcode not in _AcceptBusy: + raise + + # Now, wait for the accept to happen + # ONLY if dumbhack indicates this is pass number one. + # If select raises an error, we pass it on. + # Is this the right behavior? 
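Setting the accept() internals aside for a moment: the wrapper can also be created explicitly through the timeoutsocket() factory defined above, with a per-socket timeout. A Python 2 sketch (the host and request line are illustrative):

    import timeoutsocket
    from socket import AF_INET, SOCK_STREAM

    s = timeoutsocket.timeoutsocket(AF_INET, SOCK_STREAM)
    s.set_timeout(10)
    try:
        s.connect(("planet.example.org", 80))
        s.send("HEAD / HTTP/1.0\r\nHost: planet.example.org\r\n\r\n")
        print s.recv(256)
    except timeoutsocket.Timeout:
        print "no answer within 10 seconds"
    s.close()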
+ if not dumbhack: + r,w,e = select.select([sock], [], [], timeout) + if r: + return self.accept(dumbhack=1) + + # If we get here, then we should raise Timeout + raise Timeout("Attempted accept timed out.") + # end accept + + def send(self, data, flags=0): + sock = self._sock + if self._blocking: + r,w,e = select.select([],[sock],[], self._timeout) + if not w: + raise Timeout("Send timed out") + return sock.send(data, flags) + # end send + + def recv(self, bufsize, flags=0): + sock = self._sock + if self._blocking: + r,w,e = select.select([sock], [], [], self._timeout) + if not r: + raise Timeout("Recv timed out") + return sock.recv(bufsize, flags) + # end recv + + def makefile(self, flags="r", bufsize=-1): + self._copies = self._copies +1 + return TimeoutFile(self, flags, bufsize) + # end makefile + + def close(self): + if self._copies <= 0: + self._sock.close() + else: + self._copies = self._copies -1 + # end close + +# end TimeoutSocket + + +class TimeoutFile: + """TimeoutFile object + Implements a file-like object on top of TimeoutSocket. + """ + + def __init__(self, sock, mode="r", bufsize=4096): + self._sock = sock + self._bufsize = 4096 + if bufsize > 0: self._bufsize = bufsize + if not hasattr(sock, "_inqueue"): self._sock._inqueue = "" + + # end __init__ + + def __getattr__(self, key): + return getattr(self._sock, key) + # end __getattr__ + + def close(self): + self._sock.close() + self._sock = None + # end close + + def write(self, data): + self.send(data) + # end write + + def read(self, size=-1): + _sock = self._sock + _bufsize = self._bufsize + while 1: + datalen = len(_sock._inqueue) + if datalen >= size >= 0: + break + bufsize = _bufsize + if size > 0: + bufsize = min(bufsize, size - datalen ) + buf = self.recv(bufsize) + if not buf: + break + _sock._inqueue = _sock._inqueue + buf + data = _sock._inqueue + _sock._inqueue = "" + if size > 0 and datalen > size: + _sock._inqueue = data[size:] + data = data[:size] + return data + # end read + + def readline(self, size=-1): + _sock = self._sock + _bufsize = self._bufsize + while 1: + idx = string.find(_sock._inqueue, "\n") + if idx >= 0: + break + datalen = len(_sock._inqueue) + if datalen >= size >= 0: + break + bufsize = _bufsize + if size > 0: + bufsize = min(bufsize, size - datalen ) + buf = self.recv(bufsize) + if not buf: + break + _sock._inqueue = _sock._inqueue + buf + + data = _sock._inqueue + _sock._inqueue = "" + if idx >= 0: + idx = idx + 1 + _sock._inqueue = data[idx:] + data = data[:idx] + elif size > 0 and datalen > size: + _sock._inqueue = data[size:] + data = data[:size] + return data + # end readline + + def readlines(self, sizehint=-1): + result = [] + data = self.read() + while data: + idx = string.find(data, "\n") + if idx >= 0: + idx = idx + 1 + result.append( data[:idx] ) + data = data[idx:] + else: + result.append( data ) + data = "" + return result + # end readlines + + def flush(self): pass + +# end TimeoutFile + + +# +# Silently replace the socket() builtin function with +# our timeoutsocket() definition. 
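The assignment that performs that swap follows below. Once the module has been imported anywhere in the process, every socket obtained through socket.socket() honours the default timeout, while the original constructor stays reachable for code that needs to bypass the shim. A short Python 2 sketch, using only names defined in this module:

    import socket
    import timeoutsocket                 # importing it installs the shim

    timeoutsocket.setDefaultSocketTimeout(20)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)   # actually a TimeoutSocket
    print s.get_timeout()                                    # -> 20

    raw = socket._no_timeoutsocket(socket.AF_INET, socket.SOCK_STREAM)  # unwrapped socket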
+# +if not hasattr(socket, "_no_timeoutsocket"): + socket._no_timeoutsocket = socket.socket + socket.socket = timeoutsocket +del socket +socket = timeoutsocket +# Finis diff --git a/lib/venus/spider.py b/lib/venus/spider.py new file mode 100755 index 0000000..22b7da2 --- /dev/null +++ b/lib/venus/spider.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +""" +Main program to run just the spider portion of planet +""" + +import sys +from planet import spider, config + +if __name__ == '__main__': + + config.load(sys.argv[1]) + + if len(sys.argv) == 2: + # spider all feeds + spider.spiderPlanet() + elif len(sys.argv) > 2: + # spider selected feeds + for feed in sys.argv[2:]: + spider.spiderFeed(feed) + else: + print "Usage:" + print " python %s config.ini [URI URI ...]" % sys.argv[0] diff --git a/lib/venus/splice.py b/lib/venus/splice.py new file mode 100755 index 0000000..d27a6d5 --- /dev/null +++ b/lib/venus/splice.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +""" +Main program to run just the splice portion of planet +""" + +import os.path +import sys +from planet import splice, config + +if __name__ == '__main__': + + if len(sys.argv) == 2 and os.path.isfile(sys.argv[1]): + config.load(sys.argv[1]) + doc = splice.splice() + splice.apply(doc.toxml('utf-8')) + else: + print "Usage:" + print " python %s config.ini" % sys.argv[0] diff --git a/lib/venus/themes/asf/config.ini b/lib/venus/themes/asf/config.ini new file mode 100644 index 0000000..62a9776 --- /dev/null +++ b/lib/venus/themes/asf/config.ini @@ -0,0 +1,21 @@ +# This theme is based on the one originally developed by Stefano Mazzocci +# for planetapache.org, and modified by Sam Ruby for planet.intertwingly.net + +[Planet] +template_files: + atom.xml.xslt + foafroll.xml.xslt + index.html.xslt + opml.xml.xslt + validate.html.xslt + +template_directories: + ../common + +bill_of_materials: + default.css + personalize.js + images/feed-icon-10x10.png + images/opml.png + images/foaf.png + images/venus.png diff --git a/lib/venus/themes/asf/default.css b/lib/venus/themes/asf/default.css new file mode 100644 index 0000000..a23be74 --- /dev/null +++ b/lib/venus/themes/asf/default.css @@ -0,0 +1,533 @@ +/* + * Originally written by Stefano Mazzocchi <stefano at apache dot org> + * Adapted by Sam Ruby <rubys at intertwingly dot net> + */ + +/* ----------------------------- Global Definitions -------------------- */ + +body { + margin: 0px; + padding: 0px; + color: #222; + background-color: #fff; + quotes: "\201C" "\201E" "\2018" "\2019"; +} + +a:link { + color: #222; +} + +a:visited { + color: #555; +} + +a:hover { + color: #000; +} + +a:active { +} + +a:focus { +} + +a.inactive { + color: #558; +} + +a.rising { + font-weight: bold; +} + +a[rel~='license'] { + text-decoration: none; +} + +body > h1 { + font-size: x-large; + text-transform: uppercase; + letter-spacing: 0.25em; + padding: 10px; + margin: 0px 0px 0px 0px; + color: #889; + font-family: Sans-Serif; + font-weight: bold; + font-style: italic; + background-color: #eee; + border-bottom: 2px solid #ccd; +} + +/* ----------------------------- Sidebar --------------------------- */ + +#sidebar { + float: right; + top: 150px; + right: 0px; + width: 210px; + background-color: white; + + padding: 0px 0px 10px 0px; + margin: 0px 0px 20px 20px; + border-left: 2px solid #ccd; + border-bottom: 2px solid #ccd; + -webkit-border-bottom-left-radius: 1em; + -moz-border-radius: 0 0 0 1em; +} + +#sidebar h2 { + letter-spacing: 0.15em; + text-transform: uppercase; + font-size: x-small; + color: #666; + font-weight: 
normal; + padding: 2px 0px 2px 12px; + margin: 15px 0px 5px 10px; + border-top: 1px solid #ccc; + border-left: 1px solid #ccc; + border-bottom: 1px solid #ccc; + -webkit-border-top-left-radius: 6px; + -webkit-border-bottom-left-radius: 6px; + -moz-border-radius: 6px 0 0 6px; +} + +#sidebar h2 a img { + margin-bottom: 4px; + vertical-align: middle; +} + +#sidebar p { + font-size: x-small; + padding-left: 20px; + padding-right: 5px; +} + +#sidebar ul { + font-family: sans-serif; + margin-left: 5px; + padding-left: 25px; +} + +#sidebar li { + margin-left: 0px; + text-indent: -15px; + list-style-type: none; + font-size: x-small; +} + +#sidebar ul li a { + text-decoration: none; +} + +#sidebar ul li a:hover { + text-decoration: underline; +} + +#sidebar ul li a:visited { + color: #000; +} + +#sidebar ul li ul { + display: none; +} + +#sidebar ul li { + position: relative; +} + +#sidebar ul li:hover ul { + background-color: #EEE; + -webkit-border-radius: 0.5em; + -moz-border-radius: 0.5em; + border: 2px solid #BBB; + color:#000; + display: block; + margin-left: -300px; + margin-right: 115px; + padding: 10px; + padding-left: 25px; + position: absolute; + right: 80px; + top: -12px; + z-index: 1; +} + +#sidebar img { + margin-top: 2px; + border: 0; +} + +#sidebar dl { + font-size: x-small; + padding-left: 1.0em; +} + +#sidebar dl ul { + padding-left: 1em; +} + +#sidebar dt { + margin-top: 1em; + font-weight: bold; + padding-left: 1.0em; +} + +#sidebar dd { + margin-left: 2.5em; +} + +#sidebar .message { + cursor: help; + border-bottom: 1px dashed red; +} + +#sidebar a.message:hover { + cursor: help; + background-color: #ffD0D0; + border: 1px dashed red !important; + text-decoration: none !important; +} + +#sidebar input[name=q] { + padding-left: 0.5em; + border: 1px solid #ccd; + -webkit-border-radius: 0.8em; + -moz-border-radius: 0.8em; + width: 12.5em; + margin: 4px 0 0 24px; +} + +/* ---------------------------- Footer --------------------------- */ + +#footer ul { + margin: 0 20px 0 -25px; + padding: 0; +} + +#footer li { + margin: 0; + padding: 0; + list-style: none; + display: inline; +} + +#footer ul li ul { + display: none; +} + +#footer img { + display: none; +} + +/* ----------------------------- Body ---------------------------- */ + +#body { + margin-top: 10px; + margin-right: 210px; +} + +.admin { + text-align: right; +} + +#body > h2 { + float: right; + min-width: 25%; + -webkit-border-top-left-radius: 0.5em; + -webkit-border-bottom-left-radius: 0.5em; + -moz-border-radius: 0.5em 0 0 0.5em; + text-transform: none; + font-size: medium; + color: #667; + font-weight: bold; + text-align: center; + border: 2px solid #ccd; + background-color: #eee; + padding: 1px 1.5em 1px 1.5em; + margin: -0.2em -22px 0 0; +} + +/* ----------------------------- News ---------------------------- */ + +.news { + margin: 30px 10px 30px 10px; + clear: left; +} + +.news > h3 { + text-indent: -10px; + margin: 12px; + padding: 0px; + font-size: medium; +} + +.news > h3 > a:first-child { + margin-left: 10px +} + +.news > h3 > a:first-child:before { + content: '⌘'; + font-family: Code2000; + color: #D70; + margin-left: -18px; + margin-right: 2px; + text-decoration: none; +} + +img.icon { + height: 16px; + width: 16px; + margin-left: -8px; + margin-bottom: -2px; + margin-right: 3px; +} + +.news .content { + margin: 5px 5px 5px 15px; + padding: 0px 5px 0px 5px; + border-left: 1px solid #ccc; + line-height: 1.2em; + font-size: small; + font-family: sans-serif; +} + +.news .links { +} + +.news .permalink { + 
text-align: right; +} + +/* ----------------------------- News Content ---------------------------- */ + +.news .content p { + line-height: 1.2em; +} + +.news .content img { + margin: 5px; +} + +.news .content blockquote { + margin: 10px 35px 10px 35px; + padding: 5px; +} + +.news .content pre { + font-family: monospace; + font-size: medium; + font-weight: bold; + border: 1px solid #ddd; + padding: 10px; + margin: 10px 20px 10px 20px; + background-color: #f8f8f8; + overflow: auto; +} + +.news .content ul, .news .content ol { + margin: 5px 35px 5px 35px; + padding: 5px; + counter-reset: item; +} + +.news .content ul > ul, .news .content ul > ol, .news .content ol > ul, .news .content ol > ol { + margin: 0px 0px 0px 35px; + padding: 0px; +} + +.news .content li { + padding: 1px; + line-height: 1.2em; +} + +.news code { + font-family: monospace; + font-size: medium; + font-weight: bold; +} + +.news .content a { + text-decoration: none; + color: #000; + border-bottom: 1px dotted #777; + margin: 0px 2px 0px 2px; + padding: 1px 1px 1px 1px; +} + +.news .content a:hover { + border: 1px dotted #000; + background-color: #eee; + padding: 1px 2px 1px 2px; + margin: 0px; +} + +.news .content a:active { + background-color: #ccc !important; + position: relative; + top: 1px; + left: 1px; + padding: 1px 2px 1px 2px; + margin: 0px; +} + +.news .content a:focus { + border: 1px solid #fff !important; + background-color: #ccc !important; + padding: 1px 2px 1px 2px; + margin: 0px; +} + +/* --------------------------- Accomodations ----------------------- */ + +/* boing boing */ +br { + clear: none !important; +} + +/* engadget */ +p { + clear: none !important; +} + +/* cadenhead */ +p.sourcecode { + font-family: monospace; + font-size: medium; + font-weight: bold; + border: 1px solid #ddd; + padding: 10px; + margin: 10px 20px 10px 20px; + background-color: #f8f8f8; + overflow: auto; +} + +/* cadenhead */ +span.sourcecode { + font-family: monospace; + font-size: medium; + font-weight: bold; + font-size: large; + background-color: #f8f8f8; +} + +/* hsivonen */ +ul p, ol p { + margin-top: 0.3em; + margin-bottom: 0.3em; +} + +/* programmableweb */ +.imgRight { + float: right; +} + +/* gizmodo */ +img.left { + float: left; +} + +/* gizmodo */ +img.right { + float: right; +} + +/* gizmodo */ +img.center { + display: block; + margin-left: auto; + margin-right: auto; +} + +/* wikipedia */ +table { + width: auto !important; +} + +/* del.icio.us */ +.delicious-tags { + font-size: x-small; + text-align: right; +} + +/* musings */ +img.mathlogo, img.svglogo { + float: right; + border: 0; +} + +math { + white-space: nowrap; +} + +math[display=block] { + overflow: auto; +} + +.numberedEq span, .eqno { + float: right; +} + +/* sutor */ +img.post-img-right { + float:right; +} + +/* niall */ +img.floatright { + float: right; +} + +/* jason kolb */ +.FeaturedPost > li { + list-style-type: none; + background-color: #f8f8f8; +} + +/* Tantek */ +ul.tags,ul.tags li,h4.tags { + display:inline; + font-size: x-small +} + +ul.tags a:link, ul.tags a:visited { + color:green +} + +a[rel='tag'] img { + border: 0; +} + +/* DiveIntoMark */ +.framed { + float: none; +} + +/* BurningBird */ +.update:before { + content: 'Update'; + font-weight: bold; +} + +.update { + margin: 2em; + padding: 0 1em 0 1em; + background: #eee; + border: 1px solid #aaa; +} + +/* ----------------------------- Footer ---------------------------- */ + +#footer { + padding: 0px; + margin: 30px 0px 50px 50px; +} + +#footer p { + padding: 2px 2px 2px 5px; + 
background-color: #ccc; + border-top: 1px solid #aaa; + border-bottom: 1px solid #aaa; + border-left: 1px solid #aaa; + letter-spacing: 0.15em; + text-transform: uppercase; + text-align: left; +} diff --git a/lib/venus/themes/asf/index.html.xslt b/lib/venus/themes/asf/index.html.xslt new file mode 100644 index 0000000..8901d4c --- /dev/null +++ b/lib/venus/themes/asf/index.html.xslt @@ -0,0 +1,339 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml" + xmlns:planet="http://planet.intertwingly.net/" + xmlns="http://www.w3.org/1999/xhtml" + exclude-result-prefixes="atom planet xhtml"> + + <xsl:output method="xml" omit-xml-declaration="yes"/> + + <xsl:template match="atom:feed"> + <xsl:text disable-output-escaping="yes"><!DOCTYPE html></xsl:text> + <xsl:text> </xsl:text> + <html xmlns="http://www.w3.org/1999/xhtml"> + + <!-- head --> + <xsl:text> </xsl:text> + <head> + <link rel="stylesheet" href="default.css" type="text/css" /> + <title><xsl:value-of select="atom:title"/></title> + <meta name="robots" content="noindex,nofollow" /> + <meta name="generator" content="{atom:generator}" /> + <xsl:if test="atom:link[@rel='self']"> + <link rel="alternate" href="{atom:link[@rel='self']/@href}" + title="{atom:title}" type="{atom:link[@rel='self']/@type}" /> + </xsl:if> + <link rel="shortcut icon" href="/favicon.ico" /> + <script type="text/javascript" src="personalize.js"> + <xsl:comment><!--HTML Compatibility--></xsl:comment> + </script> + </head> + + <xsl:text> </xsl:text> + <body> + <xsl:text> </xsl:text> + <h1><xsl:value-of select="atom:title"/></h1> + + <xsl:text> </xsl:text> + <div id="body"> + <xsl:apply-templates select="atom:entry"/> + <xsl:text> </xsl:text> + </div> + + <h1>Footnotes</h1> + <xsl:text> </xsl:text> + + <div id="sidebar"> + <h2>Info</h2> + + <dl> + <dt>Last updated:</dt> + <dd> + <time datetime="{atom:updated}" title="GMT"> + <xsl:value-of select="atom:updated/@planet:format"/> + </time> + </dd> + <dt>Powered by:</dt> + <dd> + <a href="http://intertwingly.net/code/venus/"> + <img src="images/venus.png" width="80" height="15" + alt="Venus" border="0"/> + </a> + </dd> + <dt>Export:</dt> + <dd> + <ul> + <li> + <a href="opml.xml"> + <img src="images/opml.png" alt="OPML"/> + </a> + </li> + <li> + <a href="foafroll.xml"> + <img src="images/foaf.png" alt="FOAF"/> + </a> + </li> + </ul> + </dd> + </dl> + </div> + + <xsl:text> </xsl:text> + <div id="footer"> + <h2>Subscriptions</h2> + <ul> + <xsl:for-each select="planet:source"> + <xsl:sort select="planet:name"/> + <xsl:variable name="id" select="atom:id"/> + <xsl:variable name="posts" + select="/atom:feed/atom:entry[atom:source/atom:id = $id]"/> + <xsl:text> </xsl:text> + <li> + <!-- icon --> + <a title="subscribe"> + <xsl:choose> + <xsl:when test="planet:http_location"> + <xsl:attribute name="href"> + <xsl:value-of select="planet:http_location"/> + </xsl:attribute> + </xsl:when> + <xsl:when test="atom:link[@rel='self']/@href"> + <xsl:attribute name="href"> + <xsl:value-of select="atom:link[@rel='self']/@href"/> + </xsl:attribute> + </xsl:when> + </xsl:choose> + <img src="images/feed-icon-10x10.png" alt="(feed)"/> + </a> + <xsl:text> </xsl:text> + + <!-- name --> + <a> + <xsl:if test="atom:link[@rel='alternate']/@href"> + <xsl:attribute name="href"> + <xsl:value-of select="atom:link[@rel='alternate']/@href"/> + </xsl:attribute> + </xsl:if> + + <xsl:choose> + <xsl:when test="planet:message"> + <xsl:attribute 
name="class"> + <xsl:if test="$posts">active message</xsl:if> + <xsl:if test="not($posts)">message</xsl:if> + </xsl:attribute> + <xsl:attribute name="title"> + <xsl:value-of select="planet:message"/> + </xsl:attribute> + </xsl:when> + <xsl:when test="atom:title"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:title"/> + </xsl:attribute> + <xsl:if test="$posts"> + <xsl:attribute name="class">active</xsl:attribute> + </xsl:if> + </xsl:when> + </xsl:choose> + <xsl:value-of select="planet:name"/> + </a> + + <xsl:if test="$posts[string-length(atom:title) > 0]"> + <ul> + <xsl:for-each select="$posts"> + <xsl:if test="string-length(atom:title) > 0"> + <li> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:if test="atom:title/@xml:lang != @xml:lang"> + <xsl:attribute name="xml:lang" + select="{atom:title/@xml:lang}"/> + </xsl:if> + <xsl:value-of select="atom:title"/> + </a> + </li> + </xsl:if> + </xsl:for-each> + </ul> + </xsl:if> + </li> + </xsl:for-each> + <xsl:text> </xsl:text> + </ul> + </div> + + <xsl:text> </xsl:text> + </body> + </html> + </xsl:template> + + <xsl:template match="atom:entry"> + <!-- date header --> + <xsl:variable name="date" select="substring(atom:updated,1,10)"/> + <xsl:if test="not(preceding-sibling::atom:entry + [substring(atom:updated,1,10) = $date])"> + <xsl:text> </xsl:text> + <h2> + <time datetime="{$date}"> + <xsl:value-of select="substring-before(atom:updated/@planet:format,', ')"/> + <xsl:text>, </xsl:text> + <xsl:value-of select="substring-before(substring-after(atom:updated/@planet:format,', '), ' ')"/> + </time> + </h2> + </xsl:if> + + <xsl:text> </xsl:text> + <div class="news {atom:source/planet:css-id}"> + + <xsl:if test="@xml:lang"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="@xml:lang"/> + </xsl:attribute> + </xsl:if> + + <!-- entry title --> + <xsl:text> </xsl:text> + <h3> + <xsl:if test="atom:source/atom:icon"> + <img src="{atom:source/atom:icon}" class="icon"/> + </xsl:if> + <a> + <xsl:if test="atom:source/atom:link[@rel='alternate']/@href"> + <xsl:attribute name="href"> + <xsl:value-of + select="atom:source/atom:link[@rel='alternate']/@href"/> + </xsl:attribute> + </xsl:if> + + <xsl:attribute name="title"> + <xsl:value-of select="atom:source/atom:title"/> + </xsl:attribute> + <xsl:value-of select="atom:source/planet:name"/> + </a> + <xsl:if test="string-length(atom:title) > 0"> + <xsl:text>—</xsl:text> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:if test="atom:title/@xml:lang != @xml:lang"> + <xsl:attribute name="xml:lang" select="{atom:title/@xml:lang}"/> + </xsl:if> + <xsl:value-of select="atom:title"/> + </a> + </xsl:if> + </h3> + + <!-- entry content --> + <xsl:text> </xsl:text> + <xsl:choose> + <xsl:when test="atom:content"> + <xsl:apply-templates select="atom:content"/> + </xsl:when> + <xsl:otherwise> + <xsl:apply-templates select="atom:summary"/> + </xsl:otherwise> + </xsl:choose> + + <!-- entry footer --> + <xsl:text> </xsl:text> + <div class="permalink"> + <xsl:if test="atom:link[@rel='license'] or + atom:source/atom:link[@rel='license'] or + atom:rights or atom:source/atom:rights"> + <a> + <xsl:if test="atom:source/atom:link[@rel='license']/@href"> + <xsl:attribute name="rel">license</xsl:attribute> + <xsl:attribute name="href"> + <xsl:value-of select="atom:source/atom:link[@rel='license']/@href"/> + </xsl:attribute> + </xsl:if> + <xsl:if test="atom:link[@rel='license']/@href"> + <xsl:attribute name="rel">license</xsl:attribute> + <xsl:attribute name="href"> + <xsl:value-of 
select="atom:link[@rel='license']/@href"/> + </xsl:attribute> + </xsl:if> + <xsl:if test="atom:source/atom:rights"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:source/atom:rights"/> + </xsl:attribute> + </xsl:if> + <xsl:if test="atom:rights"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:rights"/> + </xsl:attribute> + </xsl:if> + <xsl:text>©</xsl:text> + </a> + <xsl:text> </xsl:text> + </xsl:if> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:choose> + <xsl:when test="atom:author/atom:name"> + <xsl:if test="not(atom:link[@rel='license'] or + atom:source/atom:link[@rel='license'] or + atom:rights or atom:source/atom:rights)"> + <xsl:text>by </xsl:text> + </xsl:if> + <xsl:value-of select="atom:author/atom:name"/> + <xsl:text> at </xsl:text> + </xsl:when> + <xsl:when test="atom:source/atom:author/atom:name"> + <xsl:if test="not(atom:link[@rel='license'] or + atom:source/atom:link[@rel='license'] or + atom:rights or atom:source/atom:rights)"> + <xsl:text>by </xsl:text> + </xsl:if> + <xsl:value-of select="atom:source/atom:author/atom:name"/> + <xsl:text> at </xsl:text> + </xsl:when> + </xsl:choose> + <time datetime="{atom:updated}" title="GMT"> + <xsl:value-of select="atom:updated/@planet:format"/> + </time> + </a> + </div> + </div> + + </xsl:template> + + <!-- xhtml content --> + <xsl:template match="atom:content/xhtml:div | atom:summary/xhtml:div"> + <xsl:copy> + <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="../@xml:lang"/> + </xsl:attribute> + </xsl:if> + <xsl:attribute name="class">content</xsl:attribute> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + + <!-- plain text content --> + <xsl:template match="atom:content/text() | atom:summary/text()"> + <div class="content" xmlns="http://www.w3.org/1999/xhtml"> + <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="../@xml:lang"/> + </xsl:attribute> + </xsl:if> + <xsl:copy-of select="."/> + </div> + </xsl:template> + + <!-- Remove stray atom elements --> + <xsl:template match="atom:*"> + <xsl:apply-templates/> + </xsl:template> + + <!-- Feedburner detritus --> + <xsl:template match="xhtml:div[@class='feedflare']"/> + + <!-- Strip site meter --> + <xsl:template match="xhtml:div[comment()[. 
= ' Site Meter ']]"/> + + <!-- pass through everything else --> + <xsl:template match="@*|node()"> + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + +</xsl:stylesheet> diff --git a/lib/venus/themes/asf/personalize.js b/lib/venus/themes/asf/personalize.js new file mode 100644 index 0000000..d044b87 --- /dev/null +++ b/lib/venus/themes/asf/personalize.js @@ -0,0 +1,297 @@ +var entries = []; // list of news items + +var days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", + "Friday", "Saturday"]; +var months = ["January", "February", "March", "April", "May", "June", "July", + "August", "September", "October", "November", "December"]; + +// event complete: stop propagation of the event +function stopPropagation(event) { + if (event.preventDefault) { + event.preventDefault(); + event.stopPropagation(); + } else { + event.returnValue = false; + } +} + +// scroll back to the previous article +function prevArticle(event) { + var scrollTop = document.documentElement.scrollTop || document.body.scrollTop; + for (var i=entries.length; --i>=0;) { + if (!entries[i].anchor) continue; + if (entries[i].anchor.offsetTop < scrollTop) { + window.location.hash=entries[i].anchor.id; + stopPropagation(event); + break; + } + } +} + +// advance to the next article +function nextArticle(event) { + var scrollTop = document.documentElement.scrollTop || document.body.scrollTop; + for (var i=1; i<entries.length; i++) { + if (!entries[i].anchor) continue; + if (entries[i].anchor.offsetTop-20 >scrollTop) { + window.location.hash=entries[i].anchor.id; + stopPropagation(event); + break; + } + } +} + +// process keypresses +function navkey(event) { + var checkbox = document.getElementById('navkeys'); + if (!checkbox || !checkbox.checked) return; + + if (!event) event=window.event; + if (event.originalTarget && + event.originalTarget.nodeName.toLowerCase() == 'input' && + event.originalTarget.id != 'navkeys') return; + + if (!document.documentElement) return; + if (!entries[0].anchor || !entries[0].anchor.offsetTop) return; + + key=event.keyCode; + if (key == 'J'.charCodeAt(0)) nextArticle(event); + if (key == 'K'.charCodeAt(0)) prevArticle(event); +} + +// create (or reset) a cookie +function createCookie(name,value,days) { + if (days) { + var date = new Date(); + date.setTime(date.getTime()+(days*24*60*60*1000)); + var expires = "; expires="+date.toGMTString(); + } + else expires = ""; + document.cookie = name+"="+value+expires+"; path=/"; +} + +// read a cookie +function readCookie(name) { + var nameEQ = name + "="; + if (!document.cookie) return; + var ca = document.cookie.split(';'); + for(var i=0;i < ca.length;i++) { + var c = ca[i]; + while (c.charAt(0)==' ') c = c.substring(1,c.length); + if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length); + } + return null; +} + +// each time the value of the option changes, update the cookie +function selectOption() { + var checkbox = document.getElementById('navkeys'); + if (!checkbox) return; + createCookie("navkeys", checkbox.checked?'true':'false', 365); +} + +// add navkeys option to sidebar +function addOption(event) { + var sidebar = document.getElementById('sidebar'); + if (!sidebar) return; + + var h2 = null; + for (var i=entries.length; --i>=0;) { + if (document.getElementById("news-" + i)) break; + if (entries[i].parent.offsetTop > 0) { + var a = entries[i].anchor = document.createElement('a'); + a.id = "news-" + i; + entries[i].parent.insertBefore(a, entries[i].parent.firstChild); + if (h2 == null) 
h2 = document.createElement('h2'); + } + } + + if (h2 != null && !document.getElementById("navkeys")) { + h2.appendChild(document.createTextNode('Options')); + sidebar.appendChild(h2); + + var form = document.createElement('form'); + var p = document.createElement('p'); + var input = document.createElement('input'); + input.type = "checkbox"; + input.id = "navkeys"; + p.appendChild(input); + var a = document.createElement('a'); + a.title = "Navigate entries"; + a.appendChild(document.createTextNode('Enable ')); + var code = document.createElement('code'); + code.appendChild(document.createTextNode('J')); + a.appendChild(code); + a.appendChild(document.createTextNode(' and ')); + code = document.createElement('code'); + code.appendChild(document.createTextNode('K')); + a.appendChild(code); + a.appendChild(document.createTextNode(' keys')); + p.appendChild(a); + form.appendChild(p); + sidebar.appendChild(form); + + var cookie = readCookie("navkeys"); + if (cookie && cookie == 'true') input.checked = true; + input.onclick = selectOption; + document.onkeydown = navkey; + } +} + +// Parse an HTML5-liberalized version of RFC 3339 datetime values +Date.parseRFC3339 = function (string) { + var date=new Date(); + date.setTime(0); + var match = string.match(/(\d{4})-(\d\d)-(\d\d)\s*(?:[\sT]\s*(\d\d):(\d\d)(?::(\d\d))?(\.\d*)?\s*(Z|([-+])(\d\d):(\d\d))?)?/); + if (!match) return; + if (match[2]) match[2]--; + if (match[7]) match[7] = (match[7]+'000').substring(1,4); + var field = [null,'FullYear','Month','Date','Hours','Minutes','Seconds','Milliseconds']; + for (var i=1; i<=7; i++) if (match[i]) date['setUTC'+field[i]](match[i]); + if (match[9]) date.setTime(date.getTime()+ + (match[9]=='-'?1:-1)*(match[10]*3600000+match[11]*60000) ); + return date.getTime(); +} + +// convert datetime to local date +var localere = /^(\w+) (\d+) (\w+) \d+ 0?(\d\d?:\d\d):\d\d ([AP]M) (EST|EDT|CST|CDT|MST|MDT|PST|PDT)/; +function localizeDate(element) { + var date = new Date(); + date.setTime(Date.parseRFC3339(element.getAttribute('datetime'))); + if (!date.getTime()) return; + + var local = date.toLocaleString(); + if (element.parentNode.nodeName == 'a') local = date.toLocaleTimeString(); + var match = local.match(localere); + if (match) { /* Firefox */ + element.innerHTML = match[4] + ' ' + match[5].toLowerCase(); + element.title = match[6] + " \u2014 " + + match[1] + ', ' + match[3] + ' ' + match[2]; + return days[date.getDay()] + ', ' + months[date.getMonth()] + ' ' + + date.getDate() + ', ' + date.getFullYear(); + } else { + local = local.replace(/ GMT(-\d\d\d\d) \(.*\)$/, ''); /* Webkit */ + element.title = element.innerHTML + ' GMT'; + element.innerHTML = local; + return days[date.getDay()] + ', ' + date.getDate() + ' ' + + months[date.getMonth()] + ' ' + date.getFullYear(); + } + +} + +// find entries (and localizeDates) +function findEntries() { + + var times = document.getElementsByTagName('time'); + + for (var i=0; i<times.length; i++) { + if (times[i].title == "GMT") { + var date = localizeDate(times[i]); + + var parent = times[i]; + while (parent && + (!parent.className || parent.className.split(' ')[0] != 'news')) { + parent = parent.parentNode; + } + + if (parent) { + var info = entries[entries.length] = new Object(); + info.parent = parent; + info.date = date; + info.datetime = times[i].getAttribute('datetime').substring(0,10); + } + } + } + +} + +// insert/remove date headers to indicate change of date in local time zone +function moveDateHeaders() { + var lastdate = '' + for (var i=0; i<entries.length; 
i++) { + var parent = entries[i].parent; + var date = entries[i].date; + + sibling = parent.previousSibling; + while (sibling && sibling.nodeType != 1) { + sibling = sibling.previousSibling; + } + + if (sibling && sibling.nodeName.toLowerCase() == 'h2') { + if (lastdate == date) { + sibling.parentNode.removeChild(sibling); + } else { + sibling.childNodes[0].innerHTML = date; + sibling.childNodes[0].setAttribute('datetime',entries[i].datetime); + lastdate = date; + } + } else if (lastdate != date) { + var h2 = document.createElement('h2'); + var time = document.createElement('time'); + time.setAttribute('datetime',entries[i].datetime); + time.appendChild(document.createTextNode(date)); + h2.appendChild(time); + parent.parentNode.insertBefore(h2, parent); + lastdate = date; + } + } +} + +function moveSidebar() { + var sidebar = document.getElementById('sidebar'); + if (sidebar.currentStyle && sidebar.currentStyle['float'] == 'none') return; + if (window.getComputedStyle && document.defaultView.getComputedStyle(sidebar,null).getPropertyValue('float') == 'none') return; + + var h1 = sidebar.previousSibling; + while (h1.nodeType != 1) h1=h1.previousSibling; + if (h1.nodeName.toLowerCase() == 'h1') h1.parentNode.removeChild(h1); + + var footer = document.getElementById('footer'); + var ul = footer.lastChild; + while (ul.nodeType != 1) ul=ul.previousSibling; + + var twisty = document.createElement('a'); + twisty.appendChild(document.createTextNode('\u25bc')); + twisty.title = 'hide'; + twisty.onclick = function() { + var display = 'block'; + if (this.childNodes[0].nodeValue == '\u25ba') { + this.title = 'hide'; + this.childNodes[0].nodeValue = '\u25bc'; + } else { + this.title = 'show'; + this.childNodes[0].nodeValue = '\u25ba'; + display = 'none'; + } + ul.style.display = display; + createCookie("subscriptions", display, 365); + } + + var cookie = readCookie("subscriptions"); + if (cookie && cookie == 'none') twisty.onclick(); + + for (var node=footer.lastChild; node; node=footer.lastChild) { + if (twisty && node.nodeType == 1 && node.nodeName.toLowerCase() == 'h2') { + node.appendChild(twisty); + twisty = null; + } + footer.removeChild(node); + sidebar.insertBefore(node, sidebar.firstChild); + } + + var body = document.getElementById('body'); + sidebar.parentNode.removeChild(sidebar); + body.parentNode.insertBefore(sidebar, body); + body.style.marginRight = 0; +} + +// adjust dates to local time zones, optionally provide navigation keys +function personalize() { + moveSidebar(); + findEntries(); + addOption(); + moveDateHeaders(); +} + +// hook event +document.addEventListener("DOMContentLoaded", personalize, false); diff --git a/lib/venus/themes/classic_fancy/config.ini b/lib/venus/themes/classic_fancy/config.ini new file mode 100644 index 0000000..e3ac0c8 --- /dev/null +++ b/lib/venus/themes/classic_fancy/config.ini @@ -0,0 +1,20 @@ +# This theme is based on the one contained in Planet V2.0. It demonstrates +# that one can mix the use of htmltmpl and xslt templates. 
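The [Planet] section below lists the theme's template_files, which are rendered during the splice stage. Mirroring the spider.py and splice.py wrappers added earlier in this patch, a whole run can be driven from Python like this (the config path is illustrative):

    from planet import config, spider, splice

    config.load("examples/planet-schmanet.ini")   # any config that selects this theme
    spider.spiderPlanet()                         # fetch the subscribed feeds into the cache
    doc = splice.splice()                         # merge cached entries into one feed
    splice.apply(doc.toxml('utf-8'))              # render it through the theme's templates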
+ +[Planet] +template_files: + atom.xml.xslt + foafroll.xml.xslt + index.html.tmpl + opml.xml.xslt + rss10.xml.tmpl + rss20.xml.tmpl + +template_directories: + ../common + +bill_of_materials: + planet.css + images/feed-icon-10x10.png + images/logo.png + images/planet.png diff --git a/lib/venus/themes/classic_fancy/index.html.tmpl b/lib/venus/themes/classic_fancy/index.html.tmpl new file mode 100644 index 0000000..3ade246 --- /dev/null +++ b/lib/venus/themes/classic_fancy/index.html.tmpl @@ -0,0 +1,126 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> +<html> + +### Fancy Planet HTML template. +### +### When combined with the stylesheet and images in the output/ directory +### of the Planet source, this gives you a much prettier result than the +### default examples template and demonstrates how to use the config file +### to support things like faces +### +### For documentation on the more boring template elements, see +### examples/config.ini and examples/index.html.tmpl in the Planet source. + +<head> +<title><TMPL_VAR name></title> +<meta http-equiv="Content-Type" content="text/html; charset=utf-8"> +<meta name="generator" content="<TMPL_VAR generator ESCAPE="HTML">"> +<link rel="stylesheet" href="planet.css" type="text/css"> +<TMPL_IF feedtype> +<link rel="alternate" href="<TMPL_VAR feed ESCAPE="HTML">" title="<TMPL_VAR channel_title_plain ESCAPE="HTML">" type="application/<TMPL_VAR feedtype>+xml"> +</TMPL_IF> +</head> + +<body> +<h1><TMPL_VAR name></h1> +<TMPL_VAR admin> + +<TMPL_LOOP Items> +<TMPL_IF new_date> +<TMPL_UNLESS __FIRST__> +### End <div class="channelgroup"> +</div> +### End <div class="daygroup"> +</div> +</TMPL_UNLESS> +<div class="daygroup"> +<h2><TMPL_VAR new_date></h2> +</TMPL_IF> + +<TMPL_IF new_channel> +<TMPL_UNLESS new_date> +### End <div class="channelgroup"> +</div> +</TMPL_UNLESS> +<div class="channelgroup"> + +### Planet provides template variables for *all* configuration options for +### the channel (and defaults), even if it doesn't know about them. We +### exploit this here to add hackergotchi faces to our channels. Planet +### doesn't know about the "face", "facewidth" and "faceheight" configuration +### variables, but makes them available to us anyway. + +<h3><a href="<TMPL_VAR channel_link ESCAPE="HTML">" title="<TMPL_VAR channel_title_plain ESCAPE="HTML">"><TMPL_VAR channel_name></a></h3> +<TMPL_IF channel_face> +<img class="face" src="images/<TMPL_VAR channel_face ESCAPE="HTML">" width="<TMPL_VAR channel_facewidth ESCAPE="HTML">" height="<TMPL_VAR channel_faceheight ESCAPE="HTML">" alt=""> +</TMPL_IF> +</TMPL_IF> + + +<div class="entrygroup" id="<TMPL_VAR id>"<TMPL_IF channel_language> lang="<TMPL_VAR channel_language>"</TMPL_IF>> +<TMPL_IF title> +<h4<TMPL_IF title_language> lang="<TMPL_VAR title_language>"</TMPL_IF>><a href="<TMPL_VAR link ESCAPE="HTML">"><TMPL_VAR title></a></h4> +</TMPL_IF> +<div class="entry"> +<div class="content"<TMPL_IF content_language> lang="<TMPL_VAR content_language>"</TMPL_IF>> +<TMPL_VAR content> +</div> + +### Planet also makes available all of the information from the feed +### that it can. Use the 'planet-cache' tool on the cache file for +### a particular feed to find out what additional keys it supports. +### Comment extra fields are 'author' and 'category' which we +### demonstrate below. 
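Stepping outside the template for a moment: every <TMPL_VAR> and <TMPL_LOOP> here is filled in by the htmltmpl engine vendored in this same patch (planet/vendor/htmltmpl.py). A hedged, self-contained Python 2 sketch of that mechanism, using a tiny throwaway template instead of this file (names and values are illustrative):

    from htmltmpl import TemplateManager, TemplateProcessor

    f = open("mini.tmpl", "w")
    f.write("<h1><TMPL_VAR name></h1>\n"
            "<TMPL_LOOP Items><p><TMPL_VAR title></p></TMPL_LOOP>\n")
    f.close()

    template = TemplateManager().prepare("mini.tmpl")
    tproc = TemplateProcessor()
    tproc.set("name", "Example Planet")
    tproc.set("Items", [{"title": "Hello planet"}, {"title": "Second post"}])
    print tproc.process(template)

Planet's own template driver (planet/shell/tmpl.py, also added in this patch) builds these dictionaries from the cached feed data rather than by hand.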
+ +<p class="date"> +<a href="<TMPL_VAR link ESCAPE="HTML">"><TMPL_IF author>by <TMPL_VAR author ESCAPE="HTML"> at </TMPL_IF><TMPL_VAR date><TMPL_IF category> under <TMPL_VAR category></TMPL_IF></a> +</p> +</div> +</div> + +<TMPL_IF __LAST__> +### End <div class="channelgroup"> +</div> +### End <div class="daygroup"> +</div> +</TMPL_IF> +</TMPL_LOOP> + + +<div class="sidebar"> +<img src="images/logo.png" width="136" height="136" alt=""> + +<h2>Subscriptions</h2> +<ul> +<TMPL_LOOP Channels> +<li> +<a href="<TMPL_VAR url ESCAPE="HTML">" title="subscribe"><img src="images/feed-icon-10x10.png" alt="(feed)"></a> <a <TMPL_IF link>href="<TMPL_VAR link ESCAPE="HTML">" </TMPL_IF><TMPL_IF message>class="message" title="<TMPL_VAR message ESCAPE="HTML">"</TMPL_IF><TMPL_UNLESS message>title="<TMPL_VAR title_plain ESCAPE="HTML">"</TMPL_UNLESS>><TMPL_VAR name></a> +</li> +</TMPL_LOOP> +</ul> + +<p> +<strong>Last updated:</strong><br> +<TMPL_VAR date><br> +<em>All times are UTC.</em><br> +<br> +Powered by:<br> +<a href="http://www.planetplanet.org/"><img src="images/planet.png" width="80" height="15" alt="Planet" border="0"></a> +</p> + +<p> +<h2>Planetarium:</h2> +<ul> +<li><a href="http://www.planetapache.org/">Planet Apache</a></li> +<li><a href="http://planet.debian.net/">Planet Debian</a></li> +<li><a href="http://planet.freedesktop.org/">Planet freedesktop.org</a></li> +<li><a href="http://planet.gnome.org/">Planet GNOME</a></li> +<li><a href="http://planetsun.org/">Planet Sun</a></li> +<li><a href="http://fedora.linux.duke.edu/fedorapeople/">Fedora People</a></li> +<li><a href="http://www.planetplanet.org/">more...</a></li> +</ul> +</p> +</div> +</body> + +</html> diff --git a/lib/venus/themes/classic_fancy/planet.css b/lib/venus/themes/classic_fancy/planet.css new file mode 100644 index 0000000..05653c0 --- /dev/null +++ b/lib/venus/themes/classic_fancy/planet.css @@ -0,0 +1,150 @@ +body { + border-right: 1px solid black; + margin-right: 200px; + + padding-left: 20px; + padding-right: 20px; +} + +h1 { + margin-top: 0px; + padding-top: 20px; + + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: normal; + letter-spacing: -2px; + text-transform: lowercase; + text-align: right; + + color: grey; +} + +.admin { + text-align: right; +} + +h2 { + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: normal; + color: #200080; + + margin-left: -20px; +} + +h3 { + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: normal; + + background-color: #a0c0ff; + border: 1px solid #5080b0; + + padding: 4px; +} + +h3 a { + text-decoration: none; + color: inherit; +} + +h4 { + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: bold; +} + +h4 a { + text-decoration: none; + color: inherit; +} + +img.face { + float: right; + margin-top: -3em; +} + +.entry { + margin-bottom: 2em; +} + +.entry .date { + font-family: "Bitstream Vera Sans", sans-serif; + color: grey; +} + +.entry .date a { + text-decoration: none; + color: inherit; +} + +.sidebar { + position: absolute; + top: 0px; + right: 0px; + width: 200px; + + margin-left: 0px; + margin-right: 0px; + padding-right: 0px; + + padding-top: 20px; + padding-left: 0px; + + font-family: "Bitstream Vera Sans", sans-serif; + font-size: 85%; +} + +.sidebar h2 { + font-size: 110%; + font-weight: bold; + color: black; + + padding-left: 5px; + margin-left: 0px; +} + +.sidebar ul { + padding-left: 1em; + margin-left: 0px; + + list-style-type: none; +} + +.sidebar ul li:hover { + color: grey; +} + +.sidebar ul li a { + text-decoration: 
none; +} + +.sidebar ul li a:hover { + text-decoration: underline; +} + +.sidebar ul li a img { + border: 0; +} + +.sidebar p { + border-top: 1px solid grey; + margin-top: 30px; + padding-top: 10px; + + padding-left: 5px; +} + +.sidebar .message { + cursor: help; + border-bottom: 1px dashed red; +} + +.sidebar a.message:hover { + cursor: help; + background-color: #ff0000; + color: #ffffff !important; + text-decoration: none !important; +} + +a:hover { + text-decoration: underline !important; + color: blue !important; +} diff --git a/lib/venus/themes/common/atom.xml.xslt b/lib/venus/themes/common/atom.xml.xslt new file mode 100644 index 0000000..c57a938 --- /dev/null +++ b/lib/venus/themes/common/atom.xml.xslt @@ -0,0 +1,80 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:access="http://www.bloglines.com/about/specs/fac-1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:indexing="urn:atom-extension:indexing" + xmlns:planet="http://planet.intertwingly.net/" + xmlns:xhtml="http://www.w3.org/1999/xhtml" + xmlns="http://www.w3.org/1999/xhtml" + exclude-result-prefixes="planet xhtml"> + + <!-- strip planet elements and attributes --> + <xsl:template match="planet:*|@planet:*"/> + + <!-- strip obsolete link relationships --> + <xsl:template match="atom:link[@rel='service.edit']"/> + <xsl:template match="atom:link[@rel='service.post']"/> + <xsl:template match="atom:link[@rel='service.feed']"/> + + <!-- Feedburner detritus --> + <xsl:template match="xhtml:div[@class='feedflare']"/> + + <!-- Strip site meter --> + <xsl:template match="xhtml:div[comment()[. = ' Site Meter ']]"/> + + <!-- add Google/LiveJournal-esque and Bloglines noindex directive --> + <xsl:template match="atom:feed"> + <xsl:copy> + <xsl:attribute name="indexing:index">no</xsl:attribute> + <xsl:apply-templates select="@*"/> + <access:restriction relationship="deny"/> + <xsl:apply-templates select="node()"/> + <xsl:text> </xsl:text> + </xsl:copy> + </xsl:template> + +<!-- popular customization: add planet name to each entry title + <xsl:template match="atom:entry/atom:title"> + <xsl:text> </xsl:text> + <xsl:copy> + <xsl:apply-templates select="@*"/> + <xsl:value-of select="../atom:source/planet:name"/> + <xsl:text>: </xsl:text> + <xsl:apply-templates select="node()"/> + </xsl:copy> + </xsl:template> +--> + + <!-- indent atom elements --> + <xsl:template match="atom:*"> + <!-- double space before atom:entries --> + <xsl:if test="self::atom:entry"> + <xsl:text> </xsl:text> + </xsl:if> + + <!-- indent start tag --> + <xsl:text> </xsl:text> + <xsl:for-each select="ancestor::*"> + <xsl:text> </xsl:text> + </xsl:for-each> + + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + + <!-- indent end tag if there are element children --> + <xsl:if test="*"> + <xsl:text> </xsl:text> + <xsl:for-each select="ancestor::*"> + <xsl:text> </xsl:text> + </xsl:for-each> + </xsl:if> + </xsl:copy> + </xsl:template> + + <!-- pass through everything else --> + <xsl:template match="@*|node()"> + <xsl:copy> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + +</xsl:stylesheet> diff --git a/lib/venus/themes/common/foafroll.xml.xslt b/lib/venus/themes/common/foafroll.xml.xslt new file mode 100644 index 0000000..44726d9 --- /dev/null +++ b/lib/venus/themes/common/foafroll.xml.xslt @@ -0,0 +1,39 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" + 
xmlns:foaf="http://xmlns.com/foaf/0.1/" + xmlns:rss="http://purl.org/rss/1.0/" + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:planet="http://planet.intertwingly.net/" + exclude-result-prefixes="atom planet"> + + <xsl:output indent="yes" method="xml"/> + + <xsl:template match="atom:feed"> + <rdf:RDF> + <foaf:Group> + <foaf:name><xsl:value-of select="atom:author/atom:name"/></foaf:name> + <foaf:homepage><xsl:value-of select="atom:author/atom:uri"/></foaf:homepage> + + <xsl:apply-templates select="planet:source"/> + </foaf:Group> + </rdf:RDF> + </xsl:template> + + <xsl:template match="planet:source"> + <foaf:member> + <foaf:Agent> + <foaf:name><xsl:value-of select="planet:name"/></foaf:name> + <foaf:weblog> + <foaf:Document rdf:about="{atom:link[@rel='alternate']/@href}"> + <dc:title><xsl:value-of select="atom:title"/></dc:title> + <rdfs:seeAlso> + <rss:channel rdf:about="{atom:link[@rel='self']/@href}" /> + </rdfs:seeAlso> + </foaf:Document> + </foaf:weblog> + </foaf:Agent> + </foaf:member> + </xsl:template> +</xsl:stylesheet> diff --git a/lib/venus/themes/common/images/feed-icon-10x10.png b/lib/venus/themes/common/images/feed-icon-10x10.png new file mode 100644 index 0000000000000000000000000000000000000000..cc869bc61785f4db646fcbbcfc87aa3d20d99eba GIT binary patch literal 469 zcmV;`0V@89P)<h;3K|Lk000e1NJLTq000UA000UI1^@s6jWW-@00004XF*Lt006JZ zHwB960000PbVXQnQ*UN;cVTj606}DLVr3vnZDD6+Qe|Oed2z{QJOBUzXGugsRCwAf zkUK~mQ5c24duL`{d}E=mS{MtFKnQ9fJ`ijKE5XKAK}oxfjdrF<8`4NX(kO_fg&<g| zV4-NCD87(YeBx`>b#`aw-kX_Si^Jc1|2c;v&L+N%#GTkbr^vx_L@0?2ue1vae8uy9 zW>j2Fwi~;wnv#w|%)>D{wT>10d<a2d3EX$EBPY9}ocPy1tePK~+#A96EaqS_52>6+ znvjX&#K$e}$~4lwrKocpr#p$RZpK^viI-7mZAGBOZfK!ocn0%!gWCL!dO5}Fox+@M z<8LitMCck7=WdtW+z{sJ1iSwiD)WlBvk<zE5xA-l9~HK%>T!CKn3HAn`5Jatl8=pf zWMv()t_|gz0w^mJ$i`l18o*uui>ycxWHsvP8s|%U9u%*Cx=p;;psT*)T^`{*rxCTS zWX}(wG=ZN^W3ms(Xv}CQKedl?b7Aoq{>2_>AN82ZL&^z8{|hhxfn}Mu<dY`u00000 LNkvXXu0mjf>vYci literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/images/foaf.png b/lib/venus/themes/common/images/foaf.png new file mode 100644 index 0000000000000000000000000000000000000000..0fea5ba5bc3daba7e3fca691f0db3c755934f685 GIT binary patch literal 1393 zcmd^7`%hX27{xmA?Q~Wi-s?&UP6=D9ncP*kxD~ZX5vLRhB~B3p17=HopqW`ZQIr^3 z%%ry}7rX?-W^+DoTdZ!&GGf$;6${PWGfUHq#geHsX|}rd?q>hRvXh*1@+IF7-#Ph) zYO49EN!dvx5-C+sSuQ3#n_yZ(ETQD!-)$sPEUBhSQn9eGAP@-V=H_~Pd#TjpWo2bJ z?!>j){y_}K5mqj%tWsH9E5R@fZ)q{jn!;fVe0^ggIDzYQmOGZg=|T6d8)Sk=0P%RV zIBrw-s{3a9hRiGaF}+Hq3M&-dJ#LRs==$EZ*W7IQ+z^St{r&yOHp1gK1p<M|A157X z2tn>T?m3cEljZGlG=SQX?R*YI1*rqm0}u$>`TWJYx)XUP_VzxZ(J0i+%r<M=;o&ud z!GLQt4#$pHDuu7FYYZA3N9X7Di;H^MC7C1W!0`#s&z`KLL^OnALSa%I8G<0MSMT}B z!{xGkzK`?RMZSgx9CsHNzap%9_qk4oAP60#A3GKk(P|?K1q1+Nn`1c)0AK)4G-$Kg z=H@zz0APGvu`ZFc>MTb`wzKCNX*4>R^O9F0d6@;W*-#`h%msjsZs*vz-~5F+R9`PX zc@o`1ooLAG?c(uxhiY||NE3}(_jS4>t@gaIIutTSmCDXe0Xd$WaU8ItK@KP953Sbt znNgmbyT3mQLAiy6jIdIfk(zEa-E{xpPESsA?YiuC)s?GX8jK4RAWQmz^z>;41hFhj z7Aw4#pAXRiih0FM1M~9PTpO}Yp`;bk>EZA&au=DGNF4SZ;vaAvC-nb`?DWucBKb_G zxcUOo!+!>(XB96T5QoPoh5T~d6CYhAhQv#i7dr^A{Fmo8T1$zAT0!|)2`#9SVS}S< zyJM#AEgM%Jt$$h{mA5*3o356;VRHU@1w7*JY>=61CZ-fOiBoJPb<4}_9NXT5sb6>; zW8d;>tV~8TOcj*3(>6-p+{|u!c)ryfZ9OpSg#3hr$*tRszWJrjr_74gCx2zC)G4A@ zIj@OQ)X_7=zWaT28ZU12ntz&eyXK%a3xglSSk{I2bS=~eS?k{hinOpw+A~_vIAC79 z(cqd0G>_gN8JR{N&CqE}dNBF1`y1V}kw4+wBG#0Ra#Ma$w0UY#BlG#U3O$~WZjIdA ziz73W!##e#!aAZ<O>5Ld)S<PvzuKv<SbYn-Awtc!u<u~2RWvhfh<SJ?JFlxLKEA0d zPs%WTXj%EX-Fx{ru7XX*$X%tSPtLqvFg%p;yLyV7{wdd=NJ+|+o>DdRr!d6tz!1cM b*GVMbqsy7^M_-&FS|$l9s>^42ioSmU^%GgE literal 0 
HcmV?d00001 diff --git a/lib/venus/themes/common/images/logo.png b/lib/venus/themes/common/images/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..f277bf9281a3659dd27aaaf6822e01240cc48cd0 GIT binary patch literal 5413 zcmV+=724{FP)<h;3K|Lk000e1NJLTq004*p004*x0ssI2v)D8R00009a7bBm000*f z000*f0cW4mQUCw|2XskIMF-Rb5(_&83T8PQ0000abVXQnLvL+uWo~o;OhrLNAXH^z zXl`$8Z)a&`b0A4>V=f?8F)lDMe$ayr000z}Nkl<Zc-rk<?RMk14ikFv{;!PvVM_u? z%IVBFiQBc$y=SLw;>4mzf#6qb_jO<Q_5BOme@NbIaTSpOt+lUvA+C8nN^#xSM=rHn zU$<1QdEKV$7R-I!g1H5AU$<ax!Q9s^nEU#Tg5fQgcdRkly5GC~wEh3!P59Nqd3;_d zT^`te>dU?V18fcGzk>>eZ2R9^YnVdnzq$qU*J^?$>=2h4VA2BmZ$zH`4h(V`BF0Y> z)dx)(x(cWoK>oMwet;ONf+P;^YtL~jirl~k0zXiJzCNfgoGZL79e}2*=>L#2MMTwY zm@GXG$yol3lj-YIQrQg;r>-DaCxD_nQ#ORH?f8KSua##Feg>%f`kWOT)_)xPb3rj* zL9*jB1PO>Jx!<HXp1}O^yu554AJQ1w5)Fxf+cI)&cKZHCLlp>TBjg=y>=qGSG0nVT zXcI3A<Q`Y}63h}?8KQlSvC2{~v~4P(w___n-L+)%IaG$jA8i5qW><#?z=jhrw?4~- z4u4Sdd0{r;@o8NNfUDTlyBvlxkoy^gELiU15J^B#^>PQuH@Jx6e7)OdQBMt&N&}cb zC_ae+E(LH+kD#t&*nY;+Br56iL~y@bFzj1Cm<-JQJpB8HxuD!#QA92Wo4BTwh!)@( z^JkQ2(lm<vpP8}_w8Ht`kO4qLIJ~1I=O6Yv*b68h2>SykHsh{LbVC}QA^dWUp&Y@% zEoU<89OUvLWPmX{6Cf}PkP3^l>(5f&yH@^kjiH?f07-S{(+Xs#FmRrTf80fr?j!CH z7Lpz>LFYT6I>fY@ev8JijpLm4x>NQ)Dk#05{0P6<4=fh^S|CFg%s>>;1lUla{YH(U zow0FoP|0amstmxy?sMwhaO5M`2|K{hkr?DdEy9zr83sS}`!oj1R_3E;1}@d_fB@RQ z^#{03CqX(hF!#Q{W|&Qytl^xS`lX*znaAr@MSMna6Bmd2IHN}drgHee;K=N&je<L- zpF~rM2={1@V3=qZezag5qb5+C4H)$$U;@bLCsgufIJH>g2oX8z6K@<_X87Jo3cSda zbsBygq3kX=FwX)cH*$LoErQu5A>b!6g7Hd>01<+5Fb@(26974s5Df8ninqC+U$0Ua z?(2dSZ)XN#hPEpDi*QERUPmB$?idb}pfU|Ll=^6a0T6Hmfp;D_U$0V_?B|JZ$8z%_ zvXXFrlxiKh+k7tE&-ypjmd`=6@-|reSU*KDySFDkL0Ayy4l~nLfuv#OS$XVm5Lf8{ z{fD*tKOH3oRcC~gWa9mP09<C}BL?~my0Sy4oWx9DY#!`<aJv<2eiD1`2DfwpFmN|} zB0()E`Z2~#cD_h5UuO%0n`gpWKupGgGak$$_{jY1i%gnQAF<jmP{WyCR%pWbo*h=h zUVgfPd%Q|Vs3(U}P;6)jyfOL)mm}JuUilcp#+(s)w&&Y!f20R^m{6JW&T?}OE(v?U zVx~p`(Ko)TV04KKHiXedgjp!m^bGY@q+z^q0~l?CnMgSE&~WI6^sF3Ac0eE*oso!p z<0X<g)0E*DI>%)PE6|fzOeEj0;NRlQVQy3ac8d*=^!%y09Qwoa(+E#JV=z6*{2ZR8 z$6#(udILm>_aRwli8Pz1GLx57NN~VAGQdLjWU^0nz)+tpnNtNrI(lui{m&KQoWj0T zAUwf@-AZ&&78qFvu=8T`4JeL8M7)w#8WO!j2qTqV)kOru5DXsrU2tVaJ=>1^?#9Eh zY2U#UC4ZK>{GBUcK}xWNl#%?8v7z9W^Te~}G{LBYFg6v8#^_GxWXlj%*c=VN8w@32 zzI!)eNLk5Y)qCEIWt_t~A!pCFT7e-_*<y%qZ|&@o0_(o?_bp4J<6<-z{nXWSBc86T zJPu(rNf8o+yVk0um{dk(iIv1&csMbJ=CodZ6V5_rDuu@xy3&E`AamI&{(X^nf_lRs zxf?l)pRKwt4$YzF!!YiDDhJx0-{f_)=HnEl8#&;r9#}YZvp&o+A;NFiWkZVM4Y#uj zgf;)fsQ;t|jI-h}V{<oJy$a4Yc##!)AqNw*$J#RT0o2(Sin-J*MFef<ZPpQ*P*bRr z_Kh}>4Naxd?IKPwQ{G=dE}L7~ANfUv0%silTcQfg^AgBUgZjPl&f<cBT%$V%m08LD zAJy**Aj|Run2zf3$0D16Rjhiy33yRcHVh1_qXSKHSz=5c!5l7p!cdjm2szQJXf;&{ zJ!Cn*0MM%J^rmbMW1%Q5gX4wSnrCrksJwx3@Su*nBe1s&ND9OIM3Z#Ce9DjWExm># zNfw^Wj~a$Z?sWNhpgJyiUDWIZQ%3P8(6X~!6VOU|Q3L*{_(ImyHRXH=ONbR=v@UJR zh%$Z?Ry5yhL~{x%6Ld0mKH~AYSP)DSM=~$)jlEc=%3!U-O%s>}+A&dN<sC!5(+Ctn z>ISb#`!kT}+euKR8#7{9Mst=qXi1SB)}M42vZ7MlpXwEysqTyp5*H3Mn$&l439~OD zn64}=svNtmo)3@E(}IBrE*XIA%nm1c$skvRsK5epi#UUI=B_=#fw_BK*39GI4Vlsx zGZH*K#c)Jpa><xA#Z6O>;yU?fC~HV&AT<{+#Oj8D+`DMNfC)-Xw);@{&)t#1k)C}X zE*Cb4vz9?!a7w1EzmbfHYpx21fDx^J@bAcYq_r~mcnB>iq}#aL{!zvTyfA0>JlbEr zE<#pJm11Vm{mO4d1YUWq=ZV}jDF*wlC~zI?B9A&>q_<ER+6>l@b6r0MnJ3XTi~I<a z36F74>ig+y$7t8ebe1}RtgM_NhuI^jqsr1k(Q^|RSi!3aW(J>#A{4;I&a5)J7Yvng zj!ddE3?usiXP?O#q8>DaREn{M-~dXvRq%o1IJ$B&#wxc;Gvum-)f{)urb0=x;Y~n4 zmOaY0FfT<F)J)jOoLtWK7y(d8)O}?p^GRG83q{d=Wv<y!Eh4aOk$i)b$ZGp{Ba&hB zZs`Pk7$q~hoxnn?7J(B+7OpR90s+lCyDC?vMi{-4yYDMxPXLMQLLx3h%A*GgyTnoT z(r^zY@m-olw%@Ao-)WK3xgV@E+RHTNBvS_Cte&=NSyK9mn}WDGIpv;yD<$-)1Ri5s zGahiTtxjtzjPWbe{!dN#gJecn%C;Bo&mc^(snHhPFQwNka~9gJt0*=z2kPCW(hEO% 
zfIaHy^0O4D%b_y&*ey<mio4tiNMe-z2MdN~+?4WN%=8)XjK|~_y=3vlv18!i;igy% zxMWf<sWBrlo<+_hiY`>1cSqMP28Wt4YtXDEAi)J5MhLR~WNenDI<+GDsfz!~60>ti z%t#(H#F`wKZQD~{!w@^ro-+vPP%dF07S?K8$L*YVHo4;{=kNye+`@;-0cVsRfR`RJ zkJIE5k+P9kWMfEM!Mc2f2F>cUB*#XYIAi4)+HNHIQCM~mtSlao_yk;3Fig+mTm8@k zyFR4Wmzl9wKC!q{rcjQnCh_futSdWTTw8t`Dnn}K*0RlKY0+ce9<Yq*S}VE*qlop| z2P7qJIhJoh&WnFbQTgLUOJ?lJa0h9{#<@ln$z~{XiAD|+$O)V}O0bp{)0+Z}3ll8I z2qRYjD`&v)eVFVYH3kbINu_ep7l~zQMyX=({4@`IZEKG$7Kzl@udMyB41Z>-6fxVc z`p4$5f*0195a;StIRpr?NuoDA7+7@5iaWHsI^wV}Gy76qK?%Vb&t@1RGU47^J0Qv) z@1*|y)y)|GO)&6wJIr;bN45Z{O&SX$*U+jn3|24%_c%m$cyjoxOqe&P!K<OznSRW0 zBs>DrC?0v5Wij?2MZhaN8G`p^0CHr5l<~;Io97dX?C95T>Gg90Dnnu}W3Z&9|0F=P zg*voUTN|9Tiqzt|B@$YIcA4`0bHDlZ#O3K+S+Fgdtd#k&CMgX)n`YS(ZiFQzeOVnA zhH~;jp<>!OrmDTsL&2*mkh5)2;k38dvXd&x4*Yj|;<0}))xGz$3S=g09Ockje1+#k zD!id&FNHoEK&+^#_Qxun>ip~Ein6C>nG@h5^}m46xymPkBJMq6h^i|hI{rgoN_(+R zP-?`K_KIPkwVi8k?2Gl{W~F8iQtky16E%pKb&h%v4_YyOg!(Bcde!H!=aM5JTKi0! z(w7V7h<#1O<g)v9WO*yG>ZWI%&53_DLvJ22^V%-5Yxlki?jm+pTCi>6(z2kt8mjKi z%wVqmgtnSwg5RPs6T+mmAg#YhQ%n_=uI$L?QyRgU%8ytvMQQsp&%BTeMgtpII!(Vu zFp;>i^+hWS@qLG)rll8pNrW<f&pHa|8G4@7uH(n1$-Jkx|7-)0CRZRKuZgZ{+>JJ; zg6WMl`=Ps)h~4KqRe)n7#Sah9e7r?0@z~xuMp^+at;<;&d(c3X4d6n)3?lUPB`(0$ z_sesBoyOF>R*sriHAV}l)@C3+R(P#Aglnw$B~aP1>p2EoN_lMtM3J?sM!M{&9YmP9 z&@CGIO+Da~;c~xKW6GsKP37#0MQ@#@R03nH029V>w6eq;LLXV?RP%I?TWprGg;Ysa zsti{*VL0xG@T8{}s(vW$*1R*^a#4*zPkn@L*5yP7%wQNJPCIDi$~i1JGC(mu{ZowC z6*a~hf+D12(i+2xFszLgVi_c(AMg1CHpU0nI7_Z5n6yZu5{yhtv@<L6i*=(XF!fnn z`NIEaQ?{~h3<-bTAo=tpCUZy0k+2fYzktY}_AIX^7&$s7%Hpc)3saXW2BQ`;a=C;c z`j+cV!v1O(y*xvyoN$$<h#`XE1pf5R)Kq|NAFzW3zA4}Gr)W&JZf;?0f`!N3qK{H` zVRG_qrcL+q7>F(7ZOXpS6(Ghlb7E9XjVW3S6;)@cmo`sKfcRae?Aw+Obs=X<P5;Vp zGE2b@_lLJ&Uc@ahj4!XllIr*GN{CMm!TcGf>>s6LorkUa53E&hlEZDvUTN-PGs`&x zBR1opcD(1)$;`K4o?5Ht(%IWwjYa_d&|!4<bz=61>6*vSlERbz2X!e&p2~=jV!%t} zU;Gfk6kwO5M~oyK?@dxdum%UPo6M2!R=GX5VDfn_sHzp!_|lZBH{4m9wa|&V(!Jc; z=9g14{DT)VITd3BjXNZq(Je1fQvxOw|AF-Mw%ih4i$(X*qkPdr^N%%#k(1EK5;Q)D z<T2M-@mn)pn7LQN5%htWiHku`hQZ+|P!<iiY<S9iA?AR<t|)X46gpXDHLgQ&w@)x{ zb(g%DA-5O(GC>gCoiZ^8+Vl`Dlz~ctVkj9dtF<^Q@!<EdDoFg<PJ|yNfxF~4YFL;+ z8=p2FmG+En&c&QTiP_^L0E?LVZkdU&`pB8`{`CzD)Avwl5<mL$(Yh(vK;p<kj;I)T zpxRaaSDo2Jb4>a-JsbdBL22u?^gI)OK7Ha!VfE6|_L1cb^sp=|jcGF%&ksc{4EFUV zyjMt2S)^51RL+g&y~VfJ2KD8w5~i9tw+!u?E94zxGEEkF8t!bh>&^hp$2kQvRko&S zquJ-WaB0-x<=S5}n(|nLXl84(<Ncw3!J==mEQ=cOa?`Hc@iQqFlQ)DKt3fK?x(Q-j zZP7DE($tgFrL`)gFAl>w7T#5$GU!ki79$7g@JRxU<W@m}<*>`xBEwotH4Q;7rtZ`> zX-NT_R`+RvOa{xQGE%2!mV-6W*2}`N@ZP|qT$a(4Yf*oiO_>z9dOuq(Y*af)owM|z z#Z*5VPu0|eq{+*g`g-@PmO^J~s8uLu3N@i2FoR_OyuA~b&+@%x@s*jklE&`I!GxNv zR3DLMrn53runsV@GlncqHdAPITVv@&A^kKsR*<2~Up`X2JJ~Ea&~6yiW4kbQ`r5f= zbwc2Z)e)4vrP9U0-LvzpD$U~vP<n3#RQ;~1jw)buuaO*-qHu1*(~_jp5jB0&*qBhP zskF5}mjlzYL?qb~KV;K88pH@<V4;GeXk2|w6m7sBhtGgeS~}NGW*t?eR(z`5GX(2N zBwX~Rgm0I2$Zts;CWAfbZ>q`5cH9dxZKWIALOojO_8iSFY*`k}-dDN1dg^`E4nb$B zr8+)JcUXbv1Q|USkY$41Sj{{@t`TIKkknBD>Jc!u7-NWe{A8OXOvIhkQ=9gkC|#k7 z>D-<}LY=4H%_?=OeTiVc3T}fFHdvI;=dDkyEUSY?1T#UlY*brIc5sRgV$89a=FXs> zM{Qt&3(ACMWS&h}Vp{i;xh%Q6JiU);TP%xCbe!g|uvU-K-PAjC^V0*g@d;Ox)Iduo zVfI`ybQ%YIon^U*%apHi`l>ovD)E{9w;oxH!ye7=V5=$79hE51&F!=#DISNOzXgzJ zZkLx&qA8|DjYAm0I0P>^U$nxT3)h-(-~3OqTIoy-+krD*eLN!{J5MY$n^~~jgT^k- zW17a5J(l5%z-LI+#NqT28wxVJTo#4m0hv|lsX=1Mv-R-3U}uZCD0e;Kp^qw%H%(tx zuwP9yi)B`GfI2g@DNPxp<v~0=yRHNxy_m3Go##=MDB#eF<sDWA`KcG|&(nmzBUj;I zRD77auxiTW<JxmyU<P9P=<ttKUK2mGdB}%$Z46cTbgBhHr9d~0pee;OV-^6ptdITE zuoV?*u26PNB%2M`C5%l&t;KLHL!T7lDHQE^5goI1zA6k;qnE%A^=6^5yC($8?TgP= zm_XGT_%=t3Axub<BLwh@8j$H-$W=3}I(eg&{w3;jzW4_Ru7$N*-%%OoKLSODRM_(s zUC_OoyN^iNBCSQZBw7wD_*%A*p-=J?*ga&Fv30_!1w7pz8`Z8t9_hsNa|ZO)X%^Zx 
zS93MyxDLn(A4vg*^oHjV%n;j49I*CvJz|GwCf0IUfL7t7b+Dkodd7MlnzKOoSx?(J zoEOhR74jMe#t%9JH1+CA_mRrQ*>O-&S!9>*>*LqALd*AcU-xxiKjHch7Mi*Bg7V7( P00000NkvXXu0mjf;zvIF literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/images/opml.png b/lib/venus/themes/common/images/opml.png new file mode 100644 index 0000000000000000000000000000000000000000..3f18190cdec1bf6ff9d956ac7246a9a27028c8a9 GIT binary patch literal 804 zcmV+<1Ka$GP)<h;3K|Lk000e1NJLTq001KZ000aK0{{R3-~IH60004iP)t-s(*OYT zr>TX7h22X?t*x!#Eh+oz>EUT)-8DA8zP>XvGo+-X<uWnhF)`m-QQ;gK_~zzjW@gtk zG2vrl<%ERqtETasp6t20--LkL7YON_o8_yg^wrn@|NrN8aq!jD-YFv269xG2@8ICz z;~W>*J~-u#ito<Q^To&H4*=g!OWu8Y;29GB_xR>QJIu_?`S$kiud>~2Xw@PZ;h2}) z4FKgB4&yN?*araVSy$|kj^igB@4~+2As6qZrQ=Ua-$O&)A|TsyZs@nT*)c2Rdvxb} ze7Co^;5RefVO;5^rt;w6<rES3-qz?zNb$6<{O<7H9~1Ze`|62?<vcj%At2r&8r=r~ z;8auQX=CL<K=;<(-ZU!Y77Oi;j_<a%;z>c?F(L4-t=s?r>vwkMYi!&U2IL<T<R>BJ zWMt-0Qr8m>;6XXzfqw48#^!Tw-Z(biV^Zd=t?n5W<P-|&VqET%lmGw#0000000000 z00000000000000000000$Gv~L00009a7bBm0000;0000;07l7cJ^%m#N=ZaPR2b83 zkO@!1U<`(*Lr`HlaU71XFi|--1aS^UbW=fw1QC=gLll*vpkO?HzZ(G(65ph0`u0iF z7K5osd!}vFvd6)}j4&;bD^`SPnHb_$#fL&ng$C*^Flt|#4ZAvHmlABjPw|__;C*0a zmH$3O$)IV(7}l2OHKw$PS=$!eB|ObRsh<xs!KP6A%Gcpqd55hfvpxq^dI)0i7rdIz z4G&D@BdhYfs=DDPxH8$NoKUoUniZ;SrB4G#G$WsKty=~ql^ZkU?Z&DPAbm$P>qX5d z5}kX1OqWKIK#m!z7xJG2h(wJY=tj}Pwg61swk;o-+>Te*0ZM1AlTN+rhBwRj0qWbo zbn`}RGz#(wiOC(QWfK`6IJ%*d4tX>L!jcm`58UM=svx?stG#6WVt};X!j5<PTFw}q i`SV9$RCD_|_+Nj;NJ?rJVR`)k0000<MNUMnLSTZdU3_i; literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/images/planet.png b/lib/venus/themes/common/images/planet.png new file mode 100644 index 0000000000000000000000000000000000000000..9606a0c30dafe4f279464804a9a5c92b8c5466eb GIT binary patch literal 426 zcmV;b0agBqP)<h;3K|Lk000e1NJLTq002+`000jN0{{R3L>IWd0001ZP)t-s6&4vG zAtEd*E;&0<P)}4)P;OIHd016yS66XfU}a}vYi4F<Xk>qKV`^${d~tAgh-qeVagus* zg>rMCdwqI?e3N&7tAT=(e1oHueuIjFjfswVrh|BujFOR(o065Ps+E<ao}a3%tiZs- z)78?_*xL8`_W%F?17w|r00009a7bBm000XT000XT0n*)m`~Uy|2XskIMF-RY69p|T zS&88(0002RNkl<Zc-pO#4G)4K5Qf3TkF-?EOw#1r|NjqbqlB5YGTz<b2HSHl;|^q= z1+vkrT%cA$C~fa9E93$dqb((k@&%N%E6X;D_R0@X4|Pu{#&wcv2WU3KHmO~gYFeN= zSA?-9M{yus;4_?boGN^BC{9(|?P7#BfG;~VM~`rfzXKksoc1|NW<YD@M~7;_m<X{= zA&{@YaYp;Xp}sbZ@gfXBs6Dk!njMP}g6U8GZfuzb5Wvt|W8YsbId?~I?JRrN4Gq<T UDHGgccmMzZ07*qoM6N<$f?q+e;s5{u literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/images/tcosm11.gif b/lib/venus/themes/common/images/tcosm11.gif new file mode 100644 index 0000000000000000000000000000000000000000..548c998c54fac495a96dff0e691328b09b17d338 GIT binary patch literal 203 zcmV;+05tzcNk%w1VG95X0K^{vZnsK>!e&9B5{$)fH=6}mt1eBZAvl``r_+(W-mIe0 zi^SozJDmz+u{?mkU}3O1VX!)f!)a%;LBZg%pwEb*(204xSgzNagTZ7(p%rbmNi~`S zG?@W4ngTPK0RR90A^8LW0018VEC2ui01E&L000F@;P)+nKnTDheu38!Z|7lO<!y$N zD8}YdhC^c|Y8(uV4TAVVY#0zufmz@@7Ou<yF%WP(Dvf9S^#~Foi$@XxI4(rYgt-a< F06Q2fQFZ_T literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/images/venus.ico b/lib/venus/themes/common/images/venus.ico new file mode 100644 index 0000000000000000000000000000000000000000..e813cb0ad53532dd7b14ce2f1be8dff3a6534945 GIT binary patch literal 894 zcmZQzU<5(|0R|wDV8~!*U=RbcG=LZ+qyWT>Kyh5)@87?F{`>)xf8V@&KWFtOpVT5F zpBSU)JmZ}9z`E)4w;%rS<ttDDSk0e*|Nenlf6raL=AV+U8I-IYRiNgds1{$YQZP}g zX`x=ry!a_A?mT%3)&VB|{5^mDhFw&eig%PwT&Z#86zzmc^@u!;%y!MH=@!%18}%#? 
z>00vm)jNom4<A28<dthh<Qo=un>Nn2=v!{tu~Z|rSTDIkuWE`;>q5I38?Ablmo41- z{pU}hmepH#>V~D6l=oXqT4}XloBh(=jth5KbuKWB%Co8LaG1Hypm()V^J3GoX~!?$ z`u5{{QdzxrWVThuJlpwOZ5C{EUvtoF{Xx4WJFVMinIsq6)pglSTyECB+@x-9+w$E{ zU%hgO$<PQ+x2T?M-M`wbZL#CLEk5fG*{|GZwQ#HLl;uWslk`ivbxNliRLqR;U2*Tx z6U(SH&G2lUv>K!0e$(n1F0<CU%vt9&W3|P+4JOmp>rY;%KXs#8<0AE(Zl99Amv3LY zCFQ9{=ICYCnO00PteIxrI^DK)x=q(?+nK8kC#}@&T5Qn2LT}<Kt=1)x9SeT``kCL@ zt>BZOkyx#l(_>K7YhFFUdg4N}*(*)wt+AN1#(2sKt>)R<WfN7hx|^2o0*24N<0qBf zLzM#4)DtW83%d=grWiKO*6En1)w@W0;xdiiWooU9)#~SIXSANUd>g3#`;Q-4Exii< zsmkFw3NeN1=}kr@6ZD(rXm`w4ZC|L+Fi);@ielcRvIX0I{Q3=vvS+W~#<b1U%<0g` z?@`O|)G6r)#*|v!48`i13RN?dfgv+t)vNcPfQq5%=kMPaZ{L?M+GNx;-*D<$+j-lp z7jD&<xKgEUp?=l$ig_Dfz5M_&8Aw9XCy@UB>(_}}53AQ7@}9fRWB%5F)%yx}oH}~< zG0+*{Bn(msF&|3)1*KJ><nxc8@4R^V=-mgP31ByX%!a68VEF%^fkEUtga)zsN`UOY b5Pr*h2o1_TAi25}28M<ZAo~@BpL7HOvCrS9 literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/images/venus.png b/lib/venus/themes/common/images/venus.png new file mode 100644 index 0000000000000000000000000000000000000000..685035deef15ff2e82bba825fa4d94a781870bb1 GIT binary patch literal 570 zcmV-A0>%A_P)<h;3K|Lk000e1NJLTq002+`000jN0ssI2{Hx#C0000PbVXQnQ*UN; zcVTj607r6RaA;{`LvL<&WpZ?7ay4B@?f?J*+(|@1RCt_XNE=~*0f_&P7=ZBbKN$G< z_UWa=Gf(ZRyMAWUr?(H_;&44bBxHvuLh&%z&wmeZnKHjgfAv(;^|P%OwyRF9RNue8 z@893Q$bmHA?xv21fpNHIVRvz~N_o6;TaMO@2K|K{8uOYIrj;rlT-!YiWBAF#yWwsM z#c}FAB}TIv%ocPSESspmtY2$hlX^#{+N-Dc@p~9q9L`45f=dlnSy}`xiicM&nvxx% z-kGc4Tc|s=(PGJDi#4+?mh~IVYcrhJq<w1V9P&JjrUs8cap|I!ho|>f=0>XZ6dBBH zu-r7?W7Tx0jdLv4Og324uQR1kW5c{=f}swP!V^)*3W$ke+Io25tgezcoi)?#)=joP zxWQ-7N{1!=*6pRHQ|rwZG?|{-HJ3aOV@(D4qZ*fXDtq|qrPF!QdW*ZwcP_Hsv)pNM zuWfOH@$#vTE4xjmR~S8ibd#8Tg*6T1&%>DUNxRbVKUyi>)0hK{<o*(a#qH*WQCi&v z+Kbx^)=ahCyQ*mzmeN4*{o9wu@|4U_jl4*WvN+A@6&g#r^f%2b{Qc|ae<U#A?xtD{ zL&#Idc6Zce_f$tOpOSaw^e(vgaP#m80Z<Q*C;$Ke0RR6307i|kbWeYux&QzG07*qo IM6N<$f|#)wdjJ3c literal 0 HcmV?d00001 diff --git a/lib/venus/themes/common/opml.xml.xslt b/lib/venus/themes/common/opml.xml.xslt new file mode 100644 index 0000000..dfc172c --- /dev/null +++ b/lib/venus/themes/common/opml.xml.xslt @@ -0,0 +1,40 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:planet="http://planet.intertwingly.net/" + exclude-result-prefixes="atom planet"> + + <xsl:output indent="yes" method="xml"/> + + <xsl:template name="rfc822" xmlns:date="http://exslt.org/dates-and-times"> + <xsl:param name="date"/> + <!-- http://www.trachtenberg.com/blog/2005/03/03/xslt-cookbook-generating-an-rfc-822-date/ --> + <xsl:value-of select="concat(date:day-abbreviation($date), ', ', + format-number(date:day-in-month($date), '00'), ' ', + date:month-abbreviation($date), ' ', date:year($date), ' ', + format-number(date:hour-in-day($date), '00'), ':', + format-number(date:minute-in-hour($date), '00'), ':', + format-number(date:second-in-minute($date), '00'), ' GMT')"/> + </xsl:template> + + <xsl:template match="atom:feed"> + <opml version="1.1"> + <head> + <title><xsl:value-of select="atom:title"/></title> + <dateModified> + <xsl:call-template name="rfc822"> + <xsl:with-param name="date" select="atom:updated"/> + </xsl:call-template> + </dateModified> + <ownerName><xsl:value-of select="atom:author/atom:name"/></ownerName> + <ownerEmail><xsl:value-of select="atom:author/atom:email"/></ownerEmail> + </head> + + <body> + <xsl:for-each select="planet:source"> + <outline type="rss" text="{planet:name}" title="{atom:title}" + xmlUrl="{atom:link[@rel='self']/@href}"/> + </xsl:for-each> + </body> + </opml> + 
</xsl:template> +</xsl:stylesheet> diff --git a/lib/venus/themes/common/rss10.xml.tmpl b/lib/venus/themes/common/rss10.xml.tmpl new file mode 100644 index 0000000..cdaaa79 --- /dev/null +++ b/lib/venus/themes/common/rss10.xml.tmpl @@ -0,0 +1,37 @@ +<?xml version="1.0"?> +<rdf:RDF + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:foaf="http://xmlns.com/foaf/0.1/" + xmlns:content="http://purl.org/rss/1.0/modules/content/" + xmlns="http://purl.org/rss/1.0/" +> +<channel rdf:about="<TMPL_VAR link ESCAPE="HTML">"> + <title><TMPL_VAR name ESCAPE="HTML"></title> + <link><TMPL_VAR link ESCAPE="HTML"></link> + <description><TMPL_VAR name ESCAPE="HTML"> - <TMPL_VAR link ESCAPE="HTML"></description> + + <items> + <rdf:Seq> +<TMPL_LOOP Items> + <rdf:li rdf:resource="<TMPL_VAR id ESCAPE="HTML">" /> +</TMPL_LOOP> + </rdf:Seq> + </items> +</channel> + +<TMPL_LOOP Items> +<item rdf:about="<TMPL_VAR id ESCAPE="HTML">"> + <title><TMPL_VAR channel_name ESCAPE="HTML"><TMPL_IF title>: <TMPL_VAR title_plain ESCAPE="HTML"></TMPL_IF></title> + <link><TMPL_VAR link ESCAPE="HTML"></link> + <TMPL_IF content> + <content:encoded><TMPL_VAR content ESCAPE="HTML"></content:encoded> + </TMPL_IF> + <dc:date><TMPL_VAR date_iso></dc:date> + <TMPL_IF author_name> + <dc:creator><TMPL_VAR author_name></dc:creator> + </TMPL_IF> +</item> +</TMPL_LOOP> + +</rdf:RDF> diff --git a/lib/venus/themes/common/rss20.xml.tmpl b/lib/venus/themes/common/rss20.xml.tmpl new file mode 100644 index 0000000..724a104 --- /dev/null +++ b/lib/venus/themes/common/rss20.xml.tmpl @@ -0,0 +1,33 @@ +<?xml version="1.0"?> +<rss version="2.0"> + +<channel> + <title><TMPL_VAR name></title> + <link><TMPL_VAR link ESCAPE="HTML"></link> + <language>en</language> + <description><TMPL_VAR name ESCAPE="HTML"> - <TMPL_VAR link ESCAPE="HTML"></description> + +<TMPL_LOOP Items> +<item> + <title><TMPL_VAR channel_name ESCAPE="HTML"><TMPL_IF title>: <TMPL_VAR title_plain ESCAPE="HTML"></TMPL_IF></title> + <guid isPermaLink="<TMPL_VAR guid_isPermaLink>"><TMPL_VAR id ESCAPE="HTML"></guid> + <link><TMPL_VAR link ESCAPE="HTML"></link> + <TMPL_IF content> + <description><TMPL_VAR content ESCAPE="HTML"></description> + </TMPL_IF> + <pubDate><TMPL_VAR date_822></pubDate> + <TMPL_IF author_email> + <TMPL_IF author_name> + <author><TMPL_VAR author_email> (<TMPL_VAR author_name>)</author> + <TMPL_ELSE> + <author><TMPL_VAR author_email></author> + </TMPL_IF> + </TMPL_IF> + <TMPL_IF enclosure_href> + <enclosure url="<TMPL_VAR enclosure_href ESCAPE="HTML">" length="<TMPL_VAR enclosure_length>" type="<TMPL_VAR enclosure_type>"/> + </TMPL_IF> +</item> +</TMPL_LOOP> + +</channel> +</rss> diff --git a/lib/venus/themes/common/validate.html.xslt b/lib/venus/themes/common/validate.html.xslt new file mode 100644 index 0000000..0cabdcc --- /dev/null +++ b/lib/venus/themes/common/validate.html.xslt @@ -0,0 +1,146 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml" + xmlns:planet="http://planet.intertwingly.net/" + xmlns="http://www.w3.org/1999/xhtml"> + + <xsl:template match="atom:feed"> + <html xmlns="http://www.w3.org/1999/xhtml"> + + <!-- head --> + <xsl:text> </xsl:text> + <head> + <title><xsl:value-of select="atom:title"/></title> + <meta name="robots" content="noindex,nofollow" /> + <meta name="generator" content="{atom:generator}" /> + <link rel="shortcut icon" href="/favicon.ico" /> + <style 
type="text/css"> + img{border:0} + a{text-decoration:none} + a:hover{text-decoration:underline} + .message{border-bottom:1px dashed red} a.message:hover{cursor: help;text-decoration: none} + dl{margin:0} + dt{float:left;width:9em} + dt:after{content:':'} + </style> + </head> + + <!-- body --> + <xsl:text> </xsl:text> + <body> + <table border="1" cellpadding="3" cellspacing="0"> + <thead> + <tr> + <th></th> + <th>Name</th> + <th>Format</th> + <xsl:if test="//planet:ignore_in_feed | //planet:filters | + //planet:xml_base | //planet:*[contains(local-name(),'_type')]"> + <th>Notes</th> + </xsl:if> + </tr> + </thead> + <xsl:apply-templates select="planet:source"> + <xsl:sort select="planet:name"/> + </xsl:apply-templates> + <xsl:text> </xsl:text> + </table> + </body> + </html> + </xsl:template> + + <xsl:template match="planet:source"> + <xsl:variable name="validome_format"> + <xsl:choose> + <xsl:when test="planet:format = 'rss090'">rss_0_90</xsl:when> + <xsl:when test="planet:format = 'rss091n'">rss_0_91</xsl:when> + <xsl:when test="planet:format = 'rss091u'">rss_0_91</xsl:when> + <xsl:when test="planet:format = 'rss10'">rss_1_0</xsl:when> + <xsl:when test="planet:format = 'rss092'">rss_0_90</xsl:when> + <xsl:when test="planet:format = 'rss093'"></xsl:when> + <xsl:when test="planet:format = 'rss094'">rss_0_90</xsl:when> + <xsl:when test="planet:format = 'rss20'">rss_2_0</xsl:when> + <xsl:when test="planet:format = 'rss'">rss_2_0</xsl:when> + <xsl:when test="planet:format = 'atom01'"></xsl:when> + <xsl:when test="planet:format = 'atom02'"></xsl:when> + <xsl:when test="planet:format = 'atom03'">atom_0_3</xsl:when> + <xsl:when test="planet:format = 'atom10'">atom_1_0</xsl:when> + <xsl:when test="planet:format = 'atom'">atom_1_0</xsl:when> + <xsl:when test="planet:format = 'cdf'"></xsl:when> + <xsl:when test="planet:format = 'hotrss'"></xsl:when> + </xsl:choose> + </xsl:variable> + + <xsl:text> </xsl:text> + <tr> + <xsl:if test="planet:bozo='true'"> + <xsl:attribute name="style">background-color:#FCC</xsl:attribute> + </xsl:if> + <td> + <a title="feed validator"> + <xsl:attribute name="href"> + <xsl:text>http://feedvalidator.org/check?url=</xsl:text> + <xsl:choose> + <xsl:when test="planet:http_location"> + <xsl:value-of select="planet:http_location"/> + </xsl:when> + <xsl:when test="atom:link[@rel='self']/@href"> + <xsl:value-of select="atom:link[@rel='self']/@href"/> + </xsl:when> + </xsl:choose> + </xsl:attribute> + <img src="http://feedvalidator.org/favicon.ico" hspace='2' vspace='1'/> + </a> + <a title="validome"> + <xsl:attribute name="href"> + <xsl:text>http://www.validome.org/rss-atom/validate?</xsl:text> + <xsl:text>viewSourceCode=1&version=</xsl:text> + <xsl:value-of select="$validome_format"/> + <xsl:text>&url=</xsl:text> + <xsl:choose> + <xsl:when test="planet:http_location"> + <xsl:value-of select="planet:http_location"/> + </xsl:when> + <xsl:when test="atom:link[@rel='self']/@href"> + <xsl:value-of select="atom:link[@rel='self']/@href"/> + </xsl:when> + </xsl:choose> + </xsl:attribute> + <img src="http://validome.org/favicon.ico" hspace='2' vspace='1'/> + </a> + </td> + <td> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:choose> + <xsl:when test="planet:message"> + <xsl:attribute name="class">message</xsl:attribute> + <xsl:attribute name="title"> + <xsl:value-of select="planet:message"/> + </xsl:attribute> + </xsl:when> + <xsl:when test="atom:title"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:title"/> + </xsl:attribute> + </xsl:when> + </xsl:choose> + 
<xsl:value-of select="planet:name"/> + </a> + </td> + <td><xsl:value-of select="planet:format"/></td> + <xsl:if test="planet:ignore_in_feed | planet:filters | planet:xml_base | + planet:*[contains(local-name(),'_type')]"> + <td> + <dl> + <xsl:for-each select="planet:ignore_in_feed | planet:filters | + planet:xml_base | planet:*[contains(local-name(),'_type')]"> + <xsl:sort select="local-name()"/> + <dt><xsl:value-of select="local-name()"/></dt> + <dd><xsl:value-of select="."/></dd> + </xsl:for-each> + </dl> + </td> + </xsl:if> + </tr> + </xsl:template> +</xsl:stylesheet> diff --git a/lib/venus/themes/django/bland.css b/lib/venus/themes/django/bland.css new file mode 100644 index 0000000..f24e8b6 --- /dev/null +++ b/lib/venus/themes/django/bland.css @@ -0,0 +1,39 @@ +body { + margin: 50px 60px; + font-family: Georgia, Times New Roman, serif; +} + +h1 { + font: normal 4em Georgia, serif; + color: #900; + margin-bottom: 0px; +} + +.updated, .entry-tools { + font: .8em Verdana, Arial, sans-serif; + margin-bottom: 2em; +} + +#channels { + float: right; + width: 30%; + padding: 20px; + margin: 20px; + margin-top: 0px; + border: 1px solid #FC6; + background: #FFC; +} + +#channels h2 { + margin-top: 0px; +} + +#channels ul { + margin-bottom: 0px; +} + +.entry { + border-top: 1px solid #CCC; + padding-top: 1em; +} + diff --git a/lib/venus/themes/django/config.ini b/lib/venus/themes/django/config.ini new file mode 100644 index 0000000..e44023c --- /dev/null +++ b/lib/venus/themes/django/config.ini @@ -0,0 +1,11 @@ +# This theme is an example Planet Venus theme using the +# Django template engine. + +[Planet] +template_files: + index.html.dj + +template_directories: + +bill_of_materials: + bland.css diff --git a/lib/venus/themes/django/index.html.dj b/lib/venus/themes/django/index.html.dj new file mode 100644 index 0000000..632a527 --- /dev/null +++ b/lib/venus/themes/django/index.html.dj @@ -0,0 +1,49 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> +<head> + <title>{{ name }}</title> + <meta http-equiv="content-type" content="text/html; charset=utf-8" /> + <link rel="stylesheet" href="bland.css" type="text/css" /> +</head> + +<body> + +<h1>{{ name }}</h1> + +<p class="updated"> + last updated by <a href="http://intertwingly.net/code/venus/">Venus</a> + on {{ date }} on behalf of {{ author_name }} +</p> + +<div id="channels"> + <h2>Feeds</h2> + + <ul> + {% for channel in Channels %} + <li>{{ channel.title }} by {{ channel.author_name }}</li> + {% endfor %} + </ul> +</div> + +{% for item in Items %} +{% ifchanged item.channel_name %} +<h3>{{ item.channel_name }}</h3> +{% endifchanged %} + +<div class="entry"> + {% if item.title %}<h4>{{ item.title }}</h4>{% endif %} + + {{ item.content }} + + <p class="entry-tools"> + by {{ item.channel_author }} on + {{ item.date }} · + <a href="{{ item.link }}">permalink</a> + </p> +</div> +{% endfor %} + +</body> +</html> + diff --git a/lib/venus/themes/genshi_fancy/config.ini b/lib/venus/themes/genshi_fancy/config.ini new file mode 100644 index 0000000..d5a127d --- /dev/null +++ b/lib/venus/themes/genshi_fancy/config.ini @@ -0,0 +1,20 @@ +# This theme reimplements the classic "fancy" htmltmpl using genshi + +[Planet] +template_files: + atom.xml.xslt + foafroll.xml.xslt + index.html.genshi + opml.xml.xslt + rss10.xml.tmpl + rss20.xml.tmpl + +template_directories: + ../common + ../classic_fancy + +bill_of_materials: + planet.css + 
images/feed-icon-10x10.png + images/logo.png + images/venus.png diff --git a/lib/venus/themes/genshi_fancy/index.html.genshi b/lib/venus/themes/genshi_fancy/index.html.genshi new file mode 100644 index 0000000..fe26934 --- /dev/null +++ b/lib/venus/themes/genshi_fancy/index.html.genshi @@ -0,0 +1,95 @@ +<html xmlns="http://www.w3.org/1999/xhtml" + xmlns:py="http://genshi.edgewall.org/"> + +<!--! +### Fancy Planet HTML template, converted to Genshi. +### +### When combined with the stylesheet and images in the output/ directory +### of the Planet source, this gives you a much prettier result than the +### default examples template and demonstrates how to use the config file +### to support things like faces +### +### For documentation on the more boring template elements, see +### http://www.intertwingly.net/code/venus/docs/templates.html +--> + +<head> +<title>$feed.config.name</title> +<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/> +<meta name="generator" content="$feed.generator"/> +<link rel="stylesheet" href="planet.css" type="text/css"/> +<link py:for="link in feed.links" + py:if="link.type in ['application/atom+xml','application/rss+xml']" + href="$link.href" rel="alternate" title="$link.title" type="$link.type"/> +</head> + +<body> +<h1>$feed.config.name</h1> + +<py:for each="entry in entries"> + +<div class="channelgroup" py:strip="not entry.new_date"> +<h2 py:if="entry.new_date">$entry.new_date</h2> + +<div class="entrygroup" py:strip="not entry.new_feed"> +<h3 py:if="entry.new_feed"><a href="$entry.link" title="$entry.source.title">$entry.source.config.name</a></h3> + +<img py:if="entry.new_feed and entry.source.config.face" class="face" src="images/$entry.source.config.face" width="$entry.source.config.facewidth" height="$entry.source.config.faceheight" alt=""/> + +<h4 py:if="entry.title" lang="$entry.title_detail.language"><a href="$entry.link">$entry.title_detail.stream</a></h4> + +<div class="entry"> +<div class="content" py:choose=""> +<py:when test="entry.content">${entry.content[0].stream}</py:when> +<py:when test="entry.summary_detail">${entry.summary_detail.stream}</py:when> +</div> + +<p class="date"><py:if test="entry.author_detail and entry.author_detail.name">by $entry.author_detail.name at </py:if>$entry.updated</p> +</div> + +</div> +</div> + +</py:for> + +<div class="sidebar"> +<img src="images/logo.png" width="136" height="136" alt=""/> + +<h2>Subscriptions</h2> +<ul> +<li py:for="feed in feeds"> +<a py:for="link in feed.links" py:if="link.rel == 'self' and + link.type in ['application/atom+xml','application/rss+xml']" + href="$link.href" title="subscribe"><img src="images/feed-icon-10x10.png" alt="(feed)"/></a> +<py:choose> +<a py:when="feed.planet_message" href="$feed.link" class="message" title="$feed.planet_message">$feed.config.name</a> +<a py:otherwise="1" href="$feed.link" title="$feed.title">$feed.config.name</a> +</py:choose> +</li> +</ul> + +<p> +<strong>Last updated:</strong><br/> +$feed.updated<br/> +<em>All times are UTC.</em><br/> +<br/> +Powered by:<br/> +<a href="http://intertwingly.net/code/venus/"><img src="images/venus.png" width="80" height="15" alt="Planet Venus" border="0"/></a> +</p> + +<p> +<h2>Planetarium:</h2> +<ul> +<li><a href="http://www.planetapache.org/">Planet Apache</a></li> +<li><a href="http://planet.debian.net/">Planet Debian</a></li> +<li><a href="http://planet.freedesktop.org/">Planet freedesktop.org</a></li> +<li><a href="http://planet.gnome.org/">Planet GNOME</a></li> +<li><a 
href="http://planetsun.org/">Planet Sun</a></li> +<li><a href="http://fedora.linux.duke.edu/fedorapeople/">Fedora People</a></li> +<li><a href="http://www.planetplanet.org/">more...</a></li> +</ul> +</p> +</div> + +</body> +</html> diff --git a/lib/venus/themes/genshi_fancy/planet.css b/lib/venus/themes/genshi_fancy/planet.css new file mode 100644 index 0000000..05653c0 --- /dev/null +++ b/lib/venus/themes/genshi_fancy/planet.css @@ -0,0 +1,150 @@ +body { + border-right: 1px solid black; + margin-right: 200px; + + padding-left: 20px; + padding-right: 20px; +} + +h1 { + margin-top: 0px; + padding-top: 20px; + + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: normal; + letter-spacing: -2px; + text-transform: lowercase; + text-align: right; + + color: grey; +} + +.admin { + text-align: right; +} + +h2 { + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: normal; + color: #200080; + + margin-left: -20px; +} + +h3 { + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: normal; + + background-color: #a0c0ff; + border: 1px solid #5080b0; + + padding: 4px; +} + +h3 a { + text-decoration: none; + color: inherit; +} + +h4 { + font-family: "Bitstream Vera Sans", sans-serif; + font-weight: bold; +} + +h4 a { + text-decoration: none; + color: inherit; +} + +img.face { + float: right; + margin-top: -3em; +} + +.entry { + margin-bottom: 2em; +} + +.entry .date { + font-family: "Bitstream Vera Sans", sans-serif; + color: grey; +} + +.entry .date a { + text-decoration: none; + color: inherit; +} + +.sidebar { + position: absolute; + top: 0px; + right: 0px; + width: 200px; + + margin-left: 0px; + margin-right: 0px; + padding-right: 0px; + + padding-top: 20px; + padding-left: 0px; + + font-family: "Bitstream Vera Sans", sans-serif; + font-size: 85%; +} + +.sidebar h2 { + font-size: 110%; + font-weight: bold; + color: black; + + padding-left: 5px; + margin-left: 0px; +} + +.sidebar ul { + padding-left: 1em; + margin-left: 0px; + + list-style-type: none; +} + +.sidebar ul li:hover { + color: grey; +} + +.sidebar ul li a { + text-decoration: none; +} + +.sidebar ul li a:hover { + text-decoration: underline; +} + +.sidebar ul li a img { + border: 0; +} + +.sidebar p { + border-top: 1px solid grey; + margin-top: 30px; + padding-top: 10px; + + padding-left: 5px; +} + +.sidebar .message { + cursor: help; + border-bottom: 1px dashed red; +} + +.sidebar a.message:hover { + cursor: help; + background-color: #ff0000; + color: #ffffff !important; + text-decoration: none !important; +} + +a:hover { + text-decoration: underline !important; + color: blue !important; +} diff --git a/lib/venus/themes/mobile/config.ini b/lib/venus/themes/mobile/config.ini new file mode 100644 index 0000000..7a886e0 --- /dev/null +++ b/lib/venus/themes/mobile/config.ini @@ -0,0 +1,24 @@ +# In addition to the outputs produced by the 'asf' theme, this one adds +# a 'mobile' version. For best results, this needs to be combined with +# the 'excerpt.py' filter. 
+ +[Planet] +template_files: + atom.xml.xslt + foafroll.xml.xslt + index.html.xslt + mobile.html.xslt + opml.xml.xslt + validate.html.xslt + +template_directories: + ../asf + ../common + +bill_of_materials: + default.css + personalize.js + images/feed-icon-10x10.png + images/opml.png + images/foaf.png + images/venus.png diff --git a/lib/venus/themes/mobile/mobile.html.xslt b/lib/venus/themes/mobile/mobile.html.xslt new file mode 100644 index 0000000..62b2ff3 --- /dev/null +++ b/lib/venus/themes/mobile/mobile.html.xslt @@ -0,0 +1,199 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml" + xmlns:planet="http://planet.intertwingly.net/" + xmlns="http://www.w3.org/1999/xhtml"> + + <xsl:template match="atom:feed"> + <html xmlns="http://www.w3.org/1999/xhtml"> + + <!-- head --> + <xsl:text> </xsl:text> + <head> + <link rel="stylesheet" href="default.css" type="text/css" /> + <title><xsl:value-of select="atom:title"/></title> + <meta name="robots" content="noindex,nofollow" /> + <meta name="generator" content="{atom:generator}" /> + <xsl:if test="atom:link[@rel='self']"> + <link rel="alternate" href="{atom:link[@rel='self']/@uri}" + title="{atom:title}" type="{atom:link[@rel='self']/@type}" /> + </xsl:if> + <link rel="shortcut icon" href="/favicon.ico" /> + <script type="text/javascript" src="personalize.js"> + <xsl:comment>HTML Compatibility</xsl:comment> + </script> + </head> + + <xsl:text> </xsl:text> + <body> + <xsl:text> </xsl:text> + <h1><xsl:value-of select="atom:title"/></h1> + + <xsl:text> </xsl:text> + <div id="body"> + <xsl:apply-templates select="atom:entry"/> + <xsl:text> </xsl:text> + </div> + + <xsl:text> </xsl:text> + <h1>Subscriptions</h1> + + <xsl:text> </xsl:text> + <div id="sidebar"> + + <xsl:text> </xsl:text> + <h2>Info</h2> + + <dl> + <dt>Last updated:</dt> + <dd> + <span class="date" title="GMT"> + <xsl:value-of select="atom:updated/@planet:format"/> + </span> + </dd> + <dt>Powered by:</dt> + <dd> + <a href="http://intertwingly.net/code/venus/"> + <img src="images/venus.png" width="80" height="15" + alt="Venus" border="0"/> + </a> + </dd> + <dt>Export:</dt> + <dd> + <ul> + <li> + <a href="opml.xml"> + <img src="images/opml.png" alt="OPML"/> + </a> + </li> + <li> + <a href="foafroll.xml"> + <img src="images/foaf.png" alt="FOAF"/> + </a> + </li> + </ul> + </dd> + </dl> + + </div> + + <xsl:text> </xsl:text> + <div id="footer"> + + <xsl:text> </xsl:text> + <xsl:text> </xsl:text> + <ul> + <xsl:for-each select="planet:source"> + <xsl:sort select="planet:name"/> + <xsl:text> </xsl:text> + <li> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:value-of select="planet:name"/> + </a> + </li> + </xsl:for-each> + <xsl:text> </xsl:text> + </ul> + </div> + + </body> + </html> + </xsl:template> + + <xsl:template match="atom:entry"> + <!-- date header --> + <xsl:variable name="date" select="substring(atom:updated,1,10)"/> + <xsl:if test="not(preceding-sibling::atom:entry + [substring(atom:updated,1,10) = $date])"> + <xsl:text> </xsl:text> + <h2 class="date"> + <xsl:value-of select="substring-before(atom:updated/@planet:format,', ')"/> + <xsl:text>, </xsl:text> + <xsl:value-of select="substring-before(substring-after(atom:updated/@planet:format,', '), ' ')"/> + </h2> + </xsl:if> + + <xsl:text> </xsl:text> + <div class="news"> + + <xsl:if test="@xml:lang"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="@xml:lang"/> + </xsl:attribute> + </xsl:if> + + <!-- entry 
title --> + <xsl:text> </xsl:text> + <h3> + <xsl:if test="atom:source/atom:icon"> + <img src="{atom:source/atom:icon}" class="icon"/> + </xsl:if> + <a href="{atom:source/atom:link['alternate']/@href}" class="icon"> + <xsl:attribute name="title" select="{atom:source/atom:title}"/> + <xsl:value-of select="atom:source/planet:name"/> + </a> + <xsl:if test="string-length(atom:title) > 0"> + <xsl:text>—</xsl:text> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:if test="atom:title/@xml:lang != @xml:lang"> + <xsl:attribute name="xml:lang" select="{atom:title/@xml:lang}"/> + </xsl:if> + <xsl:value-of select="atom:title"/> + </a> + </xsl:if> + </h3> + + <!-- entry content --> + <xsl:text> </xsl:text> + <xsl:apply-templates select="planet:excerpt"/> + + <!-- entry footer --> + <xsl:text> </xsl:text> + <div class="permalink"> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:choose> + <xsl:when test="atom:author/atom:name"> + <xsl:text>by </xsl:text> + <xsl:value-of select="atom:author/atom:name"/> + <xsl:text> at </xsl:text> + </xsl:when> + <xsl:when test="atom:source/atom:author/atom:name"> + <xsl:text>by </xsl:text> + <xsl:value-of select="atom:source/atom:author/atom:name"/> + <xsl:text> at </xsl:text> + </xsl:when> + </xsl:choose> + <span class="date" title="GMT"> + <xsl:value-of select="atom:updated/@planet:format"/> + </span> + </a> + </div> + </div> + + </xsl:template> + + <!-- xhtml content --> + <xsl:template match="planet:excerpt/xhtml:div"> + <xsl:copy> + <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="../@xml:lang"/> + </xsl:attribute> + </xsl:if> + <xsl:attribute name="class">content</xsl:attribute> + <xsl:copy-of select="@*|node()"/> + </xsl:copy> + </xsl:template> + + <!-- plain text content --> + <xsl:template match="planet:excerpt/text()"> + <div class="content" xmlns="http://www.w3.org/1999/xhtml"> + <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="../@xml:lang"/> + </xsl:attribute> + </xsl:if> + <xsl:copy-of select="."/> + </div> + </xsl:template> +</xsl:stylesheet> diff --git a/lib/venus/themes/musings/config.ini b/lib/venus/themes/musings/config.ini new file mode 100644 index 0000000..76f6ecc --- /dev/null +++ b/lib/venus/themes/musings/config.ini @@ -0,0 +1,18 @@ +[Planet] +template_files: + atom.xml.xslt + foafroll.xml.xslt + opml.xml.xslt + index.html.xslt + +template_directories: + ../common + +bill_of_materials: + default.css + personalize.js + images/feed-icon-10x10.png + images/opml.png + images/foaf.png + images/venus.png + images/venus.ico diff --git a/lib/venus/themes/musings/default.css b/lib/venus/themes/musings/default.css new file mode 100644 index 0000000..c059227 --- /dev/null +++ b/lib/venus/themes/musings/default.css @@ -0,0 +1,402 @@ +/* + * Written by Stefano Mazzocchi <stefano at apache dot org> + */ + +/* ----------------------------- Global Definitions -------------------- */ + +body { + margin: 0px; + padding: 0px; + color: #222; + background-color: #fff; + quotes: "\201C" "\201E" "\2018" "\2019"; +} + +a:link { + color: #222; +} + +a:visited { + color: #555; +} + +a[rel~='license'] { + text-decoration: none; +} + +h1 { + font-size: 1.75em; + text-transform: uppercase; + letter-spacing: 0.25em; + padding: 10px; + margin: 0px 0px 0px 0px; + color: #FFF; + font-weight: normal; + background-color: #036; + border-bottom: 2px solid #bbb +} + +/* ----------------------------- Sidebar 
--------------------------- */ + +#sidebar { + float: right; + top: 150px; + right: 0px; + width: 11em; + background-color: white; + padding: 0px 10px 20px 0px; + margin: 0px 0px 20px 20px; + border-left: 1px solid #ccc; + border-bottom: 1px solid #ccc; +} + +#sidebar h2 { + letter-spacing: 0.15em; + text-transform: uppercase; + font-size: .9em; + background-color: #BCD; + color: #222; + font-weight: bold; + padding: 3px 0px 2px 4px; + margin: 15px 0px 5px 10px; + border: 1px solid #ccc; +} + +#sidebar p { + font-size: .8em; + padding-left: 20px; + padding-right: 5px; +} + +#sidebar ul { + font-family: sans-serif; + margin-left: 5px; + padding-left: 25px; +} + +#sidebar li { + margin-left: 0px; + text-indent: -15px; + list-style-type: none; + font-size: .8em; + line-height: 1.2em; +} + +#sidebar ul li a { + text-decoration: none; +} + +#sidebar ul li a:hover { + text-decoration: underline; +} + +#sidebar img { + border: 0; +} + +#sidebar dl { + font-size: .8em; + padding-left: 1.0em; +} + +#sidebar dl ul { + padding-left: 1em; +} + +#sidebar dt { + margin-top: 1em; + font-weight: bold; + padding-left: 1.0em; +} + +#sidebar dd { + margin-left: 2.5em; +} + +#sidebar .message { + cursor: help; + border-bottom: 1px dashed red; +} + +#sidebar a:active, +#sidebar a:hover { + color: #FFCC66; +} + +#sidebar a.message:hover { + cursor: help; + background-color: #ffD0D0; + color:#000; + border: 1px dashed red !important; + text-decoration: none !important; +} +#sidebar dl a { + text-decoration:none; +} + +/* ----------------------------- Body ---------------------------- */ + +#body { + margin-top: 10px; +} + +.admin { + text-align: right; +} + +#body h2.date { + text-transform: none; + font-size: 1em; + color: #222; + font-weight: bold; + text-align: right; + border-top: 1px solid #ccc; + background-color: #BCD; + border-bottom: 1px solid #ccc; + padding: 3px 15px 2px 5px; + max-width:43em; + margin: 0; +} + +/* ----------------------------- News ---------------------------- */ + +.news { + margin: 30px 10px 30px 10px; + clear: left; + max-width: 50em; +} + +.news h3 { + margin: 0 0 .5em 0; + padding: 0px; + font-size: 1.5em; +} +.news h3 a { + color:#036; + text-decoration:none; +} + +.news .content { + margin: 5px 5px 5px 15px; + padding: 0px 1em 1em 1em; + border-left: 1px solid #ccc; + border-bottom: 1px solid #ccc; + border-right: 1px solid #ccc; + line-height: 1.5em; + font-size: 1em; + font-family: sans-serif; + max-width:40em; +} + +.news .links { + +} + +.news .permalink { + text-align: right; +} + +.news .icon { + height: 1em; + width: 1em; + border: 0; + bottom: 0; +} + +/* ----------------------------- News Content ---------------------------- */ + +.news .content p { + line-height: 1.2em; +} + +.news .content img { + margin: 5px; +} + +.news .content blockquote { + margin: 10px 35px 10px 35px; + padding: 5px; +} + +.news .content pre { + font-family: monospace; + border: 1px solid #ddd; + padding: 10px; + margin: 10px 20px 10px 20px; + background-color: #f8f8f8; + overflow: auto; +} +.news .content code { + font-family: monospace; +} + +.news .content ul, .news .content ol { + margin: 5px 35px 5px 35px; + padding: 5px; + counter-reset: item; +} + +.news .content ul > ul, .news .content ul > ol, .news .content ol > ul, .news .content ol > ol { + margin: 0px 0px 0px 35px; + padding: 0px; +} + +.news .content li { + padding: 1px; + line-height: 1.2em; +} + +.news code { + font-family: large; +} + +.news .content :link, +.news .content :visited { + text-decoration: none; + 
font-weight:bold; + color:#036; +} +.news .content p:hover a, +.news .content dt:hover a, +.news .content dd:hover a, +.news .content li:hover a { + text-decoration: underline; +} + +.news :link:active, +.news :visited:active, +.news :link:hover, +.news :visited:hover { + color: #FFCC66; +} + +/* --------------------------- Accomodations ----------------------- */ + +/* Cosmic Variance */ +.alignright { + float:right; +} +.alignleft { + float:left; +} +img.alignright { + padding: 4px; + margin: 0 0 2px 7px; + display: inline; +} +img.centered { + display: block; + margin-left: auto; + margin-right: auto; +} +/* Backreaction */ +div.content:hover a, +div.permalink:hover a { + text-decoration:underline; +} + +/* Musings/String Coffee Table */ +math[display=block] {overflow:auto;} +math { white-space: nowrap } +.numberedEq span, .eqno {float:right} +merror {display:inline;font-size:1em;} +img.mathlogo, img.svglogo { + float:right; + border:0 +} +.footnote {font-size: .9em} +.update h4 { + display:inline; + font-size:1em; + font-weight:bold; +} +table.plaintable {border-collapse:collapse;} +.plaintable td {border:1px solid #000; padding: 3px;} +.plaintable th {padding: 3px;} +.plaintable caption { + font-weight: bold; + font-size:1.1em; + text-align:center; + margin-left:30px; +} +.centeredfigure { + position:relative; + margin:auto; + text-align:center; +} +.figurecaption {color:#630;} + +/* Bosker Blog */ +p.center {text-align:center} + +/* boing boing */ +br { + clear: none !important; +} + +/* engadget */ +h6 { + clear: left !important; +} + +/* cadenhead */ +p.sourcecode { + font-family: monospace; + border: 1px solid #ddd; + padding: 10px; + margin: 10px 20px 10px 20px; + background-color: #f8f8f8; + overflow: auto; +} + +/* programmableweb */ +.imgRight { + float: right; +} + +/* gizmodo */ +img.left { + float: left; +} + +/* gizmodo */ +img.right { + float: right; +} + +/* gizmodo */ +img.center { + display: block; + margin-left: auto; + margin-right: auto; +} + +/* wikipedia */ +table { + width: auto !important; +} + +/* del.icio.us */ +.delicious-tags { + font-size: .8em; + text-align: right; +} + +/* ----------------------------- Footer ---------------------------- */ + +#footer { + padding: 0px; + margin: 30px 0px 50px 50px; +} + +#footer p { + padding: 2px 2px 2px 5px; + background-color: #ccc; + border-top: 1px solid #aaa; + border-bottom: 1px solid #aaa; + border-left: 1px solid #aaa; + letter-spacing: 0.15em; + text-transform: uppercase; + text-align: left; +} + diff --git a/lib/venus/themes/musings/index.html.xslt b/lib/venus/themes/musings/index.html.xslt new file mode 100644 index 0000000..3c17db9 --- /dev/null +++ b/lib/venus/themes/musings/index.html.xslt @@ -0,0 +1,293 @@ +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0" + xmlns:atom="http://www.w3.org/2005/Atom" + xmlns:xhtml="http://www.w3.org/1999/xhtml" + xmlns:planet="http://planet.intertwingly.net/" + xmlns="http://www.w3.org/1999/xhtml" + exclude-result-prefixes="atom planet xhtml"> + +<xsl:output method="xml" doctype-system="http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd" doctype-public="-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN"/> + + <xsl:template match="atom:feed"> + <html xmlns="http://www.w3.org/1999/xhtml"> + + <!-- head --> + <xsl:text> </xsl:text> + <head> + <link rel="stylesheet" href="default.css" type="text/css" /> + <title><xsl:value-of select="atom:title"/></title> + <meta name="robots" content="noindex,nofollow" /> + <meta name="generator" 
content="{atom:generator}" /> + <xsl:if test="atom:link[@rel='self']"> + <link rel="alternate" href="{atom:link[@rel='self']/@href}" + title="{atom:title}" type="{atom:link[@rel='self']/@type}" /> + </xsl:if> + <link rel="shortcut icon" type="image/x-icon" href="images/venus.ico" /> + <link rel="icon" type="image/x-icon" href="images/venus.ico" /> + <script type="text/javascript" src="personalize.js"> + <!-- hack to prevent XHTML tag minimization --> + <xsl:text> </xsl:text> + </script> + </head> + + <xsl:text> </xsl:text> + <body> + <xsl:text> </xsl:text> + <h1><xsl:value-of select="atom:title"/></h1> + + <xsl:text> </xsl:text> + <div id="sidebar"> + + <xsl:text> </xsl:text> + <h2>Subscriptions</h2> + <xsl:text> </xsl:text> + <ul> + <xsl:for-each select="planet:source"> + <xsl:sort select="planet:name"/> + <xsl:text> </xsl:text> + <li> + <!-- icon --> + <a title="subscribe to {planet:name}’s feed"> + <xsl:choose> + <xsl:when test="planet:http_location"> + <xsl:attribute name="href"> + <xsl:value-of select="planet:http_location"/> + </xsl:attribute> + </xsl:when> + <xsl:when test="atom:link[@rel='self']/@href"> + <xsl:attribute name="href"> + <xsl:value-of select="atom:link[@rel='self']/@href"/> + </xsl:attribute> + </xsl:when> + </xsl:choose> + <img src="images/feed-icon-10x10.png" alt="(feed)"/> + </a> + <xsl:text> </xsl:text> + + <!-- name --> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:choose> + <xsl:when test="planet:message"> + <xsl:attribute name="class">message</xsl:attribute> + <xsl:attribute name="title"> + <xsl:value-of select="planet:message"/> + </xsl:attribute> + </xsl:when> + <xsl:when test="atom:title"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:title"/> + </xsl:attribute> + </xsl:when> + </xsl:choose> + <xsl:value-of select="planet:name"/> + </a> + </li> + </xsl:for-each> + <xsl:text> </xsl:text> + </ul> + + <xsl:text> </xsl:text> + <h2>Info</h2> + + <dl> + <dt>Last updated:</dt> + <dd> + <span class="date" title="GMT"> + <xsl:value-of select="atom:updated/@planet:format"/> + </span> + </dd> + <dt>Powered by:</dt> + <dd> + <a href="http://intertwingly.net/code/venus/" title="Sam Ruby’s Venus"> + <img src="images/venus.png" width="80" height="15" + alt="Planet" /> + </a> + </dd> + <dt>Export:</dt> + <dd> + <a href="opml.xml" title="export the {planet:name} subscription list in OPML format"> + <img src="images/opml.png" alt="OPML"/> + </a> + </dd> + <dd> + <a href="foafroll.xml" title="export the {planet:name} subscription list in FOAF format"> + <img src="images/foaf.png" alt="FOAF"/> + </a> + </dd> + </dl> + + </div> + + <xsl:text> </xsl:text> + <div id="body"> + <xsl:apply-templates select="atom:entry"/> + <xsl:text> </xsl:text> + </div> + </body> + </html> + </xsl:template> + + <xsl:template match="atom:entry"> + <!-- date header --> + <xsl:variable name="date" select="substring(atom:updated,1,10)"/> + <xsl:if test="not(preceding-sibling::atom:entry + [substring(atom:updated,1,10) = $date])"> + <xsl:text> </xsl:text> + <h2 class="date"> + <xsl:value-of select="substring-before(atom:updated/@planet:format,', ')"/> + <xsl:text>, </xsl:text> + <xsl:value-of select="substring-before(substring-after(atom:updated/@planet:format,', '), ' ')"/> + </h2> + </xsl:if> + + <xsl:text> </xsl:text> + <div class="news"> + + <xsl:if test="@xml:lang"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="@xml:lang"/> + </xsl:attribute> + </xsl:if> + + <!-- entry title --> + <xsl:text> </xsl:text> + <h3> + <a 
href="{atom:source/atom:link[@rel='alternate']/@href}"> + <xsl:attribute name="title" select="{atom:source/atom:title}"/> + <xsl:value-of select="atom:source/planet:name"/> + </a> + <xsl:if test="atom:title"> + <xsl:text> </xsl:text> + <xsl:choose> + <xsl:when test="atom:source/atom:icon"> + <img src="{atom:source/atom:icon}" class="icon" alt="" /> + </xsl:when> + <xsl:otherwise> + <xsl:text>—</xsl:text> + </xsl:otherwise> + </xsl:choose> + <xsl:text> </xsl:text> + <a href="{atom:link[@rel='alternate']/@href}"> + <xsl:if test="atom:title/@xml:lang != @xml:lang"> + <xsl:attribute name="xml:lang" select="{atom:title/@xml:lang}"/> + </xsl:if> + <xsl:value-of select="atom:title"/> + </a> + </xsl:if> + </h3> + + <!-- entry content --> + <xsl:text> </xsl:text> + <div class="content"> + <xsl:choose> + <xsl:when test="atom:content"> + <xsl:apply-templates select="atom:content"/> + </xsl:when> + <xsl:otherwise> + <xsl:apply-templates select="atom:summary"/> + </xsl:otherwise> + </xsl:choose> + + <!-- entry footer --> + <xsl:text> </xsl:text> + <div class="permalink"> + <xsl:if test="atom:link[@rel='license'] or + atom:source/atom:link[@rel='license'] or + atom:rights or atom:source/atom:rights"> + <a> + <xsl:if test="atom:source/atom:link[@rel='license']/@href"> + <xsl:attribute name="rel">license</xsl:attribute> + <xsl:attribute name="href"> + <xsl:value-of select="atom:source/atom:link[@rel='license']/@href"/> + </xsl:attribute> + </xsl:if> + <xsl:if test="atom:link[@rel='license']/@href"> + <xsl:attribute name="rel">license</xsl:attribute> + <xsl:attribute name="href"> + <xsl:value-of select="atom:link[@rel='license']/@href"/> + </xsl:attribute> + </xsl:if> + <xsl:if test="atom:source/atom:rights"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:source/atom:rights"/> + </xsl:attribute> + </xsl:if> + <xsl:if test="atom:rights"> + <xsl:attribute name="title"> + <xsl:value-of select="atom:rights"/> + </xsl:attribute> + </xsl:if> + <xsl:text>©</xsl:text> + </a> + <xsl:text> </xsl:text> + </xsl:if> + <a href="{atom:link[@rel='alternate']/@href}" class="permalink"> + <xsl:choose> + <xsl:when test="atom:author/atom:name"> + <xsl:if test="not(atom:link[@rel='license'] or + atom:source/atom:link[@rel='license'] or + atom:rights or atom:source/atom:rights)"> + <xsl:text>by </xsl:text> + </xsl:if> + <xsl:value-of select="atom:author/atom:name"/> + <xsl:text> at </xsl:text> + </xsl:when> + <xsl:when test="atom:source/atom:author/atom:name"> + <xsl:if test="not(atom:link[@rel='license'] or + atom:source/atom:link[@rel='license'] or + atom:rights or atom:source/atom:rights)"> + <xsl:text>by </xsl:text> + </xsl:if> + <xsl:value-of select="atom:source/atom:author/atom:name"/> + <xsl:text> at </xsl:text> + </xsl:when> + </xsl:choose> + <span class="date" title="GMT"> + <xsl:value-of select="atom:updated/@planet:format"/> + </span> + </a> + </div> + </div> + </div> + + </xsl:template> + + <!-- xhtml content --> + <xsl:template match="atom:content/xhtml:div | atom:summary/xhtml:div"> + <xsl:copy> + <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="../@xml:lang"/> + </xsl:attribute> + </xsl:if> + <xsl:apply-templates select="@*|node()"/> + </xsl:copy> + </xsl:template> + + <!-- plain text content --> + <xsl:template match="atom:content/text() | atom:summary/text()"> + <div> + <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)"> + <xsl:attribute name="xml:lang"> + <xsl:value-of select="../@xml:lang"/> + 
+<!-- plain text content -->
+<xsl:template match="atom:content/text() | atom:summary/text()">
+  <div>
+    <xsl:if test="../@xml:lang and not(../@xml:lang = ../../@xml:lang)">
+      <xsl:attribute name="xml:lang">
+        <xsl:value-of select="../@xml:lang"/>
+      </xsl:attribute>
+    </xsl:if>
+    <xsl:copy-of select="."/>
+  </div>
+</xsl:template>
+
+<!-- Feedburner detritus -->
+<xsl:template match="xhtml:div[@class='feedflare']"/>
+
+<!-- Remove stray atom elements -->
+<xsl:template match="atom:*">
+  <xsl:apply-templates/>
+</xsl:template>
+
+<!-- pass through everything else -->
+<xsl:template match="@*|node()">
+  <xsl:copy>
+    <xsl:apply-templates select="@*|node()"/>
+  </xsl:copy>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/lib/venus/themes/musings/personalize.js b/lib/venus/themes/musings/personalize.js
new file mode 100644
index 0000000..83db3a3
--- /dev/null
+++ b/lib/venus/themes/musings/personalize.js
@@ -0,0 +1,220 @@
+var entries = []; // list of news items
+
+var days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday",
+  "Friday", "Saturday"];
+var months = ["January", "February", "March", "April", "May", "June", "July",
+  "August", "September", "October", "November", "December"];
+
+// event complete: stop propagation of the event
+function stopPropagation(event) {
+  if (event.preventDefault) {
+    event.preventDefault();
+    event.stopPropagation();
+  } else {
+    event.returnValue = false;
+  }
+}
+
+// scroll back to the previous article
+function prevArticle(event) {
+  for (var i=entries.length; --i>=0;) {
+    if (entries[i].anchor.offsetTop < document.documentElement.scrollTop) {
+      window.location.hash=entries[i].anchor.id;
+      stopPropagation(event);
+      break;
+    }
+  }
+}
+
+// advance to the next article
+function nextArticle(event) {
+  for (var i=1; i<entries.length; i++) {
+    if (entries[i].anchor.offsetTop-20 > document.documentElement.scrollTop) {
+      window.location.hash=entries[i].anchor.id;
+      stopPropagation(event);
+      break;
+    }
+  }
+}
+
+// process keypresses
+function navkey(event) {
+  var checkbox = document.getElementById('navkeys');
+  if (!checkbox || !checkbox.checked) return;
+
+  if (!event) event=window.event;
+  key=event.keyCode;
+
+  if (!document.documentElement) return;
+  if (!entries[0].anchor || !entries[0].anchor.offsetTop) return;
+
+  if (key == 'J'.charCodeAt(0)) nextArticle(event);
+  if (key == 'K'.charCodeAt(0)) prevArticle(event);
+}
+
+// create (or reset) a cookie
+function createCookie(name,value,days) {
+  if (days) {
+    var date = new Date();
+    date.setTime(date.getTime()+(days*24*60*60*1000));
+    var expires = "; expires="+date.toGMTString();
+  }
+  else expires = "";
+  document.cookie = name+"="+value+expires+"; path=/";
+}
+
+// read a cookie
+function readCookie(name) {
+  var nameEQ = name + "=";
+  var ca = document.cookie.split(';');
+  for(var i=0;i < ca.length;i++) {
+    var c = ca[i];
+    while (c.charAt(0)==' ') c = c.substring(1,c.length);
+    if (c.indexOf(nameEQ) == 0) return c.substring(nameEQ.length,c.length);
+  }
+  return null;
+}
+
+// each time the value of the option changes, update the cookie
+function selectOption() {
+  var checkbox = document.getElementById('navkeys');
+  if (!checkbox) return;
+  createCookie("navkeys", checkbox.checked?'true':'false', 365);
+}
+
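+// Note on the cookie helpers above: selectOption() persists the state of the
+// "navkeys" checkbox for a year via createCookie(), and addOption() below
+// restores it with readCookie() when the sidebar option is built at load time.
+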
+// add navkeys option to sidebar
+function addOption(event) {
+  if (entries.length > 1 && entries[entries.length-1].parent.offsetTop > 0) {
+    var sidebar = document.getElementById('sidebar');
+    if (!sidebar) return;
+
+    for (var i=entries.length; --i>=0;) {
+      var a = entries[i].anchor = document.createElement('a');
+      a.id = "news-" + i;
+      entries[i].parent.insertBefore(a, entries[i].parent.firstChild);
+    }
+
+    var h2 = document.createElement('h2');
+    h2.appendChild(document.createTextNode('Options'));
+    sidebar.appendChild(h2);
+
+    var form = document.createElement('form');
+    var p = document.createElement('p');
+    var input = document.createElement('input');
+    input.type = "checkbox";
+    input.id = "navkeys";
+    p.appendChild(input);
+    var a = document.createElement('a');
+    a.title = "Navigate entries";
+    a.appendChild(document.createTextNode('Enable '));
+    var code = document.createElement('code');
+    code.appendChild(document.createTextNode('J'));
+    a.appendChild(code);
+    a.appendChild(document.createTextNode(' and '));
+    code = document.createElement('code');
+    code.appendChild(document.createTextNode('K'));
+    a.appendChild(code);
+    a.appendChild(document.createTextNode(' keys'));
+    p.appendChild(a);
+    form.appendChild(p);
+    sidebar.appendChild(form);
+
+    var cookie = readCookie("navkeys");
+    if (cookie && cookie == 'true') input.checked = true;
+    input.onclick = selectOption;
+    document.onkeydown = navkey;
+  }
+}
+
+// convert date to local time
+var localere = /^(\w+) (\d+) (\w+) \d+ 0?(\d\d?:\d\d):\d\d ([AP]M) (EST|EDT|CST|CDT|MST|MDT|PST|PDT)/;
+function localizeDate(element) {
+  var date = new Date();
+  date.setTime(Date.parse(element.innerHTML + " GMT"));
+
+  var local = date.toLocaleString();
+  var match = local.match(localere);
+  if (match) {
+    element.innerHTML = match[4] + ' ' + match[5].toLowerCase();
+    element.title = match[6] + " \u2014 " +
+      match[1] + ', ' + match[3] + ' ' + match[2];
+    return days[date.getDay()] + ', ' + months[date.getMonth()] + ' ' +
+      date.getDate() + ', ' + date.getFullYear();
+  } else {
+    element.title = element.innerHTML + ' GMT';
+    element.innerHTML = local;
+    return days[date.getDay()] + ', ' + date.getDate() + ' ' +
+      months[date.getMonth()] + ' ' + date.getFullYear();
+  }
+
+}
+
+// find entries (and localizeDates)
+function findEntries() {
+
+  var span = document.getElementsByTagName('span');
+
+  for (var i=0; i<span.length; i++) {
+    if (span[i].className == "date" && span[i].title == "GMT") {
+      var date = localizeDate(span[i]);
+
+      var parent = span[i];
+      while (parent && parent.className != 'news') {
+        parent = parent.parentNode;
+      }
+
+      if (parent) {
+        var info = entries[entries.length] = new Object();
+        info.parent = parent;
+        info.date = date;
+      }
+    }
+  }
+
+}
+
+// insert/remove date headers to indicate change of date in local time zone
+function moveDateHeaders() {
+  lastdate = ''
+  for (var i=0; i<entries.length; i++) {
+    var parent = entries[i].parent;
+    var date = entries[i].date;
+
+    sibling = parent.previousSibling;
+    while (sibling && sibling.nodeType != 1) {
+      sibling = sibling.previousSibling;
+    }
+
+    if (sibling && sibling.nodeName.toLowerCase() == 'h2') {
+      if (lastdate == date) {
+        sibling.parentNode.removeChild(sibling);
+      } else {
+        sibling.innerHTML = date;
+        lastdate = date;
+      }
+    } else if (lastdate != date) {
+      var h2 = document.createElement('h2');
+      h2.className = 'date'
+      h2.appendChild(document.createTextNode(date));
+      parent.parentNode.insertBefore(h2, parent);
+      lastdate = date;
+    }
+  }
+}
+
+// adjust dates to local time zones, optionally provide navigation keys
+function personalize() {
+  findEntries();
+  addOption();
+  moveDateHeaders();
+}
+
+// hook event
+window.onload = personalize;
+if (document.addEventListener) {
+  onDOMLoad = function() {
+    window.onload = undefined;
+    personalize();
+  };
+  document.addEventListener("DOMContentLoaded", onDOMLoad, false);
+}
-- 
GitLab
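
For reference, the sketch below restates in plain JavaScript what the tail end of addOption() wires up once the sidebar option exists. It is illustrative only and not part of the patch; the element id "navkeys" and the cookie name come from personalize.js above, while the surrounding page (a rendered planet with the script already loaded) is assumed.

    // Illustrative sketch (not part of the patch) of the option wiring:
    // the checkbox state round-trips through the cookie helpers, and navkey()
    // ignores J/K while the box is unchecked.
    var box = document.getElementById('navkeys');   // created by addOption()
    box.checked = readCookie("navkeys") == 'true';  // restore the saved choice
    box.onclick = selectOption;                     // persist changes for a year
    document.onkeydown = navkey;                    // J = next entry, K = previous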