Revision: 6554
Author: nicdumz
Date: 2009-03-24 05:42:13 +0000 (Tue, 24 Mar 2009)
Log Message:
-----------
Patiently going through all the changes of 6540 to fix the places where tabs and spaces were mixed ...
Modified Paths:
--------------
    trunk/pywikipedia/copyright.py
    trunk/pywikipedia/family.py
    trunk/pywikipedia/login.py
    trunk/pywikipedia/standardize_notes.py
    trunk/pywikipedia/userlib.py
    trunk/pywikipedia/wiktionary/wiktionarypagetest.py
    trunk/pywikipedia/wiktionary.py
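For reference, a minimal standalone sketch of the kind of check that finds the lines this revision cleans up; the file name check_indent.py and the code are illustrative, not part of the commit. Running the affected files under Python 2's "-tt" interpreter switch also rejects ambiguous mixing of tabs and spaces.

    # check_indent.py -- hypothetical helper, not part of this commit:
    # report lines whose leading whitespace mixes tabs and spaces.
    import sys

    def mixed_indent_lines(path):
        """Yield (line number, text) for lines whose indentation mixes tabs and spaces."""
        f = open(path)
        try:
            for num, line in enumerate(f, 1):
                indent = line[:len(line) - len(line.lstrip())]
                if ' ' in indent and '\t' in indent:
                    yield num, line.rstrip('\n')
        finally:
            f.close()

    if __name__ == '__main__':
        for filename in sys.argv[1:]:
            for num, text in mixed_indent_lines(filename):
                print('%s:%d: %r' % (filename, num, text))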
Modified: trunk/pywikipedia/copyright.py
===================================================================
--- trunk/pywikipedia/copyright.py      2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/copyright.py      2009-03-24 05:42:13 UTC (rev 6554)
@@ -1031,7 +1031,7 @@
             wikipedia.output(page.title())
 
-            if original_text:
+            if original_text:
                 text = skip_section(original_text)
 
                 if remove_wikicode_dotall:
Modified: trunk/pywikipedia/family.py
===================================================================
--- trunk/pywikipedia/family.py 2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/family.py 2009-03-24 05:42:13 UTC (rev 6554)
@@ -2997,21 +2997,21 @@
         # equivalent articles have identical titles among the group.
         self.language_groups = {
             # languages using the arabic script (incomplete)
-            'arab' : [
+            'arab' : [
                 'ar', 'arz', 'ps', 'sd', 'ur',
                 # languages using multiple scripts, including arabic
-                'kk', 'ku', 'tt', 'ug'
+                'kk', 'ku', 'tt', 'ug'
             ],
             # languages that use chinese symbols
             'chinese': [
                 'wuu', 'zh', 'zh-classical', 'zh-yue', 'gan', 'ii',
                 # languages using multiple/mixed scripts, including chinese
-                'ja', 'za'
+                'ja', 'za'
             ],
             # languages that use the cyrillic alphabet
             'cyril': [
                 'ab', 'av', 'ba', 'be', 'be-x-old', 'bg', 'bxr', 'ce', 'cu', 'cv', 'kv',
-                'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo', 'myv', 'os', 'ru', 'sah', 'tg',
+                'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo', 'myv', 'os', 'ru', 'sah', 'tg',
                 'tk', 'udm', 'uk', 'xal',
                 # languages using multiple scripts, including cyrillic
                 'ha', 'kk', 'sh', 'sr', 'tt'
@@ -3019,22 +3019,22 @@
             # languages that use the latin alphabet
             'latin': [
                 'aa', 'af', 'ak', 'als', 'an', 'ang', 'ast', 'ay', 'bar', 'bat-smg',
-                'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam', 'cdo', 'ceb', 'ch',
-                'cho', 'chy', 'co', 'crh', 'cs', 'csb', 'cy', 'da', 'de', 'diq', 'dsb',
-                'ee', 'eml', 'en', 'eo', 'es', 'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro',
-                'fj', 'fo', 'fr', 'frp', 'fur', 'fy', 'ga', 'gd', 'gl', 'gn', 'gv',
-                'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia', 'id',
-                'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv', 'kaa', 'kab',
-                'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la', 'lad', 'lb', 'lg',
-                'li', 'lij', 'lmo', 'ln', 'lt', 'lv', 'map-bms', 'mg', 'mh', 'mi', 'ms',
-                'mt', 'mus', 'na', 'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn',
-                'no', 'nov', 'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pdc',
-                'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro', 'roa-rup', 'roa-tara',
+                'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam', 'cdo', 'ceb', 'ch',
+                'cho', 'chy', 'co', 'crh', 'cs', 'csb', 'cy', 'da', 'de', 'diq', 'dsb',
+                'ee', 'eml', 'en', 'eo', 'es', 'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro',
+                'fj', 'fo', 'fr', 'frp', 'fur', 'fy', 'ga', 'gd', 'gl', 'gn', 'gv',
+                'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia', 'id',
+                'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv', 'kaa', 'kab',
+                'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la', 'lad', 'lb', 'lg',
+                'li', 'lij', 'lmo', 'ln', 'lt', 'lv', 'map-bms', 'mg', 'mh', 'mi', 'ms',
+                'mt', 'mus', 'na', 'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn',
+                'no', 'nov', 'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pdc',
+                'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro', 'roa-rup', 'roa-tara',
                 'rw', 'sc', 'scn', 'sco', 'se', 'sg', 'simple', 'sk', 'sl', 'sm', 'sn',
-                'so', 'sq', 'srn', 'ss', 'st', 'stq', 'su', 'sv', 'sw', 'szl', 'tet',
-                'tl', 'tn', 'to', 'tpi', 'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've',
-                'vec', 'vi', 'vls', 'vo', 'wa', 'war', 'wo', 'xh', 'yo', 'zea',
-                'zh-min-nan', 'zu',
+                'so', 'sq', 'srn', 'ss', 'st', 'stq', 'su', 'sv', 'sw', 'szl', 'tet',
+                'tl', 'tn', 'to', 'tpi', 'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've',
+                'vec', 'vi', 'vls', 'vo', 'wa', 'war', 'wo', 'xh', 'yo', 'zea',
+                'zh-min-nan', 'zu',
                 # languages using multiple scripts, including latin
                 'az', 'chr', 'ha', 'iu', 'kk', 'ku', 'rmy', 'sh', 'sr', 'tt', 'ug', 'za'
             ],
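For context on the hunk above: a hypothetical helper, not part of family.py, showing how a language_groups mapping of this shape can be queried. The dictionary below is a deliberately abbreviated stand-in for the real grouping.

    # Hypothetical helpers; the table is an abbreviated copy for illustration only.
    language_groups = {
        'arab':  ['ar', 'arz', 'ps', 'sd', 'ur', 'kk', 'ku', 'tt', 'ug'],
        'cyril': ['bg', 'mk', 'ru', 'uk', 'kk', 'sh', 'sr', 'tt'],
        'latin': ['de', 'en', 'fr', 'nl', 'sh', 'sr', 'tt'],
    }

    def scripts_of(code):
        """Return the set of script groups that list the given language code."""
        return set(group for group, codes in language_groups.items() if code in codes)

    def share_script(code1, code2):
        """True when two languages are written in at least one common script."""
        return bool(scripts_of(code1) & scripts_of(code2))

    # 'sr' is listed under both cyril and latin, so it shares a script with 'ru'
    # and with 'de'; 'en' and 'ar' have no group in common in this table.
    assert share_script('sr', 'ru')
    assert share_script('sr', 'de')
    assert not share_script('en', 'ar')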
Modified: trunk/pywikipedia/login.py
===================================================================
--- trunk/pywikipedia/login.py  2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/login.py  2009-03-24 05:42:13 UTC (rev 6554)
@@ -29,8 +29,8 @@
 
    -v -v      Shows http requests made when logging in. This might leak
    (doubly    private data (password, session id), so make sure to check the
-   verbose)   output. Using -log is recommended: this will output a lot of
-              data
+   verbose)   output. Using -log is recommended: this will output a lot of
+              data
 
 If not given as parameter, the script will ask for your username and password
 (password entry will be hidden), log in to your home wiki using this
Modified: trunk/pywikipedia/standardize_notes.py
===================================================================
--- trunk/pywikipedia/standardize_notes.py      2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/standardize_notes.py      2009-03-24 05:42:13 UTC (rev 6554)
@@ -741,15 +741,15 @@
                 break # found a matching previous linkname so stop looking
         if extlink_linktext == None or len(extlink_linktext) < 20:
             exlink_linktext = urltitle
-        # Look for a news web site
+        # Look for a news web site
         for (sitename, newscompany, stripprefix) in newssites:
-            if refname.startswith( sitename ):
-                # If there is a prefix to strip from the title
+            if refname.startswith( sitename ):
+                # If there is a prefix to strip from the title
                 if stripprefix and extlink_linktext.startswith( stripprefix ):
-                    extlink_linktext = extlink_linktext[len(stripprefix):]
-                new_text = u'{{news reference | title=%s | url=%s | urldate=%s | org=%s }}' % ( extlink_linktext, extlink_linkname, now.isoformat(), newscompany ) + '\n'
-                break
-        else: # else no special site found
+                    extlink_linktext = extlink_linktext[len(stripprefix):]
+                new_text = u'{{news reference | title=%s | url=%s | urldate=%s | org=%s }}' % ( extlink_linktext, extlink_linkname, now.isoformat(), newscompany ) + '\n'
+                break
+        else: # else no special site found
             new_text = u'{{web reference | title=%s | url=%s | date=%s }}' % ( extlink_linktext, extlink_linkname, now.isoformat() )
         return (new_text)
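The hunk above relies on Python's for/else construct; below is a condensed, runnable sketch of that pattern with hypothetical data and a hypothetical function name. The else branch runs only when no break fired, i.e. when the link matched none of the known news sites, and the code then falls back to a generic web reference.

    # Sketch only: data and function name are made up, the pattern mirrors the hunk above.
    newssites = [
        # (hostname fragment, organisation, title prefix to strip) -- example data only
        ('example-news.com', u'Example News', u'Example News - '),
    ]

    def make_reference(refname, linktext, linkname, date):
        for sitename, newscompany, stripprefix in newssites:
            if refname.startswith(sitename):
                if stripprefix and linktext.startswith(stripprefix):
                    linktext = linktext[len(stripprefix):]
                new_text = u'{{news reference | title=%s | url=%s | urldate=%s | org=%s }}' % (
                    linktext, linkname, date, newscompany) + '\n'
                break
        else:  # no special site found: fall back to a plain web reference
            new_text = u'{{web reference | title=%s | url=%s | date=%s }}' % (linktext, linkname, date)
        return new_text

    print(make_reference('example-news.com/story', u'Example News - Big story',
                         u'http://example-news.com/story', '2009-03-24'))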
@@ -762,9 +762,9 @@
             urltitle = self.doGetTitleFromURL( 'http://dx.doi.org/' + doi_linktext ) # try to get title from URL
         refname = 'refbot%d' % refsequence
         if urltitle:
-            new_text = '# {{note|%s}} %s {{doi|%s}}' % (refname, urltitle, doi_linktext) + '\n'
+            new_text = '# {{note|%s}} %s {{doi|%s}}' % (refname, urltitle, doi_linktext) + '\n'
         else:
-            new_text = '# {{note|%s}} {{doi|%s}}' % (refname, doi_linktext) + '\n'
+            new_text = '# {{note|%s}} {{doi|%s}}' % (refname, doi_linktext) + '\n'
         return (refname, new_text)
 
     def doBuildSequenceListOfReferences(self, original_text):
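For illustration, the wikitext the two branches above emit, shown with made-up values:

    # Hypothetical values only; the format strings are the ones from the hunk above.
    refname, urltitle, doi_linktext = 'refbot3', u'Some article title', '10.1000/xyz123'
    print('# {{note|%s}} %s {{doi|%s}}' % (refname, urltitle, doi_linktext))
    # -> # {{note|refbot3}} Some article title {{doi|10.1000/xyz123}}
    print('# {{note|%s}} {{doi|%s}}' % (refname, doi_linktext))
    # -> # {{note|refbot3}} {{doi|10.1000/xyz123}}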
Modified: trunk/pywikipedia/userlib.py
===================================================================
--- trunk/pywikipedia/userlib.py        2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/userlib.py        2009-03-24 05:42:13 UTC (rev 6554)
@@ -85,15 +85,15 @@
         offset = 0
         step = min(limit,500)
         older_str = None
-        try:
-            older_str = self.site.mediawiki_message('pager-older-n')
-        except wikipedia.KeyError:
-            older_str = self.site.mediawiki_message('sp-contributions-older')
+        try:
+            older_str = self.site.mediawiki_message('pager-older-n')
+        except wikipedia.KeyError:
+            older_str = self.site.mediawiki_message('sp-contributions-older')
         if older_str.startswith('{{PLURAL:$1'):
-            older_str = older_str[13:]
-            older_str = older_str[older_str.find('|')+1:]
-            older_str = older_str[:-2]
-            older_str = older_str.replace('$1',str(step))
+            older_str = older_str[13:]
+            older_str = older_str[older_str.find('|')+1:]
+            older_str = older_str[:-2]
+            older_str = older_str.replace('$1',str(step))
 
         address = self.site.contribs_address(self.name,limit=step)
         contribRX = re.compile('<li[^>]*> *<a href="(?P<url>[^"]*?)" title="[^"]+">(?P<date>[^<]+)</a>.*>diff</a>) *(<span class="[^"]+">[A-Za-z]</span>)* *<a href="[^"]+" (class="[^"]+" )?title="[^"]+">(?P<title>[^<]+)</a> *(?P<comment>.*?)(?P<top><strong> *(top) *</strong>)? *(<span class="mw-rollback-link">[<a href="[^"]+token=(?P<rollbackToken>[^"]+)%2B%5C".*rollback</a>]</span>)? *</li>')
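What the slicing in the hunk above is meant to do, shown on a made-up message string; the real text of 'pager-older-n' differs per wiki and language:

    # Standalone sketch of the normalisation above; the message text is hypothetical.
    step = 500
    older_str = u'{{PLURAL:$1|older $1|older $1}}'   # made-up raw MediaWiki message
    if older_str.startswith('{{PLURAL:$1'):
        older_str = older_str[13:]                    # drop '{{PLURAL:$1|' plus one char of the first form
        older_str = older_str[older_str.find('|')+1:] # keep the text after the next '|', i.e. the plural form
        older_str = older_str[:-2]                    # drop the closing '}}'
        older_str = older_str.replace('$1', str(step))
    print(older_str)  # -> older 500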
Modified: trunk/pywikipedia/wiktionary/wiktionarypagetest.py
===================================================================
--- trunk/pywikipedia/wiktionary/wiktionarypagetest.py  2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/wiktionary/wiktionarypagetest.py  2009-03-24 05:42:13 UTC (rev 6554)
@@ -200,7 +200,7 @@
                  }
               },
              {'definition': u"A piece of metal, often [[hexagonal]], with a hole through it with internal threading intended to fit on to a bolt.",
               'concisedef': u'that fits on a bolt',
-              'trans': {'remark': '',
+              'trans': {'remark': '',
                         'alltrans': {
                                      'nl': {'remark': '',
                                             'translations': [{'remark': '',
Modified: trunk/pywikipedia/wiktionary.py
===================================================================
--- trunk/pywikipedia/wiktionary.py     2009-03-24 05:11:11 UTC (rev 6553)
+++ trunk/pywikipedia/wiktionary.py     2009-03-24 05:42:13 UTC (rev 6554)
@@ -584,11 +584,11 @@
         """ Constructor
             Called with one parameter:
             - the language of this entry
-            and can optionally be initialized with a first meaning
+            and can optionally be initialized with a first meaning
         """
         self.entrylang=entrylang
         self.meanings = {} # a dictionary containing the meanings for this term grouped by part of speech
-        if meaning:
+        if meaning:
             self.addMeaning(meaning)
         self.posorder = [] # we don't want to shuffle the order of the parts of speech, so we keep a list to keep the order in which they were encountered
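A condensed, self-contained sketch of how the constructor above is meant to be used, with a plausible addMeaning so the optional 'meaning' argument can be seen in action. The addMeaning body, the dict-shaped meaning value, and the initialisation order (posorder set up before the optional addMeaning call) are assumptions made so the example runs; they are not the actual wiktionary.py code.

    # Sketch only: grouping meanings by part of speech while keeping encounter order.
    class Entry:
        def __init__(self, entrylang, meaning=''):
            self.entrylang = entrylang
            self.meanings = {}   # meanings for this term, grouped by part of speech
            self.posorder = []   # parts of speech in the order they were first encountered
            if meaning:
                self.addMeaning(meaning)

        def addMeaning(self, meaning):
            pos = meaning['pos']  # assumed shape: a dict carrying the part of speech
            self.meanings.setdefault(pos, []).append(meaning)
            if pos not in self.posorder:
                self.posorder.append(pos)

    entry = Entry('en', meaning={'pos': 'noun', 'definition': 'a fastener'})
    print(entry.posorder)   # -> ['noun']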
@@ -804,7 +804,7 @@
            ready for Wiktionary
            The behavior changes with the circumstances.
            For an entry in the same language as the Wiktionary the full list of translations is contained in the output, excluding the local
-           language itself
+           language itself
            - This list of translations has to end up in a table with two columns
            - The first column of this table contains languages with names from A to M, the second contains N to Z
            - If a column in this list remains empty a html comment is put in that column
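A minimal sketch of the two-column layout described in the docstring above; it is not the actual wrapping code, and the {{top}}/{{mid}}/{{bottom}} divider templates and the function name are illustrative assumptions.

    # Sketch: languages A-M go in the left column, N-Z in the right,
    # and an empty column gets an html comment placeholder.
    def two_column_translations(translations):
        """translations: dict mapping an English language name to a wikified translation line."""
        items = sorted(translations.items())
        left = [text for name, text in items if name[:1].upper() <= 'M']   # languages A-M
        right = [text for name, text in items if name[:1].upper() > 'M']   # languages N-Z
        if not left:
            left = [u'<!-- no translations for languages A-M -->']
        if not right:
            right = [u'<!-- no translations for languages N-Z -->']
        return (u'{{top}}\n' + u'\n'.join(left) +
                u'\n{{mid}}\n' + u'\n'.join(right) +
                u'\n{{bottom}}')

    print(two_column_translations({u'Dutch': u'* Dutch: [[moer]]',
                                   u'Swedish': u'* Swedish: [[mutter]]'}))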