jenkins-bot has submitted this change and it was merged.
Change subject: Fix remaining pep8 issue: E265: use of comments
......................................................................
Fix remaining pep8 issue: E265: use of comments
E265 is raised when a block comment's '#' is not followed by a space; it is part of the normal pep8 rule set. Fixed the existing violations and made the check part of the mandatory checkin rules.
This was used for:
- visual dividers in code
- commented out debugging code
- commented out code that was replaced
- commented out code instead of FIXME:/TODO:
- commented out example code
and occasionally a normal comment with the space missing after the '#' (a minimal illustration follows).
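As an illustration only (this snippet is not part of the change; the comment texts are made up):

    #bad: E265, the '#' is not followed by a space
    #--------------- a visual divider like this also fails ---------------
    # good: a block comment starting with '# '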
Replaced some commented-out debugging code with logging calls, and raised NotImplementedError in checkimages.py with FIXME comments and bug numbers, as sketched below.
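A sketch of that pattern, condensed from the checkimages.py hunks below (the wrapping function name here is hypothetical; the message and bug number are the ones in the diff):

    def check_recent_uploads(generator):
        """Placeholder for the disabled '-wait' feature."""
        # TODO: remove this exception as part of bug 65136
        raise NotImplementedError(
            "The wait option is not available at core yet.")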
Change-Id: I02864ad75f9e11f5cd8821fe13f52b57a93d56c3
---
M pwb.py
M pywikibot/bot.py
M pywikibot/families/wikisource_family.py
M pywikibot/fixes.py
M pywikibot/interwiki_graph.py
M pywikibot/site.py
M scripts/blockpageschecker.py
M scripts/blockreview.py
M scripts/casechecker.py
M scripts/checkimages.py
M scripts/commonscat.py
M scripts/cosmetic_changes.py
M scripts/editarticle.py
M scripts/featured.py
M scripts/imagerecat.py
M scripts/imagetransfer.py
M scripts/imageuncat.py
M scripts/isbn.py
M scripts/redirect.py
M scripts/reflinks.py
M scripts/script_wui.py
M scripts/solve_disambiguation.py
M scripts/spamremove.py
M scripts/weblinkchecker.py
M scripts/welcome.py
M tests/dry_site_tests.py
M tests/site_tests.py
M tox.ini
28 files changed, 153 insertions(+), 198 deletions(-)
Approvals:
  John Vandenberg: Looks good to me, but someone else must approve
  Ladsgroup: Looks good to me, approved
  jenkins-bot: Verified
diff --git a/pwb.py b/pwb.py
index 29ddc24..13614e6 100644
--- a/pwb.py
+++ b/pwb.py
@@ -82,7 +82,7 @@
     sys.path[0] = old_path0
     pwb.argvu = old_argvu
-#### end of snippet
+# end of snippet from coverage
 if sys.version_info[0] not in (2, 3):
     raise RuntimeError("ERROR: Pywikibot only runs under Python 2 "
diff --git a/pywikibot/bot.py b/pywikibot/bot.py
index 9fb6254..6e92dbf 100644
--- a/pywikibot/bot.py
+++ b/pywikibot/bot.py
@@ -81,7 +81,6 @@
             sfn = "%s.%d%s" % (root, i, ext)
             dfn = "%s.%d%s" % (root, i + 1, ext)
             if os.path.exists(sfn):
-                #print "%s -> %s" % (sfn, dfn)
                 if os.path.exists(dfn):
                     os.remove(dfn)
                 os.rename(sfn, dfn)
@@ -89,7 +88,6 @@
         if os.path.exists(dfn):
             os.remove(dfn)
         os.rename(self.baseFilename, dfn)
-        #print "%s -> %s" % (self.baseFilename, dfn)
     elif self.backupCount == -1:
         if not hasattr(self, '_lastNo'):
             self._lastNo = 1
@@ -1189,7 +1187,7 @@
                 if not treat_missing_item:
                     pywikibot.output(
                         '%s doesn\'t have a wikidata item.' % page)
-                    #TODO FIXME: Add an option to create the item
+                    # TODO: Add an option to create the item
                     continue
                 self.treat(page, item)
             except QuitKeyboardInterrupt:
diff --git a/pywikibot/families/wikisource_family.py b/pywikibot/families/wikisource_family.py
index 94cc43f..437e546 100644
--- a/pywikibot/families/wikisource_family.py
+++ b/pywikibot/families/wikisource_family.py
@@ -112,13 +112,13 @@
             'de': (u'/Doku', u'/Meta'),
             'el': (u'/τεκμηρίωση', ),
             'eo': ('u/dokumentado', ),
-            #'fa': (u'/صفحه الگو', ),
-            #'fa': (u'/فضاینام توضیحات', ),
-            #'fa': (u'/آغاز جعبه', ),
-            #'fa': (u'/پایان جعبه۲', ),
-            #'fa': (u'/آغاز جعبه۲', ),
-            #'fa': (u'/پایان جعبه', ),
-            #'fa': (u'/توضیحات', ),
+            # 'fa': (u'/صفحه الگو', ),
+            # 'fa': (u'/فضاینام توضیحات', ),
+            # 'fa': (u'/آغاز جعبه', ),
+            # 'fa': (u'/پایان جعبه۲', ),
+            # 'fa': (u'/آغاز جعبه۲', ),
+            # 'fa': (u'/پایان جعبه', ),
+            # 'fa': (u'/توضیحات', ),
             'fr': (u'/documentation', ),
             'id': (u'/dok', ),
             'ko': (u'/설명문서', ),
diff --git a/pywikibot/fixes.py b/pywikibot/fixes.py
index 723631d..b492727 100644
--- a/pywikibot/fixes.py
+++ b/pywikibot/fixes.py
@@ -67,9 +67,9 @@
             # Keep in mind that MediaWiki automatically converts <br> to <br />
             # when rendering pages, so you might comment the next two lines out
             # to save some time/edits.
-            #(r'(?i)<br>', r'<br />'),
+            # (r'(?i)<br>', r'<br />'),
             # linebreak with attributes
-            #(r'(?i)<br ([^>/]+?)>', r'<br \1 />'),
+            # (r'(?i)<br ([^>/]+?)>', r'<br \1 />'),
             (r'(?i)<b>(.*?)</b>', r"'''\1'''"),
             (r'(?i)<strong>(.*?)</strong>', r"'''\1'''"),
             (r'(?i)<i>(.*?)</i>', r"''\1''"),
@@ -77,7 +77,7 @@
             # horizontal line without attributes in a single line
             (r'(?i)([\r\n])<hr[ /]*>([\r\n])', r'\1----\2'),
             # horizontal line without attributes with more text in the same line
-            #(r'(?i) +<hr[ /]*> +', r'\r\n----\r\n'),
+            # (r'(?i) +<hr[ /]*> +', r'\r\n----\r\n'),
             # horizontal line with attributes; can't be done with wiki syntax
             # so we only make it XHTML compliant
             (r'(?i)<hr ([^>/]+?)>', r'<hr \1 />'),
@@ -108,8 +108,8 @@
             'de': u'Bot: korrigiere Grammatik',
         },
         'replacements': [
-            #(u'([Ss]owohl) ([^,.]+?), als auch', r'\1 \2 als auch'),
-            #(u'([Ww]eder) ([^,.]+?), noch', r'\1 \2 noch'),
+            # (u'([Ss]owohl) ([^,.]+?), als auch', r'\1 \2 als auch'),
+            # (u'([Ww]eder) ([^,.]+?), noch', r'\1 \2 noch'),
             #
             # Vorsicht bei Substantiven, z. B. 3-Jähriger!
             (u'(\d+)(minütig|stündig|tägig|wöchig|jährig|minütlich|stündlich|täglich|wöchentlich|jährlich|fach|mal|malig|köpfig|teilig|gliedrig|geteilt|elementig|dimensional|bändig|eckig|farbig|stimmig)', r'\1-\2'),
@@ -128,7 +128,7 @@
             # Achtung bei Französisch: https://de.wikipedia.org/wiki/Plenk#Sonderfall_Franz.C3.B6sisch
             # Leerzeichen vor Doppelpunkt/Semikolon kann korrekt sein, nach irgendeiner Norm für Zitationen.
             (u'([a-zäöüß](]])?) ([,.!?]) (([[)?[a-zäöüA-ZÄÖÜ])', r'\1\3 \4'),
-            #(u'([a-z].)([A-Z])', r'\1 \2'),
+            # (u'([a-z].)([A-Z])', r'\1 \2'),
         ],
         'exceptions': {
             'inside-tags': [
@@ -207,7 +207,7 @@
             # external link starting with double bracket
             (r'[[(?P<url>https?://.+?)]', r'[\g<url>]'),
             # external link with forgotten closing bracket
-            #(r'[(?P<url>https?://[^]\s]+)\r\n', r'[\g<url>]\r\n'),
+            # (r'[(?P<url>https?://[^]\s]+)\r\n', r'[\g<url>]\r\n'),
             # external link ending with double bracket.
             # do not change weblinks that contain wiki links inside
             # inside the description
@@ -283,7 +283,7 @@
             # external link starting with double bracket
             (r'[[(?P<url>https?://.+?)]', r'[\g<url>]'),
             # external link with forgotten closing bracket
-            #(r'[(?P<url>https?://[^]\s]+)\r\n', r'[\g<url>]\r\n'),
+            # (r'[(?P<url>https?://[^]\s]+)\r\n', r'[\g<url>]\r\n'),
             # external link and description separated by a dash, with
             # whitespace in front of the dash, so that it is clear that
             # the dash is not a legitimate part of the URL.
@@ -382,15 +382,15 @@
         },
         'replacements': [
             # space after birth sign w/ year
-            #(u'(*(\d{3,4})', u'(* \1'),
-            ## space after death sign w/ year
-            #(u'†(\d{3,4})', u'† \1'),
-            #(u'†(\d{3,4})', u'† \1'),
-            ## space after birth sign w/ linked date
-            #(u'(*[[(\d)', u'(* [[\1'),
-            ## space after death sign w/ linked date
-            #(u'†[[(\d)', u'† [[\1'),
-            #(u'†[[(\d)', u'† [[\1'),
+            # (u'(*(\d{3,4})', u'(* \1'),
+            # space after death sign w/ year
+            # (u'†(\d{3,4})', u'† \1'),
+            # (u'†(\d{3,4})', u'† \1'),
+            # space after birth sign w/ linked date
+            # (u'(*[[(\d)', u'(* [[\1'),
+            # space after death sign w/ linked date
+            # (u'†[[(\d)', u'† [[\1'),
+            # (u'†[[(\d)', u'† [[\1'),
             (u'[[(\d+. (?:Januar|Februar|März|April|Mai|Juni|Juli|August|September|Oktober|November|Dezember)) (\d{1,4})]]', u'[[\1]] [[\2]]'),
             # Keine führende Null beim Datum (ersteinmal nur bei denen, bei denen auch ein Leerzeichen fehlt)
             (u'0(\d+).(Januar|Februar|März|April|Mai|Juni|Juli|August|September|Oktober|November|Dezember)', r'\1. \2'),
@@ -458,7 +458,9 @@
             'ar': u'تدقيق إملائي',
         },
         'replacements': [
-            #(u' ,', u' ،'),  # FIXME: Do not replace comma in non-Arabic text, interwiki, image links or <math> syntax.
+            # FIXME: Do not replace comma in non-Arabic text,
+            # interwiki, image links or <math> syntax.
+            # (u' ,', u' ،'),
             (r'\b' + u'إمرأة' + r'\b', u'امرأة'),
             (r'\b' + u'الى' + r'\b', u'إلى'),
             (r'\b' + u'إسم' + r'\b', u'اسم'),
diff --git a/pywikibot/interwiki_graph.py b/pywikibot/interwiki_graph.py
index d1d4754..00fe657 100644
--- a/pywikibot/interwiki_graph.py
+++ b/pywikibot/interwiki_graph.py
@@ -104,7 +104,6 @@
                 if isinstance(oppositeEdge, list):
                     # bugfix for pydot >= 1.0.3
                     oppositeEdge = oppositeEdge[0]
-                #oppositeEdge.set_arrowtail('normal')
                 oppositeEdge.set_dir('both')
             # workaround for bug [ 1722739 ]: prevent duplicate edges
             # (it is unclear why duplicate edges occur)
diff --git a/pywikibot/site.py b/pywikibot/site.py
index 8f260e8..9734672 100644
--- a/pywikibot/site.py
+++ b/pywikibot/site.py
@@ -4559,7 +4559,7 @@
             pywikibot.debug(result, _logger)
if "warnings" in result and not ignore_warnings: - #TODO: Handle multiple warnings at the same time + # TODO: Handle multiple warnings at the same time warning = list(result["warnings"].keys())[0] message = result["warnings"][warning] raise pywikibot.UploadWarning(warning, upload_warnings[warning] diff --git a/scripts/blockpageschecker.py b/scripts/blockpageschecker.py index 7e6a6e4..8bc027e 100755 --- a/scripts/blockpageschecker.py +++ b/scripts/blockpageschecker.py @@ -71,9 +71,7 @@ '¶ms;': pagegenerators.parameterHelp, }
-#######################################################
-#--------------------- PREFERENCES -------------------#
-################### -- Edit below! -- #################
+# PREFERENCES
 templateSemiProtection = {
     'en': None,
@@ -152,9 +150,7 @@
 # Check list to block the users that haven't set their preferences
 project_inserted = ['en', 'fr', 'it', 'ja', 'pt', 'zh']
-#######################################################
-#------------------ END PREFERENCES ------------------#
-################## -- Edit above! -- ##################
+# END PREFERENCES
 def understandBlock(text, TTP, TSP, TSMP, TTMP, TU):
diff --git a/scripts/blockreview.py b/scripts/blockreview.py
index b4790bd..5243d2c 100644
--- a/scripts/blockreview.py
+++ b/scripts/blockreview.py
@@ -144,8 +144,8 @@
                                            self.parts)
                     adminText += note
                     self.save(adminText, adminPage, comment, False)
-                ### test for pt-wiki
-                ### just print all sysops talk pages
+                # test for pt-wiki
+                # just print all sysops talk pages
                 elif self.site.sitename() == 'wikipedia:pt':
                     from pywikibot import pagegenerators as pg
                     gen = pg.PreloadingGenerator(self.SysopGenerator())
diff --git a/scripts/casechecker.py b/scripts/casechecker.py
index 97d84fe..89347ca 100644
--- a/scripts/casechecker.py
+++ b/scripts/casechecker.py
@@ -276,12 +276,6 @@
                         for nn in self.FindBadWords(n['title'])]
                     self.knownWords = set(allWords)
-##                    kw = set()
-##                    for w in allWords:
-##                        if len(self.ProcessTitle(w)[1]) > 0:
-##                            kw.add(w)
-##                    self.knownWords = kw
-
                 else:
                     raise ValueError(u'The number of pageids is not 1')
@@ -381,9 +375,6 @@
                 if self.replace:
                     if len(err[1]) == 1:
                         newTitle = err[1][0]
-##                        choice = pywikibot.input_yn(u'Move %s to %s?'
-##                                                    % (title, newTitle),
-##                                                    automatic_quit=False)
                         editSummary = i18n.twtranslate(
                             self.site, "casechecker-rename")
                         dst = self.Page(newTitle)
diff --git a/scripts/checkimages.py b/scripts/checkimages.py
index ec36f30..1310359 100644
--- a/scripts/checkimages.py
+++ b/scripts/checkimages.py
@@ -529,12 +529,12 @@
 # Page where is stored the message to send as email to the users
 emailPageWithText = {
-    #'de': 'Benutzer:ABF/D3',
+    # 'de': 'Benutzer:ABF/D3',
 }
 # Title of the email
 emailSubject = {
-    #'de': 'Problemen mit Deinem Bild auf der Deutschen Wikipedia',
+    # 'de': 'Problemen mit Deinem Bild auf der Deutschen Wikipedia',
 }
 # Seems that uploaderBots aren't interested to get messages regarding the
@@ -556,9 +554,7 @@
 project_inserted = ['ar', 'commons', 'de', 'en', 'fa', 'ga', 'hu', 'it',
                     'ja', 'ko', 'ta', 'ur', 'zh']
-################################################################################
-# <--------------------------- Change only above! ---------------------------> #
-################################################################################
+# END OF CONFIGURATION.
 class LogIsFull(pywikibot.Error):
@@ -1016,12 +1014,8 @@
                         u'%s is a duplicate and has to be tagged...'
                         % duplicate)
                     images_to_tag_list.append(duplicate)
-##                    if duplicate != duplicates[-1]:
                     string += u"*[[:%s%s]]\n" % (self.image_namespace,
                                                  duplicate)
-##                    else:
-##                        string += "*[[:%s%s]]" \
-##                                  % (self.image_namespace, duplicate)
                 else:
                     pywikibot.output(
                         u"Already put the dupe-template in the files's page"
@@ -1230,17 +1224,6 @@
     def load_licenses(self):
         """Load the list of the licenses."""
-##        catName = i18n.translate(self.site, category_with_licenses)
-##        cat = pywikibot.Category(pywikibot.Site(), catName)
-##        categories = [page.title() for page in pagegenerators.SubCategoriesPageGenerator(cat)]
-##        categories.append(catName)
-##        list_licenses = list()
-##        pywikibot.output(u'\n\t...Loading the licenses allowed...\n')
-##        for catName in categories:
-##            cat = pywikibot.Category(pywikibot.Site(), catName)
-##            gen = pagegenerators.CategorizedPageGenerator(cat)
-##            pages = [page for page in gen]
-##            list_licenses.extend(pages)
         catName = i18n.translate(self.site, category_with_licenses)
         if not catName:
             raise pywikibot.Error(
@@ -1406,7 +1389,6 @@
         else:
             reported = self.report_image(self.imageName)
             if reported:
-                #if self.imagestatus_used:
                 self.report(self.mex_used, self.imageName, self.text_used,
                             u"\n%s\n" % self.head_used, None,
                             self.imagestatus_used, self.summary_used)
@@ -1482,7 +1464,11 @@
         imagesToSkip = 0
         # if normal, we can take as many images as "limit" has told us,
         # otherwise, sorry, nope.
-        if normal and False:
+        # TODO: remove this exception as part of bug 65136
+        raise NotImplementedError(
+            "The wait option is not available at core yet.")
+
+        if normal:
             printWithTimeZone(
                 u'Skipping the files uploaded less than %s seconds ago..'
                 % waitTime)
@@ -1537,11 +1523,9 @@
                 newGen.append(imageData[0])
             return newGen
         else:
-            #pywikibot.output(
-            #    u"The wait option is available only with the standard "
-            #    u"generator.")
             pywikibot.output(
-                u"The wait option is not available at core yet.")
+                u"The wait option is available only with the standard "
+                u"generator.")
             return generator
     def isTagged(self):
@@ -1670,10 +1654,6 @@
             if parl.lower() in extension.lower():
                 delete = True
         (license_found, hiddenTemplateFound) = self.smartDetection()
-        # If the image exists (maybe it has been deleting during the oder
-        # checking parts or something, who knows? ;-))
-        #if p.exists(): <-- improve thebot, better to make as
-        #                   less call to the server as possible
         # Here begins the check block.
         if brackets and license_found:
             # It works also without this... but i want only to be sure ^^
@@ -1776,14 +1756,15 @@
         elif len(arg) > 5:
             skip_number = int(arg[6:])
     elif arg.startswith('-wait'):
-        pywikibot.warning(
-            u'"-wait" option is not implemented yet in core. Sorry!\n')
-##        if len(arg) == 5:
-##            waitTime = int(pywikibot.input(
-##                u'How many time do you want to wait before checking the '
-##                u'files?'))
-##        elif len(arg) > 5:
-##            waitTime = int(arg[6:])
+        # FIXME: bug 65136
+        raise NotImplementedError(
+            "-wait option is not available at core yet. Sorry!")
+        if len(arg) == 5:
+            waitTime = int(pywikibot.input(
+                u'How many time do you want to wait before checking the '
+                u'files?'))
+        elif len(arg) > 5:
+            waitTime = int(arg[6:])
     elif arg.startswith('-start'):
         if len(arg) == 6:
             firstPageTitle = pywikibot.input(
diff --git a/scripts/commonscat.py b/scripts/commonscat.py
index ad004f6..3605fae 100755
--- a/scripts/commonscat.py
+++ b/scripts/commonscat.py
@@ -330,14 +330,13 @@
                                        checkedCommonscatTarget, LinkText, Note)
             return True
         else:
-            #Commonscat link is wrong
+            # Commonscat link is wrong
             commonscatLink = self.findCommonscatLink(page)
             if (commonscatLink != u''):
                 self.changeCommonscat(page, currentCommonscatTemplate,
                                       currentCommonscatTarget,
                                       primaryCommonscat, commonscatLink)
-            #else
-            #Should i remove the commonscat link?
+            # TODO: if the commonsLink == u'', should it be removed?
         elif self.skipPage(page):
             pywikibot.output("Found a template in the skip list. Skipping %s"
@@ -361,7 +360,7 @@
                          description=u''):
         """ Change the current commonscat template and target. """
         if oldcat == '3=S' or linktitle == '3=S':
-            return  # additional param on de-wiki, TODO: to be handled
+            return  # TODO: handle additional param on de-wiki
         if not linktitle and (page.title().lower() in oldcat.lower() or
                               oldcat.lower() in page.title().lower()):
             linktitle = oldcat
@@ -432,7 +431,7 @@
                                 ipage.title(), checkedCommonscat))
                         return checkedCommonscat
                 except pywikibot.BadTitle:
-                    #The interwiki was incorrect
+                    # The interwiki was incorrect
                     return u''
         return u''
@@ -475,7 +474,7 @@
         pywikibot.log("getCommonscat: " + name)
         try:
             commonsSite = self.site.image_repository()
-            #This can throw a pywikibot.BadTitle
+            # This can throw a pywikibot.BadTitle
             commonsPage = pywikibot.Page(commonsSite, "Category:" + name)
         if not commonsPage.exists():
diff --git a/scripts/cosmetic_changes.py b/scripts/cosmetic_changes.py
index 5776a6e..e9437a1 100755
--- a/scripts/cosmetic_changes.py
+++ b/scripts/cosmetic_changes.py
@@ -181,9 +181,9 @@
             self.cleanUpSectionHeaders,
             self.putSpacesInLists,
             self.translateAndCapitalizeNamespaces,
-##            self.translateMagicWords,
+# FIXME: self.translateMagicWords,
             self.replaceDeprecatedTemplates,
-##            self.resolveHtmlEntities,
+# FIXME: self.resolveHtmlEntities,
             self.validXhtml,
             self.removeUselessSpaces,
             self.removeNonBreakingSpaceBeforePercent,
@@ -340,10 +340,10 @@
         # Adding categories
         if categories:
-            ##Sorting categories in alphabetic order. beta test only on Persian Wikipedia, TODO fix bug for sorting
-            #if self.site.code == 'fa':
-            #    categories.sort()
-            ##Taking main cats to top
+            # TODO: Sorting categories in alphabetic order.
+            # e.g. using categories.sort()
+
+            # TODO: Taking main cats to top
             # for name in categories:
             #     if re.search(u"(.+?)|(.{,1}?)",name.title()) or name.title()==name.title().split(":")[0]+title:
             #         categories.remove(name)
@@ -515,9 +515,9 @@
                     label[len(titleWithSection):])
             else:
                 # Try to capitalize the first letter of the title.
-                # Maybe this feature is not useful for languages that
-                # don't capitalize nouns...
-                #if not self.site.nocapitalize:
+                # Not useful for languages that don't capitalize nouns.
+                # TODO: Determine which languages this is suitable for
+                # perhaps using self.site.nocapitalize
                 if self.site.sitename() == 'wikipedia:de':
                     titleWithSection = (titleWithSection[0].upper() +
                                         titleWithSection[1:])
@@ -660,17 +660,18 @@
                                         new, exceptions)
         return text
-    #from fixes.py
+    # from fixes.py
     def fixSyntaxSave(self, text):
         exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
                       'startspace']
         # link to the wiki working on
-        ## TODO: disable this for difflinks and titled links
-        ## https://de.wikipedia.org/w/index.php?title=Wikipedia%3aVandalismusmeldung&am...
-##        text = textlib.replaceExcept(text,
-##                                     r'[https?://%s.%s.org/wiki/(?P<link>\S+)\s+(?P<title>.+?)\s?]'
-##                                     % (self.site.code, self.site.family.name),
-##                                     r'[[\g<link>|\g<title>]]', exceptions)
+        # TODO: disable this for difflinks and titled links,
+        # to prevent edits like this:
+        # https://de.wikipedia.org/w/index.php?title=Wikipedia%3aVandalismusmeldung&am...
+#        text = textlib.replaceExcept(text,
+#                                     r'[https?://%s.%s.org/wiki/(?P<link>\S+)\s+(?P<title>.+?)\s?]'
+#                                     % (self.site.code, self.site.family.name),
+#                                     r'[[\g<link>|\g<title>]]', exceptions)
         # external link in double brackets
         text = textlib.replaceExcept(
             text,
@@ -730,13 +731,13 @@
         return text
     def fixReferences(self, text):
-        #https://en.wikipedia.org/wiki/User:AnomieBOT/source/tasks/OrphanReferenceFix...
+        # See also https://en.wikipedia.org/wiki/User:AnomieBOT/source/tasks/OrphanReferenceFix...
         exceptions = ['nowiki', 'comment', 'math', 'pre', 'source',
                       'startspace']
         # it should be name = " or name=" NOT name ="
         text = re.sub(r'(?i)<ref +name(= *| *=)"', r'<ref name="', text)
-        #remove empty <ref/>-tag
+        # remove empty <ref/>-tag
         text = textlib.replaceExcept(text,
                                      r'(?i)(<ref\s*/>|<ref *>\s*</ref>)',
                                      r'', exceptions)
@@ -783,8 +784,8 @@
             'gallery',
             'hyperlink',
             'interwiki',
-            # but changes letters inside wikilinks
-            #'link',
+            # FIXME: but changes letters inside wikilinks
+            # 'link',
             'math',
             'pre',
             'template',
@@ -794,6 +795,7 @@
             'startspace',
             'inputbox',
         ]
+        # FIXME: use textlib.NON_LATIN_DIGITS
         # valid digits
         digits = {
             'ckb': u'٠١٢٣٤٥٦٧٨٩',
@@ -809,7 +811,7 @@
             u'[[(' + '|'.join(namespaces) +
             '):.+?.\w+? *(|(([[.*?]])|.)*)?]]',
             re.UNICODE)
-        #not to let bot edits in latin content
+        # not to let bot edits in latin content
         exceptions.append(re.compile(u"[^%(fa)s] *?"*? *?, *?[^%(fa)s]"
                                      % {'fa': faChrs}))
         exceptions.append(pattern)
@@ -822,7 +824,10 @@
         text = textlib.replaceExcept(text, u'ه', u'ھ', exceptions)
         text = textlib.replaceExcept(text, u'ك', u'ک', exceptions)
         text = textlib.replaceExcept(text, u'[ىي]', u'ی', exceptions)
+        return text
+
+        # FIXME: split this function into two.
         # replace persian/arabic digits
         # deactivated due to bug 55185
         for i in range(0, 10):
diff --git a/scripts/editarticle.py b/scripts/editarticle.py
index 680c9b5..0ee678c 100755
--- a/scripts/editarticle.py
+++ b/scripts/editarticle.py
@@ -51,8 +51,6 @@
         parser.add_option("-p", "--page", help="Page to edit")
        parser.add_option("-w", "--watch", action="store_true", default=False,
                          help="Watch article after edit")
-        #parser.add_option("-n", "--new_data", default="",
-        #                  help="Automatically generated content")
         (self.options, args) = parser.parse_args(args=my_args)
         # for convenience, if we have an arg, stuff it into the opt, so we
diff --git a/scripts/featured.py b/scripts/featured.py
index 6de45d6..125076e 100644
--- a/scripts/featured.py
+++ b/scripts/featured.py
@@ -160,7 +160,7 @@
     'nn': ['Link AA'],
     'no': ['Link AA'],
     'pt': ['Bom interwiki'],
-##    'tr': ['Link GA', 'Link KM'],
+    # 'tr': ['Link GA', 'Link KM'],
     'vi': [u'Liên kết bài chất lượng tốt'],
     'wo': ['Lien BA'],
 }
diff --git a/scripts/imagerecat.py b/scripts/imagerecat.py
index f9bcb12..70e7ccd 100644
--- a/scripts/imagerecat.py
+++ b/scripts/imagerecat.py
@@ -143,7 +143,7 @@
                          'cl': hint_wiki,
                          'w': lang})
     else:
-        #Cant handle other sites atm
+        # Cant handle other sites atm
         return [], [], []
 commonsenseRe = re.compile('^#COMMONSENSE(.*)#USAGE(\s)+((?P<usagenum>(\d)+))\s(?P<usage>(.*))\s#KEYWORDS(\s)+((?P<keywords>(\d)+))(.*)#CATEGORIES(\s)+((?P<catnum>(\d)+))\s(?P<cats>(.*))\s#GALLERIES(\s)+((?P<galnum>(\d)+))\s(?P<gals>(.*))\s(.*)#EOF$', re.MULTILINE + re.DOTALL)  # noqa
@@ -173,7 +173,6 @@
         used = matches.group('usage').splitlines()
         for use in used:
             usage = usage + getUsage(use)
-            #pywikibot.output(use)
     if matches.group('catnum') > 0:
         cats = matches.group('cats').splitlines()
         for cat in cats:
@@ -196,7 +195,7 @@
     result = []
     locationList = getOpenStreetMap(latitude, longitude)
     for i in range(0, len(locationList)):
-        #print 'Working on ' + locationList[i]
+        pywikibot.log(u'Working on %r' % locationList[i])
         if i <= len(locationList) - 3:
             category = getCategoryByName(name=locationList[i],
                                          parent=locationList[i + 1],
@@ -208,7 +207,6 @@
             category = getCategoryByName(name=locationList[i])
         if category and not category == u'':
             result.append(category)
-    #print result
     return result
@@ -235,7 +233,6 @@
     validParts = [u'hamlet', u'village', u'city', u'county', u'country']
     invalidParts = [u'path', u'road', u'suburb', u'state', u'country_code']
     addressparts = et.find('addressparts')
-    #xml.etree.ElementTree.dump(et)
     for addresspart in addressparts.getchildren():
         if addresspart.tag in validParts:
@@ -244,7 +241,6 @@
             pywikibot.output(u'Dropping %s, %s' % (addresspart.tag, addresspart.text))
         else:
             pywikibot.warning(u'%s, %s is not in addressparts lists' % (addresspart.tag, addresspart.text))
-    #print result
     return result
@@ -279,13 +275,10 @@
     if matches:
         if matches.group('lang'):
             lang = matches.group('lang')
-            #pywikibot.output(lang)
         if matches.group('project'):
             project = matches.group('project')
-            #pywikibot.output(project)
         if matches.group('articles'):
             articles = matches.group('articles')
-            #pywikibot.output(articles)
     for article in articles.split():
         result.append((lang, project, article))
     return result
@@ -353,8 +346,8 @@
         if cat.endswith(u'by country'):
             listByCountry.append(cat)
-        #If cat contains 'by country' add it to the list
-        #If cat contains the name of a country add it to the list
+        # If cat contains 'by country' add it to the list
+        # If cat contains the name of a country add it to the list
         else:
             for country in countries:
                 if country in cat:
@@ -386,11 +379,11 @@
         result = filterCategoriesRe.findall(
             filterCategoriesPage.read().decode('utf-8'))
     except IOError:
-        #Something is wrong, forget about this filter and just return the input
+        # Something is wrong, forget about this filter, and return the input
         return categories
     if not result:
-        #Is empty, dont want to remove all categories
+        # Is empty, dont want to remove all categories
         return categories
     return result
diff --git a/scripts/imagetransfer.py b/scripts/imagetransfer.py
index 10758cf..68d6649 100644
--- a/scripts/imagetransfer.py
+++ b/scripts/imagetransfer.py
@@ -229,7 +229,6 @@
     def showImageList(self, imagelist):
         for i in range(len(imagelist)):
             image = imagelist[i]
-            #sourceSite = sourceImagePage.site
             print("-" * 60)
             pywikibot.output(u"%s. Found image: %s"
                              % (i, image.title(asLink=True)))
diff --git a/scripts/imageuncat.py b/scripts/imageuncat.py
index 36bacd0..581eae9 100755
--- a/scripts/imageuncat.py
+++ b/scripts/imageuncat.py
@@ -20,7 +20,7 @@
 import pywikibot
 from pywikibot import pagegenerators
-#Probably unneeded because these are hidden categories. Have to figure it out.
+# Probably unneeded because these are hidden categories. Have to figure it out.
 ignoreCategories = [u'[[Category:CC-BY-SA-3.0]]',
                     u'[[Category:GFDL]]',
                     u'[[Category:Media for cleanup]]',
@@ -29,7 +29,7 @@
                     u'[[Category:Media lacking a description]]',
                     u'[[Category:Self-published work]]']
-#Dont bother to put the template on a image with one of these templates
+# Dont bother to put the template on a image with one of these templates
 skipTemplates = [u'Delete',
                  u'Nocat',
                  u'No license',
@@ -39,7 +39,7 @@
                  u'Uncategorized',
                  u'Uncat']
-#Ignore the templates in this really long list when looking for relevant categories
+# Ignore templates in this long list when looking for relevant categories
 ignoreTemplates = [u'1000Bit',
                    u'1922 cyc',
                    u'2MASS',
@@ -1283,7 +1283,7 @@
         return False
     for templateWithTrail in page.templates():
-        #Strip of trailing garbage
+        # Strip of trailing garbage
         template = templateWithTrail.title().rstrip('\n').rstrip()
         if template in skipTemplates:
             # Already tagged with a template, skip it
diff --git a/scripts/isbn.py b/scripts/isbn.py
index c9f14d6..50663ac 100755
--- a/scripts/isbn.py
+++ b/scripts/isbn.py
@@ -1277,11 +1277,8 @@
         sum = 0
         for i in range(0, 9):
             sum += (i + 1) * int(self.digits()[i])
-        #print sum
         checksum = sum % 11
-        #print checksum
         lastDigit = self.digits()[-1]
-        #print lastDigit
         if not ((checksum == 10 and lastDigit in 'Xx') or
                 (lastDigit.isdigit() and checksum == int(lastDigit))):
             raise InvalidIsbnException('The ISBN checksum of %s is incorrect.'
@@ -1304,11 +1301,10 @@
         Adds the GS1 prefix '978' and recalculates the checksum.
         The hyphenation structure is taken from the format of the
         original ISBN number.
+
+        @rtype: L{ISBN13}
         """
         code = '978-' + self.code[:-1]
-
-        #cs = self.calculateChecksum()
-        #code += str(cs)
         return ISBN13(code, checksumMissing=True)
     def format(self):
diff --git a/scripts/redirect.py b/scripts/redirect.py
index 39212a7..d172db0 100755
--- a/scripts/redirect.py
+++ b/scripts/redirect.py
@@ -438,7 +438,7 @@
             movedTarget = self.moved_page(targetPage)
             if movedTarget:
                 if not movedTarget.exists():
-                    ### FIXME: Test to another move
+                    # FIXME: Test to another move
                     pywikibot.output(u'Target page %s does not exist'
                                      % (movedTarget))
                 elif redir_name == movedTarget.title():
@@ -494,8 +494,8 @@
                     pywikibot.output(u"No sysop in user-config.py, "
                                      u"put page to speedy deletion.")
                     content = redir_page.get(get_redirect=True)
-                    ### TODO: Add bot's signature if needed
-                    ###       Not supported via TW yet
+                    # TODO: Add bot's signature if needed
+                    #       Not supported via TW yet
                     content = i18n.twtranslate(
                         targetPage.site,
                         'redirect-broken-redirect-template'
@@ -606,31 +606,30 @@
                     pywikibot.warning(
                         u'Redirect target %s forms a redirect loop.'
                         % targetPage.title(asLink=True))
-                    break  # doesn't work. edits twice!
-##                    try:
-##                        content = targetPage.get(get_redirect=True)
-##                    except pywikibot.SectionError:
-##                        content = pywikibot.Page(
-##                            targetPage.site,
-##                            targetPage.title(withSection=False)
-##                        ).get(get_redirect=True)
-##                    if i18n.twhas_key(
-##                        targetPage.site.lang,
-##                        'redirect-broken-redirect-template') and \
-##                       i18n.twhas_key(targetPage.site.lang,
-##                                      'redirect-remove-loop'):
-##                        pywikibot.output(u"Tagging redirect for deletion")
-##                        # Delete the two redirects
-##                        content = i18n.twtranslate(
-##                            targetPage.site.lang,
-##                            'redirect-broken-redirect-template'
-##                        ) + "\n" + content
-##                        summ = i18n.twtranslate(
-##                            targetPage.site.lang,
-##                            'redirect-remove-loop')
-##                        targetPage.put(content, summ)
-##                        redir.put(content, summ)
-##                    break  # TODO Better implement loop redirect
+                    break  # FIXME: doesn't work. edits twice!
+                    try:
+                        content = targetPage.get(get_redirect=True)
+                    except pywikibot.SectionError:
+                        content_page = pywikibot.Page(
+                            targetPage.site,
+                            targetPage.title(withSection=False))
+                        content = content_page.get(get_redirect=True)
+                    if i18n.twhas_key(
+                        targetPage.site.lang,
+                        'redirect-broken-redirect-template') and \
+                       i18n.twhas_key(targetPage.site.lang,
+                                      'redirect-remove-loop'):
+                        pywikibot.output(u"Tagging redirect for deletion")
+                        # Delete the two redirects
+                        content = i18n.twtranslate(
+                            targetPage.site.lang,
+                            'redirect-broken-redirect-template'
+                        ) + "\n" + content
+                        summ = i18n.twtranslate(targetPage.site.lang,
+                                                'redirect-remove-loop')
+                        targetPage.put(content, summ)
+                        redir.put(content, summ)
+                    break
                 else:  # redirect target found
                     if targetPage.isStaticRedirect():
                         pywikibot.output(
@@ -769,7 +768,7 @@
         if ns == '':
             # "-namespace:" does NOT yield -namespace:0 further down the road!
             ns = i18n.input('pywikibot-enter-namespace-number')
-        # TODO! at least for some generators enter a namespace by its name
+        # TODO: at least for some generators enter a namespace by its name
         # or number
         if ns == '':
             ns = '0'
diff --git a/scripts/reflinks.py b/scripts/reflinks.py
index 90bebe1..b377d20 100644
--- a/scripts/reflinks.py
+++ b/scripts/reflinks.py
@@ -672,7 +672,6 @@
                 enc.append(tmp)
             else:
                 pywikibot.output(u'No charset found for %s' % ref.link)
-##                continue  # do not process pages without charset
         if not contentType:
             pywikibot.output(u'No content-type found for %s' % ref.link)
             continue
diff --git a/scripts/script_wui.py b/scripts/script_wui.py
index 730312d..a2dd6b4 100755
--- a/scripts/script_wui.py
+++ b/scripts/script_wui.py
@@ -21,7 +21,7 @@
 python script_wui.py -dir:.
     Default operating mode.
""" -## @package script_wui +# @package script_wui # @brief Script WikiUserInterface (WUI) Bot # # @copyright Dr. Trigon, 2012 @@ -78,7 +78,8 @@ import lua # The crontab package is https://github.com/josiahcarlson/parse-crontab # version 0.20 installs a package called 'tests' which conflicts with our -# test suite. Use https://github.com/jayvdb/parse-crontab until it is fixed. +# test suite. The patch to fix this has been merged, but is not released. +# TODO: Use https://github.com/jayvdb/parse-crontab until it is fixed. import crontab
 import pywikibot
@@ -129,8 +130,10 @@
         # - Lua -
         pywikibot.output(u'** Redirecting Lua print in order to catch it')
         lua.execute('__print = print')
-        #lua.execute('print = python.builtins().print')
         lua.execute('print = python.globals().pywikibot.output')
+        # It may be useful in debugging to install the 'print' builtin
+        # as the 'print' function in lua. To do this:
+        # lua.execute('print = python.builtins().print')
         # init constants
         templ = pywikibot.Page(self.site, bot_config['ConfCSSshell'])
@@ -155,7 +158,6 @@
         match = self.re_edit.match(e.arguments()[0])
         if not match:
             return
-        #print match.groups(), match.group('page'), match.group('user')
         user = match.group('user').decode(self.site.encoding())
         if user == bot_config['BotName']:
             return
@@ -187,7 +189,6 @@
             entry = crontab.CronTab(timestmp)
             # find the delay from current minute (does not return 0.0 - but next)
             delay = entry.next(datetime.datetime.now().replace(second=0, microsecond=0) - datetime.timedelta(microseconds=1))
-            #pywikibot.output(u'CRON delay for execution: %.3f (<= %i)' % (delay, bot_config['CRONMaxDelay']))
             if (delay <= bot_config['CRONMaxDelay']):
                 pywikibot.output(u"CRONTAB: %s / %s / %s" % (page, rev, timestmp))
@@ -267,8 +268,10 @@
 def wiki_logger(buffer, page, rev=None):
     """Log to wiki."""
+    # FIXME: what is this??
     # (might be a problem here for TS and SGE, output string has another encoding)
-    #buffer = buffer.decode(config.console_encoding)
+    if False:
+        buffer = buffer.decode(pywikibot.config.console_encoding)
     buffer = re.sub("\03{(.*?)}(.*?)\03{default}", "\g<2>", buffer)
     if rev is None:
         rev = page.latestRevision()
diff --git a/scripts/solve_disambiguation.py b/scripts/solve_disambiguation.py
index 9f26db8..e6d589d 100644
--- a/scripts/solve_disambiguation.py
+++ b/scripts/solve_disambiguation.py
@@ -247,8 +247,8 @@
         u'Wikipedy:Fangnet',
     ],
     'hu': [
-        #hu:Wikipédia:Kocsmafal (egyéb)#Hol nem kell egyértelműsíteni?
-        #2012-02-08
+        # hu:Wikipédia:Kocsmafal (egyéb)#Hol nem kell egyértelműsíteni?
+        # 2012-02-08
         u'Wikipédia:(?!Sportműhely/Eddigi cikkeink).*',
         u'.*(egyértelműsítő lap)$',
         u'.*[Vv]ita:.*',
@@ -417,7 +417,7 @@
         # remove trailing newlines and carriage returns
         while line[-1] in ['\n', '\r']:
             line = line[:-1]
-        #skip empty lines
+        # skip empty lines
         if line != '':
             self.ignorelist.append(line)
         f.close()
@@ -642,7 +642,7 @@
             n += 1
             # how many bytes should be displayed around the current link
             context = 60
-            #there's a {{dn}} here already
+            # check if there's a {{dn}} here already
             already_dn = text[m.end():m.end() + 8].find(dn_template_str[:4]) > -1
             if already_dn and self.dnSkip:
                 continue
@@ -755,7 +755,7 @@
                     position_split = end_of_word_match.start(0)
                 else:
                     position_split = 0
-                #insert dab needed template
+                # insert dab needed template
                 text = (text[:m.end() + position_split] +
                         dn_template_str +
                         text[m.end() + position_split:])
diff --git a/scripts/spamremove.py b/scripts/spamremove.py
index ec92df4..92f70a2 100755
--- a/scripts/spamremove.py
+++ b/scripts/spamremove.py
@@ -1,5 +1,5 @@
-# -*- coding: utf-8 -*-
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
""" Script to remove links that are being or have been spammed. diff --git a/scripts/weblinkchecker.py b/scripts/weblinkchecker.py index 5c2a0e9..b2d13f9 100644 --- a/scripts/weblinkchecker.py +++ b/scripts/weblinkchecker.py @@ -361,7 +361,7 @@ else: raise if self.response.status >= 300 and self.response.status <= 399: - #print response.getheaders() + # to debug, print response.getheaders() redirTarget = self.response.getheader('Location') if redirTarget: try: @@ -752,7 +752,7 @@ def __init__(self, generator, HTTPignore=None, day=7): self.generator = generator if config.report_dead_links_on_talk: - #pywikibot.output("Starting talk page thread") + pywikibot.log("Starting talk page thread") reportThread = DeadLinkReportThread() # thread dies when program terminates # reportThread.setDaemon(True) diff --git a/scripts/welcome.py b/scripts/welcome.py index 67f9800..0ae8350 100644 --- a/scripts/welcome.py +++ b/scripts/welcome.py @@ -426,7 +426,6 @@ queryLimit = 50 # number of users that the bot load to check quiet = False # Prevents users without contributions are displayed quick = False # Provide quick check by API bulk-retrieve user datas -## fileOption = False # check if the user wants to use a file or the wikipage
 class WelcomeBot(object):
@@ -468,7 +467,7 @@
         if not globalvar.filtBadName:
             return False
-        #initialize blacklist
+        # initialize blacklist
         if not hasattr(self, '_blacklist') or force:
             elenco = [
                 ' ano', ' anus', 'anal ', 'babies', 'baldracca', 'balle', 'bastardo',
@@ -560,7 +559,7 @@
         return False
     def reportBadAccount(self, name=None, final=False):
-        #Queue process
+        # Queue process
         if name:
             if globalvar.confirm:
                 answer = pywikibot.input_choice(
@@ -643,7 +642,7 @@
         if logPage.exists():
             text = logPage.get()
         else:
-            #make new log page
+            # make new log page
             showStatus()
             pywikibot.output(
                 'Log page is not exist, getting information for page creation')
@@ -657,7 +656,7 @@
             luser = pywikibot.url2link(result.name(), self.site, self.site)
             text += u'\n{{WLE|user=%s|contribs=%d}}' % (
                 luser, result.editCount())
-        #update log page.
+        # update log page.
         while True:
             try:
                 logPage.put(text, i18n.twtranslate(self.site,
@@ -735,8 +734,6 @@
                     pywikibot.output(u'%s might be a global bot!'
                                      % users.name())
                     continue
-            #if globalvar.offset != 0 and time.strptime(users.registrationTime(), "%Y-%m-%dT%H:%M:%SZ") >= globalvar.offset:
-            #    if users.editCount() >= globalvar.attachEditCount:
             showStatus(2)
             pywikibot.output(u'%s has enough edits to be welcomed.'
                              % users.name())
@@ -771,7 +768,7 @@
                 welcome_comment = i18n.twtranslate(self.site,
                                                    'welcome-welcome')
                 try:
-                    #append welcomed, welcome_count++
+                    # append welcomed, welcome_count++
                     ustp.put(welcome_text, welcome_comment,
                              minorEdit=False)
                     welcomed_count += 1
@@ -942,8 +939,8 @@
             globalvar.confirm = True
         elif arg == '-filter':
             globalvar.filtBadName = True
-        #elif arg == '-savedata':
-        #    globalvar.saveSignIndex = True
+        elif arg == '-savedata':
+            globalvar.saveSignIndex = True
         elif arg == '-random':
             globalvar.randomSign = True
         elif arg == '-sul':
diff --git a/tests/dry_site_tests.py b/tests/dry_site_tests.py
index 626cf7e..45daa10 100644
--- a/tests/dry_site_tests.py
+++ b/tests/dry_site_tests.py
@@ -215,10 +215,11 @@
     def test_need_version_fail_with_deprecated(self):
         """Test order of combined version check and deprecation warning."""
+        # FIXME: The deprecation message should be:
+        # __name__ + '.TestNeedVersion.deprecated_unavailable_method
         # The outermost decorator is the version check, so no deprecation message.
         self.assertRaisesRegex(
             NotImplementedError,
-            #__name__ + '.TestNeedVersion.deprecated_unavailable_method',
             'deprecated_unavailable_method',
             self.deprecated_unavailable_method)
         self.assertNoDeprecation()
@@ -226,7 +227,6 @@
         # The deprecator is first, but the version check still raises exception.
         self.assertRaisesRegex(
             NotImplementedError,
-            #__name__ + '.TestNeedVersion.deprecated_unavailable_method2',
             'deprecated_unavailable_method2',
             self.deprecated_unavailable_method2)
         self.assertDeprecation(
diff --git a/tests/site_tests.py b/tests/site_tests.py
index ca605d8..be66fba 100644
--- a/tests/site_tests.py
+++ b/tests/site_tests.py
@@ -1158,7 +1158,7 @@
             if error.code == u'badtoken':
                 raise unittest.SkipTest(error)
         except pywikibot.Error as error:
-            #expected result
+            # expected result
             pass
diff --git a/tox.ini b/tox.ini
index 5aa95f7..20b989d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,7 +19,7 @@
     flake8-docstrings
 [testenv:flake8-docstrings-mandatory]
-commands = flake8 --ignore=D102,D103,E122,E127,E241,E265
+commands = flake8 --ignore=D102,D103,E122,E127,E241
 deps =
     flake8>=2.2.5
     flake8-docstrings
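With E265 dropped from the ignore list above, the rule can also be checked in isolation before committing, e.g. (paths illustrative):

    flake8 --select=E265 pwb.py pywikibot scripts tests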
pywikibot-commits@lists.wikimedia.org