jenkins-bot has submitted this change and it was merged. ( https://gerrit.wikimedia.org/r/463257 )
Change subject: Fix all diff_checker related errors in scripts/*
......................................................................
Fix all diff_checker related errors in scripts/*
Change-Id: Ibedb230e13bc2be15bfc0cba229c257fff8fa595
---
M scripts/archive/featured.py
M scripts/archivebot.py
M scripts/casechecker.py
M scripts/category.py
M scripts/category_redirect.py
M scripts/cfd.py
M scripts/checkimages.py
M scripts/claimit.py
M scripts/commonscat.py
M scripts/data_ingestion.py
M scripts/delete.py
M scripts/fixing_redirects.py
M scripts/harvest_template.py
M scripts/illustrate_wikidata.py
M scripts/imagecopy_self.py
M scripts/imageuncat.py
M scripts/login.py
M scripts/lonelypages.py
M scripts/maintenance/cache.py
M scripts/maintenance/compat2core.py
M scripts/maintenance/make_i18n_dict.py
M scripts/maintenance/wikimedia_sites.py
M scripts/match_images.py
M scripts/noreferences.py
M scripts/nowcommons.py
M scripts/pagefromfile.py
M scripts/reflinks.py
M scripts/replace.py
M scripts/script_wui.py
M scripts/solve_disambiguation.py
M scripts/table2wiki.py
M scripts/transferbot.py
M scripts/weblinkchecker.py
M scripts/welcome.py
34 files changed, 206 insertions(+), 176 deletions(-)
Approvals:
  Xqt: Looks good to me, approved
  jenkins-bot: Verified
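The recurring pattern behind most hunks below, sketched as a hedged example (the variable and strings here are illustrative, not taken from the patch): diff_checker flags redundant u'' prefixes and double-quoted literals on changed lines, so strings become plain single-quoted ones, with double quotes kept only where the text itself contains an apostrophe.

    import pywikibot

    filename = 'cache/featured'
    # flagged: redundant u-prefix and double quotes on a plain string
    # pywikibot.output(u"Cache file %s not found." % filename)
    # accepted: plain single-quoted literal
    pywikibot.output('Cache file %s not found.' % filename)
    # double quotes stay when the literal contains an apostrophe
    pywikibot.output("Page doesn't exist")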
diff --git a/scripts/archive/featured.py b/scripts/archive/featured.py
index d21128b..5f5c406 100755
--- a/scripts/archive/featured.py
+++ b/scripts/archive/featured.py
@@ -122,11 +122,11 @@
     '_default': ['Link FA'],
     'als': ['LinkFA'],
     'an': ['Destacato', 'Destacau'],
-    'ar': [u'وصلة مقالة مختارة'],
+    'ar': ['وصلة مقالة مختارة'],
     'ast': ['Enllaz AD'],
     'az': ['Link FM'],
     'br': ['Liamm PuB', 'Lien AdQ'],
-    'ca': [u'Enllaç AD', 'Destacat'],
+    'ca': ['Enllaç AD', 'Destacat'],
     'cy': ['Cyswllt erthygl ddethol', 'Dolen ED'],
     'eo': ['LigoElstara'],
     'en': ['Link FA', 'FA link'],
@@ -135,36 +135,36 @@
     'fr': ['Lien AdQ'],
     'fur': ['Leam VdC'],
     'ga': ['Nasc AR'],
-    'gl': [u'Ligazón AD', 'Destacado'],
+    'gl': ['Ligazón AD', 'Destacado'],
     'hi': ['Link FA', 'Lien AdQ'],
-    'is': [u'Tengill ÚG'],
+    'is': ['Tengill ÚG'],
     'it': ['Link V', 'Link AdQ'],
     'no': ['Link UA'],
     'oc': ['Ligam AdQ', 'Lien AdQ'],
-    'ro': [u'Legătură AC', u'Legătură AF'],
+    'ro': ['Legătură AC', 'Legătură AF'],
     'sv': ['UA', 'Link UA'],
     'tr': ['Link SM'],
-    'vi': [u'Liên kết chọn lọc'],
-    'vo': [u'Yüm YG'],
-    'yi': [u'רא'],
+    'vi': ['Liên kết chọn lọc'],
+    'vo': ['Yüm YG'],
+    'yi': ['רא'],
 }

 template_good = {
     '_default': ['Link GA'],
-    'ar': [u'وصلة مقالة جيدة'],
-    'ca': [u'Enllaç AB', 'Lien BA', 'Abo'],
+    'ar': ['وصلة مقالة جيدة'],
+    'ca': ['Enllaç AB', 'Lien BA', 'Abo'],
     'da': ['Link GA', 'Link AA'],
     'eo': ['LigoLeginda'],
     'es': ['Bueno'],
     'fr': ['Lien BA'],
-    'gl': [u'Ligazón AB'],
+    'gl': ['Ligazón AB'],
     'is': ['Tengill GG'],
     'it': ['Link VdQ'],
     'nn': ['Link AA'],
     'no': ['Link AA'],
     'pt': ['Bom interwiki'],
     # 'tr': ['Link GA', 'Link KM'],
-    'vi': [u'Liên kết bài chất lượng tốt'],
+    'vi': ['Liên kết bài chất lượng tốt'],
     'wo': ['Lien BA'],
 }

@@ -174,7 +174,7 @@
 }

 featured_name = {
-    'wikidata': (DATA, u'Q4387444'),
+    'wikidata': (DATA, 'Q4387444'),
 }

 good_name = {
@@ -183,23 +183,23 @@
 lists_name = {
     'wikidata': (TMPL, 'Q5857568'),
-    'ar': (BACK, u'قائمة مختارة'),
-    'da': (BACK, u'FremragendeListe'),
-    'de': (BACK, u'Informativ'),
-    'en': (BACK, u'Featured list'),
-    'fa': (BACK, u"فهرست برگزیده"),
-    'id': (BACK, u'Featured list'),
-    'ja': (BACK, u'Featured List'),
+    'ar': (BACK, 'قائمة مختارة'),
+    'da': (BACK, 'FremragendeListe'),
+    'de': (BACK, 'Informativ'),
+    'en': (BACK, 'Featured list'),
+    'fa': (BACK, 'فهرست برگزیده'),
+    'id': (BACK, 'Featured list'),
+    'ja': (BACK, 'Featured List'),
     'ksh': (CAT, 'Joode Leß'),
-    'no': (BACK, u'God liste'),
-    'pl': (BACK, u'Medalista'),
-    'pt': (BACK, u'Anexo destacado'),
-    'ro': (BACK, u'Listă de calitate'),
-    'ru': (BACK, u'Избранный список или портал'),
-    'tr': (BACK, u'Seçkin liste'),
-    'uk': (BACK, u'Вибраний список'),
-    'vi': (BACK, u'Sao danh sách chọn lọc'),
-    'zh': (BACK, u'特色列表'),
+    'no': (BACK, 'God liste'),
+    'pl': (BACK, 'Medalista'),
+    'pt': (BACK, 'Anexo destacado'),
+    'ro': (BACK, 'Listă de calitate'),
+    'ru': (BACK, 'Избранный список или портал'),
+    'tr': (BACK, 'Seçkin liste'),
+    'uk': (BACK, 'Вибраний список'),
+    'vi': (BACK, 'Sao danh sách chọn lọc'),
+    'zh': (BACK, '特色列表'),
 }

 # Third parameter is the sort key indicating articles to hide from the given
@@ -221,7 +221,7 @@
         """Only accepts options defined in availableOptions."""
         self.availableOptions.update({
             'async': False,  # True for asynchronously putting a page
-            'afterpage': u"!",
+            'afterpage': '!',
             'count': False,  # featuredcount
             'featured': False,
             'former': False,
@@ -280,19 +280,19 @@
             return generator
         elif self.getOption('fromlang'):
             fromlang = self.getOption('fromlang')
-            if len(fromlang) == 1 and fromlang[0].find("--") >= 0:
-                start, end = fromlang[0].split("--", 1)
+            if len(fromlang) == 1 and fromlang[0].find('--') >= 0:
+                start, end = fromlang[0].split('--', 1)
                 if not start:
-                    start = ""
+                    start = ''
                 if not end:
-                    end = "zzzzzzz"
+                    end = 'zzzzzzz'
                 return (site for site in generator
                         if site.code >= start and site.code <= end)
             else:
                 return (site for site in generator
                         if site.code in fromlang)
         else:
-            pywikibot.warning(u'No sites given to verify %s articles.\n'
-                              u'Please use -fromlang: or fromall option\n'
+            pywikibot.warning('No sites given to verify %s articles.\n'
+                              'Please use -fromlang: or fromall option\n'
                               % task)
             return ()

@@ -314,34 +314,34 @@
     def readcache(self, task):
         if self.getOption('count') or self.getOption('nocache') is True:
             return
-        self.filename = pywikibot.config.datafilepath("cache", task)
+        self.filename = pywikibot.config.datafilepath('cache', task)
         try:
-            f = open(self.filename, "rb")
+            f = open(self.filename, 'rb')
             self.cache = pickle.load(f)
             f.close()
-            pywikibot.output(u'Cache file %s found with %d items.'
+            pywikibot.output('Cache file %s found with %d items.'
                              % (self.filename, len(self.cache)))
         except IOError:
-            pywikibot.output(u'Cache file %s not found.' % self.filename)
+            pywikibot.output('Cache file %s not found.' % self.filename)

     def writecache(self):
         if self.getOption('count'):
             return
         if not self.getOption('nocache') is True:
-            pywikibot.output(u'Writing %d items to cache file %s.'
+            pywikibot.output('Writing %d items to cache file %s.'
                              % (len(self.cache), self.filename))
-            with open(self.filename, "wb") as f:
+            with open(self.filename, 'wb') as f:
                 pickle.dump(self.cache, f, protocol=config.pickle_protocol)
         self.cache = {}

     def run(self):
         for task in self.tasks:
             self.run_task(task)
-        pywikibot.output(u'%d pages written.' % self._save_counter)
+        pywikibot.output('%d pages written.' % self._save_counter)

     def run_task(self, task):
         if not self.hastemplate(task):
-            pywikibot.output(u'\nNOTE: %s articles are not implemented at %s.'
+            pywikibot.output('\nNOTE: %s articles are not implemented at %s.'
                              % (task, self.site))
             return
@@ -369,7 +369,7 @@
             method = info[code][0]
         except KeyError:
             pywikibot.error(
-                u'language %s doesn\'t has %s category source.'
+                "language %s doesn't has %s category source."
                 % (code, task))
             return
         name = info[code][1]
@@ -393,13 +393,13 @@
         if p.title() < self.getOption('afterpage'):
             continue
- if u"/" in p.title() and p.namespace() != 0: - pywikibot.output(u"%s is a subpage" % p.title()) + if '/' in p.title() and p.namespace() != 0: + pywikibot.output('%s is a subpage' % p.title()) continue
if p.title() in cache: - pywikibot.output(u"(cached) %s -> %s" % (p.title(), - cache[p.title()])) + pywikibot.output('(cached) %s -> %s' % ( + p.title(), cache[p.title()])) continue yield p
@@ -418,27 +418,28 @@
if not ourpage: if not quiet: - pywikibot.output(u"%s -> no corresponding page in %s" + pywikibot.output('%s -> no corresponding page in %s' % (page.title(), oursite)) elif ourpage.section(): - pywikibot.output(u"%s -> our page is a section link: %s" + pywikibot.output('%s -> our page is a section link: %s' % (page.title(), ourpage.title())) elif not ourpage.exists(): - pywikibot.output(u"%s -> our page doesn't exist: %s" + pywikibot.output("%s -> our page doesn't exist: %s" % (page.title(), ourpage.title())) else: if ourpage.isRedirectPage(): ourpage = ourpage.getRedirectTarget()
- pywikibot.output(u"%s -> corresponding page is %s" + pywikibot.output('%s -> corresponding page is %s' % (page.title(), ourpage.title())) if ourpage.namespace() != 0: - pywikibot.output(u"%s -> not in the main namespace, skipping" + pywikibot.output('%s -> not in the main namespace, skipping' % page.title()) elif ourpage.isRedirectPage(): - pywikibot.output(u"%s -> double redirect, skipping" % page.title()) + pywikibot.output( + '%s -> double redirect, skipping' % page.title()) elif not ourpage.exists(): - pywikibot.output(u"%s -> page doesn't exist, skipping" + pywikibot.output("%s -> page doesn't exist, skipping" % ourpage.title()) else: backpage = None @@ -447,7 +448,8 @@ backpage = pywikibot.Page(link) break if not backpage: - pywikibot.output(u"%s -> no back interwiki ref" % page.title()) + pywikibot.output( + '%s -> no back interwiki ref' % page.title()) elif backpage == page: # everything is ok yield ourpage @@ -458,10 +460,10 @@ yield ourpage else: pywikibot.output( - u"%s -> back interwiki ref target is redirect to %s" + '%s -> back interwiki ref target is redirect to %s' % (page.title(), backpage.title())) else: - pywikibot.output(u"%s -> back interwiki ref target is %s" + pywikibot.output('%s -> back interwiki ref target is %s' % (page.title(), backpage.title()))
def getTemplateList(self, code, task): @@ -531,7 +533,7 @@ source = source.getRedirectTarget()
if not source.exists(): - pywikibot.output(u"source page doesn't exist: %s" + pywikibot.output("source page doesn't exist: %s" % source) continue
@@ -544,8 +546,8 @@
         def compile_link(site, templates):
             """Compile one link template list."""
             findtemplate = '(%s)' % '|'.join(templates)
-            return re.compile(r"\{\{%s\|%s\}\}"
-                              % (findtemplate.replace(u' ', u'[ _]'),
+            return re.compile(r'\{\{%s\|%s\}\}'
+                              % (findtemplate.replace(' ', '[ _]'),
                                  site.code), re.IGNORECASE)
     tosite = dest.site
@@ -560,26 +562,26 @@
         interactive = self.getOption('interactive')
         if add_tl:
             if m1:
-                pywikibot.output(u"(already added)")
+                pywikibot.output('(already added)')
             else:
                 # insert just before interwiki
                 if (not interactive or pywikibot.input_yn(
-                        u'Connecting %s -> %s. Proceed?'
+                        'Connecting %s -> %s. Proceed?'
                         % (source.title(), dest.title()),
                         default=False, automatic_quit=False)):
                     if self.getOption('side'):
                         # Placing {{Link FA|xx}} right next to
                         # corresponding interwiki
                         text = (text[:m1.end()] +
-                                u" {{%s|%s}}" % (add_tl[0], fromsite.code) +
+                                ' {{%s|%s}}' % (add_tl[0], fromsite.code) +
                                 text[m1.end():])
                     else:
                         # Moving {{Link FA|xx}} to top of interwikis
                         iw = textlib.getLanguageLinks(text, tosite)
                         text = textlib.removeLanguageLinks(text, tosite)
-                        text += u"%s{{%s|%s}}%s" % (config.LS, add_tl[0],
-                                                    fromsite.code, config.LS)
+                        text += '%s{{%s|%s}}%s' % (
+                            config.LS, add_tl[0], fromsite.code, config.LS)
                         text = textlib.replaceLanguageLinks(text, iw, tosite)
                     changed = True
@@ -588,13 +590,13 @@
             if (changed or  # Don't force the user to say "Y" twice
                     not interactive or pywikibot.input_yn(
-                        u'Connecting %s -> %s. Proceed?'
+                        'Connecting %s -> %s. Proceed?'
                         % (source.title(), dest.title()),
                         default=False, automatic_quit=False)):
                 text = re.sub(re_link_remove, '', text)
                 changed = True
         elif task == 'former':
-            pywikibot.output(u"(already removed)")
+            pywikibot.output('(already removed)')
         if changed:
             comment = i18n.twtranslate(tosite, 'featured-' + task,
                                        {'page': source})
@@ -602,10 +604,10 @@
                 dest.put(text, comment)
                 self._save_counter += 1
             except pywikibot.LockedPage:
-                pywikibot.output(u'Page %s is locked!'
+                pywikibot.output('Page %s is locked!'
                                  % dest.title())
             except pywikibot.PageNotSaved:
-                pywikibot.output(u"Page not saved")
+                pywikibot.output('Page not saved')

 def main(*args):
@@ -626,11 +628,11 @@
     for arg in local_args:
         if arg.startswith('-fromlang:'):
-            options[arg[1:9]] = arg[10:].split(",")
+            options[arg[1:9]] = arg[10:].split(',')
         elif arg.startswith('-after:'):
             options['afterpage'] = arg[7:]
         elif arg.startswith('-nocache:'):
-            options[arg[1:8]] = arg[9:].split(",")
+            options[arg[1:8]] = arg[9:].split(',')
         else:
             options[arg[1:].lower()] = True

@@ -638,5 +640,5 @@
     bot.run()

-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/scripts/archivebot.py b/scripts/archivebot.py
index 22e6418..e501dd8 100755
--- a/scripts/archivebot.py
+++ b/scripts/archivebot.py
@@ -485,7 +485,7 @@
     def update(self, summary, sort_threads=False):
         """Recombine threads and save page."""
         if sort_threads:
-            pywikibot.output(u'Sorting threads...')
+            pywikibot.output('Sorting threads...')
             self.threads.sort(key=lambda t: t.timestamp)
         newtext = re.sub('\n*$', '\n\n', self.header)  # Fix trailing newlines
         for t in self.threads:
diff --git a/scripts/casechecker.py b/scripts/casechecker.py
index ecac4c7..51c4904 100755
--- a/scripts/casechecker.py
+++ b/scripts/casechecker.py
@@ -325,7 +325,7 @@
                 if len(err[1]) == 1:
                     newTitle = err[1][0]
                     editSummary = i18n.twtranslate(
-                        self.site, "casechecker-rename")
+                        self.site, 'casechecker-rename')
                     dst = self.Page(newTitle)
         if 'redirect' in page:
@@ -744,7 +744,7 @@
             prf = '' if self.Page(title).namespace() == 0 else ':'
             cc = '|««« {} »»»'.format(
                 self.ColorCodeWord(title) if colorcode else '')
-            return u"[[%s%s%s]]" % (prf, title, cc)
+            return '[[%s%s%s]]' % (prf, title, cc)

     def OpenLogFile(self, filename):
         """Open logfile."""
@@ -780,6 +780,6 @@
         return text

-if __name__ == "__main__":
+if __name__ == '__main__':
     bot = CaseChecker()
     bot.Run()
diff --git a/scripts/category.py b/scripts/category.py
index 6e379bb..049aaec 100755
--- a/scripts/category.py
+++ b/scripts/category.py
@@ -931,7 +931,7 @@
                 {'fromcat': self.cat.title(),
                  'num': len(setOfArticles)})

-        listString = ""
+        listString = ''
         for article in setOfArticles:
             if (not article.is_filepage() or
                     self.showImages) and not article.is_categorypage():
@@ -1478,7 +1478,8 @@
             new_cat_title = pywikibot.input(
                 'Please enter the new name of the category:')
         if use_deletion_summary:
-            deletion_comment = CategoryMoveRobot.DELETION_COMMENT_SAME_AS_EDIT_COMMENT
+            deletion_comment = \
+                CategoryMoveRobot.DELETION_COMMENT_SAME_AS_EDIT_COMMENT
         else:
             deletion_comment = CategoryMoveRobot.DELETION_COMMENT_AUTOMATIC
         bot = CategoryMoveRobot(oldcat=old_cat_title,
diff --git a/scripts/category_redirect.py b/scripts/category_redirect.py
index bc9fdee..ccc68cc 100755
--- a/scripts/category_redirect.py
+++ b/scripts/category_redirect.py
@@ -73,7 +73,8 @@
         # Category that contains all redirected category pages
         self.cat_redirect_cat = {
             'commons': 'Category:Category redirects',
-            'meta': 'Category:Maintenance of categories/Soft redirected categories',
+            'meta': 'Category:Maintenance of categories/Soft redirected '
+                    'categories',
             'ar': 'تصنيف:تحويلات تصنيفات ويكيبيديا',
             'cs': 'Kategorie:Údržba:Zastaralé kategorie',
             'da': 'Kategori:Omdirigeringskategorier',
diff --git a/scripts/cfd.py b/scripts/cfd.py
index d514a5b..86f7073 100755
--- a/scripts/cfd.py
+++ b/scripts/cfd.py
@@ -30,11 +30,13 @@
 DEFAULT_CFD_PAGE = 'Wikipedia:Categories for discussion/Working'

 # A list of templates that are used on category pages as part of the CFD
-# process that contain information such as the link to the per-day discussion page.
+# process that contain information such as the link to the per-day discussion
+# page.
 cfdTemplates = ['Cfd full', 'Cfr full']

 # Regular expression declarations
-# See the en-wiki CFD working page at [[Wikipedia:Categories for discussion/Working]]
+# See the en-wiki CFD working page at
+# [[Wikipedia:Categories for discussion/Working]]
 # to see how these work in context. To get this bot working on other wikis you
 # will need to adjust these regular expressions at the very least.
 nobots = re.compile(r'NO\s*BOTS', re.IGNORECASE)
@@ -88,7 +90,8 @@
     for arg in local_args:
         if arg.startswith('-page'):
             if len(arg) == len('-page'):
-                cfd_page = pywikibot.input('Enter the CFD working page to use:')
+                cfd_page = pywikibot.input(
+                    'Enter the CFD working page to use:')
             else:
                 cfd_page = arg[len('-page:'):]

@@ -128,7 +131,8 @@
                 mode = 'Delete'
                 day = 'None'
             elif maintenance.search(line):
-                # It's probably best not to try to handle these in an automated fashion.
+                # It's probably best not to try to handle these in an
+                # automated fashion.
                 mode = 'None'
                 day = 'None'
             elif m.check(dateheader, line):
@@ -142,8 +146,9 @@
                     summary = (
                         'Robot - Moving category ' + src + ' to [[:Category:'
                         + dest + ']] per [[WP:CFD|CFD]] at ' + thisDay + '.')
-                    action_summary = 'Robot - Result of [[WP:CFD|CFD]] at ' + thisDay + '.'
-                elif mode == "Speedy":
+                    action_summary = \
+                        'Robot - Result of [[WP:CFD|CFD]] at ' + thisDay + '.'
+                elif mode == 'Speedy':
                     summary = (
                         'Robot - Speedily moving category ' + src
                         + ' to [[:Category:' + dest + ']] per [[WP:CFDS|CFDS]].')
@@ -177,7 +182,8 @@
                     summary = (
                         'Robot - Removing category {0} per [[WP:CFD|CFD]] '
                         'at {1}.'.format(src, thisDay))
-                    action_summary = 'Robot - Result of [[WP:CFD|CFD]] at ' + thisDay + '.'
+                    action_summary = \
+                        'Robot - Result of [[WP:CFD|CFD]] at ' + thisDay + '.'
                 else:
                     continue
                 robot = CategoryMoveBot(oldcat=src, batch=True, comment=summary,
diff --git a/scripts/checkimages.py b/scripts/checkimages.py
index 43333ee..f27350b 100755
--- a/scripts/checkimages.py
+++ b/scripts/checkimages.py
@@ -1019,8 +1019,9 @@
                     "('''forced mode'''):") % self.image.title(as_url=True))
         else:
-            repme = ((self.list_entry + 'has the following duplicates:')
-                     % self.image.title(as_url=True))
+            repme = (
+                (self.list_entry + 'has the following duplicates:')
+                % self.image.title(as_url=True))

         for dup_page in duplicates:
             if (
diff --git a/scripts/claimit.py b/scripts/claimit.py
index 86eda37..348c5e5 100755
--- a/scripts/claimit.py
+++ b/scripts/claimit.py
@@ -139,12 +139,14 @@
         elif claim.type == 'string':
             target = commandline_claims[i + 1]
         elif claim.type == 'globe-coordinate':
-            coord_args = [float(c) for c in commandline_claims[i + 1].split(',')]
+            coord_args = [
+                float(c) for c in commandline_claims[i + 1].split(',')]
             if len(coord_args) >= 3:
                 precision = coord_args[2]
             else:
                 precision = 0.0001  # Default value (~10 m at equator)
-            target = pywikibot.Coordinate(coord_args[0], coord_args[1], precision=precision)
+            target = pywikibot.Coordinate(
+                coord_args[0], coord_args[1], precision=precision)
         else:
             raise NotImplementedError(
                 '{} datatype is not yet supported by claimit.py'
diff --git a/scripts/commonscat.py b/scripts/commonscat.py
index 71a95cf..8b4062a 100755
--- a/scripts/commonscat.py
+++ b/scripts/commonscat.py
@@ -345,9 +345,10 @@
                 return True
         return True

-    def changeCommonscat(self, page=None, oldtemplate='', oldcat='',
-                         newtemplate='', newcat='', linktitle='',
-                         description=NotImplemented):  # pylint: disable=unused-argument
+    def changeCommonscat(
+            self, page=None, oldtemplate='', oldcat='',
+            newtemplate='', newcat='', linktitle='',
+            description=NotImplemented):  # pylint: disable=unused-argument
         """Change the current commonscat template and target."""
         if oldcat == '3=S' or linktitle == '3=S':
             return  # TODO: handle additional param on de-wiki
diff --git a/scripts/data_ingestion.py b/scripts/data_ingestion.py
index e4ad76e..348cfbc 100755
--- a/scripts/data_ingestion.py
+++ b/scripts/data_ingestion.py
@@ -98,7 +98,8 @@
         hashObject = hashlib.sha1()
         hashObject.update(self.downloadPhoto().getvalue())
         return [page.title(with_ns=False) for page in
-                self.site.allimages(sha1=base64.b16encode(hashObject.digest()))]
+                self.site.allimages(
+                    sha1=base64.b16encode(hashObject.digest()))]
     def getTitle(self, fmt):
         """
@@ -164,7 +165,7 @@
         @type site: APISite, 'deprecated_default_commons' or None
         """
         if site == 'deprecated_default_commons':
-            warn('site=\'deprecated_default_commons\' is deprecated; '
+            warn("site='deprecated_default_commons' is deprecated; "
                  'please specify a site or use site=None',
                  DeprecationWarning, 2)
             site = pywikibot.Site('commons', 'commons')
diff --git a/scripts/delete.py b/scripts/delete.py
index ec62481..158acf9 100755
--- a/scripts/delete.py
+++ b/scripts/delete.py
@@ -200,8 +200,9 @@
         if self.getOption('orphansonly'):
             namespaces = self.getOption('orphansonly')
-            ns_with_ref = self.current_page.namespaces_with_ref_to_page(
-                namespaces)
+            ns_with_ref = \
+                self.current_page.namespaces_with_ref_to_page(
+                    namespaces)
             ns_with_ref = sorted(list(ns_with_ref))
             if ns_with_ref:
                 ns_names = ', '.join(str(ns.id) for ns in ns_with_ref)
@@ -258,7 +259,7 @@
             options['isorphan'] = False
         elif arg.startswith('-orphansonly'):
             if arg[13:]:
-                namespaces = mysite.namespaces.resolve(arg[13:].split(","))
+                namespaces = mysite.namespaces.resolve(arg[13:].split(','))
             else:
                 namespaces = mysite.namespaces
             options['orphansonly'] = namespaces
@@ -279,8 +280,8 @@
                                        un + 'delete-linked-pages',
                                        {'page': page_name})
         elif arg.startswith('-ref'):
-            summary = i18n.twtranslate(mysite, 'delete-referring-pages',
-                                       {'page': page_name})
+            summary = i18n.twtranslate(
+                mysite, 'delete-referring-pages', {'page': page_name})
         elif arg.startswith('-imageused'):
             summary = i18n.twtranslate(mysite, un + 'delete-images',
                                        {'page': page_name})
diff --git a/scripts/fixing_redirects.py b/scripts/fixing_redirects.py
index f9933104..8c1f212 100755
--- a/scripts/fixing_redirects.py
+++ b/scripts/fixing_redirects.py
@@ -54,8 +54,9 @@
     linktrail = mysite.linktrail()

     # make a backup of the original text so we can show the changes later
-    linkR = re.compile(r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?'
-                       r'(\|(?P<label>[^\]]*))?\]\](?P<linktrail>' + linktrail + ')')
+    linkR = re.compile(
+        r'\[\[(?P<title>[^\]\|#]*)(?P<section>#[^\]\|]*)?'
+        r'(\|(?P<label>[^\]]*))?\]\](?P<linktrail>' + linktrail + ')')
     curpos = 0
     # This loop will run until we have finished the current page
     while True:
@@ -71,7 +72,8 @@
                 or isDisabled(text, m.start())):
             continue
         else:
-            actualLinkPage = pywikibot.Page(targetPage.site, m.group('title'))
+            actualLinkPage = pywikibot.Page(
+                targetPage.site, m.group('title'))
             # Check whether the link found is to page.
             if actualLinkPage != linkedPage:
                 continue
@@ -111,8 +113,8 @@

     if (new_page_title == link_text and not section):
         newlink = '[[{}]]'.format(new_page_title)
-    # check if we can create a link with trailing characters instead of a
-    # pipelink
+    # check if we can create a link with trailing characters instead of
+    # a pipelink
     elif (len(new_page_title) <= len(link_text) and
           firstcap(link_text[:len(new_page_title)]) ==
           firstcap(new_page_title) and
diff --git a/scripts/harvest_template.py b/scripts/harvest_template.py
index 73aff99..6ee8563 100755
--- a/scripts/harvest_template.py
+++ b/scripts/harvest_template.py
@@ -149,7 +149,7 @@
         })
         super(HarvestRobot, self).__init__(**kwargs)
         self.generator = generator
-        # TODO: Make it a list which also includes the redirects to the template
+        # TODO: Make it a list including the redirects to the template
         self.fields = {}
         for key, value in fields.items():
             if isinstance(value, tuple):
diff --git a/scripts/illustrate_wikidata.py b/scripts/illustrate_wikidata.py
index 350ec36..58378e0 100755
--- a/scripts/illustrate_wikidata.py
+++ b/scripts/illustrate_wikidata.py
@@ -32,7 +32,7 @@

     """A bot to add Wikidata image claims."""

-    def __init__(self, generator, wdproperty=u'P18'):
+    def __init__(self, generator, wdproperty='P18'):
         """
         Initializer.
diff --git a/scripts/imagecopy_self.py b/scripts/imagecopy_self.py
index 3c51a4b..4730d16 100644
--- a/scripts/imagecopy_self.py
+++ b/scripts/imagecopy_self.py
@@ -512,7 +512,7 @@
         text = imagepage.get()
         # text = re.sub(u'== Summary ==', u'', text, re.IGNORECASE)
         # text = re.sub(u'== Licensing ==', u'', text, re.IGNORECASE)
-        # text = re.sub('\{\{(self|self2)\|[^}]+\}\}', '', text, re.IGNORECASE)
+        # text = re.sub('\{\{(self|self2)\|[^}]+\}\}', '', text, re.I)
         for toRemove in sourceGarbage[imagepage.site.lang]:
             text = re.sub(toRemove, '', text, flags=re.IGNORECASE)
@@ -718,7 +718,7 @@
         self.skip = False

         # Start building the page
-        self.root.geometry("1500x400+100-100")
+        self.root.geometry('1500x400+100-100')
         self.root.title(self.filename)

         self.url = self.imagepage.permalink()
@@ -738,10 +738,10 @@
             self.root, text='The old description was : ')
         self.new_description_label = Tkinter.Label(
             self.root, text='The new fields are : ')
-        self.filename_label = Tkinter.Label(self.root, text=u'Filename : ')
+        self.filename_label = Tkinter.Label(self.root, text='Filename : ')
         self.information_description_label = Tkinter.Label(
             self.root, text='Description : ')
-        self.information_date_label = Tkinter.Label(self.root, text=u'Date : ')
+        self.information_date_label = Tkinter.Label(self.root, text='Date : ')
         self.information_source_label = Tkinter.Label(self.root,
                                                       text='Source : ')
         self.information_author_label = Tkinter.Label(self.root,
diff --git a/scripts/imageuncat.py b/scripts/imageuncat.py
index d63f3a0..49a8cdc 100755
--- a/scripts/imageuncat.py
+++ b/scripts/imageuncat.py
@@ -1254,7 +1254,9 @@
     today = pywikibot.Timestamp.utcnow()
     yesterday = today + timedelta(days=-1)

-    for logentry in site.logevents(logtype='upload', start=yesterday, end=today, reverse=True):
+    for logentry in site.logevents(
+        logtype='upload', start=yesterday, end=today, reverse=True
+    ):
         yield logentry.page()
diff --git a/scripts/login.py b/scripts/login.py
index 969ddff..161f17f 100755
--- a/scripts/login.py
+++ b/scripts/login.py
@@ -104,7 +104,7 @@
                 'consumer': consumer_key})
             pywikibot.output('NOTE: To use OAuth, you need to copy the '
                              'following line to your user-config.py:')
-            pywikibot.output('authenticate[\'%(hostname)s\'] = %(oauth_token)s' %
+            pywikibot.output("authenticate['%(hostname)s'] = %(oauth_token)s" %
                              {'hostname': site.hostname(),
                               'oauth_token': oauth_token})
@@ -142,7 +142,7 @@
                 "revelant lines from '{0}' (or the entire file) "
                 'and try again.'
                 .format(join(config.base_dir, 'pywikibot.lwp')))
-        elif arg == "-logout":
+        elif arg == '-logout':
             logout = True
         elif arg == '-oauth':
             oauth = True
diff --git a/scripts/lonelypages.py b/scripts/lonelypages.py
index 39b7cdd..d2adce2 100755
--- a/scripts/lonelypages.py
+++ b/scripts/lonelypages.py
@@ -89,11 +89,14 @@
 # The orphan template names in the different languages.
 _templates = {
-    'af': ('Weesbladsy', 'datum={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}', ['wi']),
+    'af': ('Weesbladsy', 'datum={{subst:CURRENTMONTHNAME}} '
+           '{{subst:CURRENTYEAR}}', ['wi']),
     'ar': ('يتيمة', 'تاريخ={{نسخ:اسم_شهر}} {{نسخ:عام}}'),
     'ca': ('Orfe', 'date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}'),
-    'en': ('Orphan', 'date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}', ['wi']),
-    'it': ('O', '||mese={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}', ['a']),
+    'en': ('Orphan', 'date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}',
+           ['wi']),
+    'it': ('O', '||mese={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}',
+           ['a']),
     'ja': ('孤立', '{{subst:DATE}}'),
     'ko': ('외톨이', '{{{{{|안전풀기:}}}#timel:Y-m-d|now}}'),
     'zh': ('Orphan/auto', '', ['orphan'], True),
@@ -129,13 +132,15 @@
             orphan_template = e
         if orphan_template is None or isinstance(orphan_template, ValueError):
             err_message = 'Missing configuration for site {}'.format(self.site)
-            suggest_help(exception=orphan_template, additional_text=err_message)
+            suggest_help(
+                exception=orphan_template, additional_text=err_message)
             sys.exit(err_message)
         else:
             self._settings = orphan_template
         # DisambigPage part
         if self.getOption('disambigPage') is not None:
-            self.disambigpage = pywikibot.Page(self.site, self.getOption('disambigPage'))
+            self.disambigpage = pywikibot.Page(
+                self.site, self.getOption('disambigPage'))
             try:
                 self.disambigtext = self.disambigpage.get()
             except pywikibot.NoPage:
@@ -209,7 +214,8 @@
                 'Your regex has found something in {0}, skipping...'
                 .format(page.title()))
             return
-        if page.isDisambig() and self.getOption('disambigPage') is not None:
+        if (page.isDisambig()
+                and self.getOption('disambigPage') is not None):
             pywikibot.output('{0} is a disambig page, report..'
                              .format(page.title()))
             if not page.title().lower() in self.disambigtext.lower():
diff --git a/scripts/maintenance/cache.py b/scripts/maintenance/cache.py
index e3be450..445a277 100755
--- a/scripts/maintenance/cache.py
+++ b/scripts/maintenance/cache.py
@@ -271,7 +271,7 @@
         try:
             entry.parse_key()
         except ParseError:
-            pywikibot.error(u'Problems parsing %s with key %s'
+            pywikibot.error('Problems parsing %s with key %s'
                             % (entry.filename, entry.key))
             pywikibot.exception()
             continue
@@ -279,7 +279,7 @@
         try:
             entry._rebuild()
         except Exception as e:
-            pywikibot.error(u'Problems loading %s with key %s, %r'
+            pywikibot.error('Problems loading %s with key %s, %r'
                             % (entry.filename, entry.key, entry._parsed_key))
             pywikibot.exception(e, tb=True)
             continue
@@ -452,7 +452,7 @@

     for cache_path in cache_paths:
         if len(cache_paths) > 1:
-            pywikibot.output(u'Processing %s' % cache_path)
+            pywikibot.output('Processing %s' % cache_path)
         process_entries(cache_path, filter_func, output_func=output_func,
                         action_func=action_func)

diff --git a/scripts/maintenance/compat2core.py b/scripts/maintenance/compat2core.py
index 00afe9d..4caf605 100755
--- a/scripts/maintenance/compat2core.py
+++ b/scripts/maintenance/compat2core.py
@@ -99,8 +99,8 @@
     ('.replaceImage(',
      'Page.replaceImage() is deprecated and does not work at core'),
     ('.getVersionHistory(',
-     'Page.getVersionHistory() returns a pywikibot.Timestamp object instead of\n'
-     'a MediaWiki one'),
+     'Page.getVersionHistory() returns a pywikibot.Timestamp object instead of'
+     '\na MediaWiki one'),
     ('.contributions(',
      'User.contributions() returns a pywikibot.Timestamp object instead of a\n'
      'MediaWiki one'),
diff --git a/scripts/maintenance/make_i18n_dict.py b/scripts/maintenance/make_i18n_dict.py
index 479295e..49194ed 100755
--- a/scripts/maintenance/make_i18n_dict.py
+++ b/scripts/maintenance/make_i18n_dict.py
@@ -4,28 +4,24 @@
 Generate a i18n file from a given script.

 run IDLE at topmost level:
-
 >>> import pwb
 >>> from scripts.maintenance.make_i18n_dict import i18nBot
 >>> bot = i18nBot('<scriptname>', '<msg dict>')
 >>> bot.run()

 If you have more than one message dictionary, give all these names to the bot:
-
 >>> bot = i18nBot('<scriptname>', '<msg dict1>', '<msg dict2>', '<msg dict3>')

 If you want to rename the message index use keyword arguments. This may be
 mixed with preleading positonal argumens:
-
 >>> bot = i18nBot('<scriptname>', '<msg dict1>', the_other_msg='<msg dict2>')

 If you have the messages as instance constants you may call the bot as follows:
-
->>> bot = i18nBot('<scriptname>.<class instance>', '<msg dict1>', '<msg dict2>')
+>>> bot = i18nBot(
+...     '<scriptname>.<class instance>', '<msg dict1>', '<msg dict2>')

 It's also possible to make json files too by using to_json method after
 instantiating the bot. It also calls C{bot.run()} to create the dictionaries:
-
 >>> bot.to_json()

 """
 #
diff --git a/scripts/maintenance/wikimedia_sites.py b/scripts/maintenance/wikimedia_sites.py
index 493a1c6..6a1ff94 100755
--- a/scripts/maintenance/wikimedia_sites.py
+++ b/scripts/maintenance/wikimedia_sites.py
@@ -70,18 +70,18 @@
         i -= 1
     if original == new:
-        pywikibot.output(u'The lists match!')
+        pywikibot.output('The lists match!')
     else:
-        pywikibot.output(u"The lists don't match, the new list is:")
+        pywikibot.output("The lists don't match, the new list is:")
         text = '    languages_by_size = [\n'
         line = ' ' * 7
         for code in new:
             if len(line) + len(code) < 76:
-                line += u" '%s'," % code
+                line += " '%s'," % code
             else:
                 text += '%s\n' % line
                 line = ' ' * 7
-                line += u" '%s'," % code
+                line += " '%s'," % code
         text += '%s\n' % line
         text += '    ]'
         pywikibot.output(text)
diff --git a/scripts/match_images.py b/scripts/match_images.py
index e9f6899..b923d16 100755
--- a/scripts/match_images.py
+++ b/scripts/match_images.py
@@ -139,7 +139,7 @@

 def main(*args):
-    """Extracting file page information of images to work on and initiate matching."""
+    """Extracting file page information and initiate matching."""
     images = []
     other_family = ''
     other_lang = ''
diff --git a/scripts/noreferences.py b/scripts/noreferences.py
index 72cbc7f..1287795 100755
--- a/scripts/noreferences.py
+++ b/scripts/noreferences.py
@@ -524,7 +524,9 @@
             return False
         elif self.referencesTemplates:
             templateR = '{{(' + '|'.join(self.referencesTemplates) + ')'
-            if re.search(templateR, oldTextCleaned, re.IGNORECASE | re.UNICODE):
+            if re.search(
+                templateR, oldTextCleaned, re.IGNORECASE | re.UNICODE
+            ):
                 if self.getOption('verbose'):
                     pywikibot.output(
                         'No changes necessary: references template found.')
@@ -711,7 +713,8 @@
         if self.lacksReferences(text):
             newText = self.addReferences(text)
             try:
-                self.userPut(page, page.text, newText, summary=self.comment)
+                self.userPut(
+                    page, page.text, newText, summary=self.comment)
             except pywikibot.EditConflict:
                 pywikibot.warning('Skipping {0} because of edit conflict'
                                   .format(page.title(as_link=True)))
@@ -745,7 +748,8 @@
             xmlFilename = i18n.input('pywikibot-enter-xml-filename')
         else:
             xmlFilename = arg[5:]
-        genFactory.gens.append(XmlDumpNoReferencesPageGenerator(xmlFilename))
+        genFactory.gens.append(
+            XmlDumpNoReferencesPageGenerator(xmlFilename))
     elif arg == '-always':
         options['always'] = True
     elif arg == '-quiet':
diff --git a/scripts/nowcommons.py b/scripts/nowcommons.py
index 7500f07..3add65c 100755
--- a/scripts/nowcommons.py
+++ b/scripts/nowcommons.py
@@ -242,7 +242,8 @@
                 filenameOnCommons = par[par.index(':') + 1:]
                 break
             if val[0].strip() == '1':
-                filenameOnCommons = val[1].strip()[val[1].strip().index(':') + 1:]
+                filenameOnCommons = \
+                    val[1].strip()[val[1].strip().index(':') + 1:]
                 break
         skip = True
     if not filenameOnCommons:
@@ -279,7 +280,8 @@
         usingPages = list(localImagePage.usingPages())
         if usingPages and usingPages != [localImagePage]:
             pywikibot.output(color_format(
-                '"{lightred}{0}{default}" is still used in {1} pages.',
+                '"{lightred}{0}{default}" '
+                'is still used in {1} pages.',
                 localImagePage.title(with_ns=False),
                 len(usingPages)))
             if self.getOption('replace') is True:
@@ -298,7 +300,8 @@
                     # If the image is used with the urlname the
                     # previous function won't work
                     is_used = bool(list(pywikibot.FilePage(
-                        self.site, page.title()).usingPages(total=1))))
+                        self.site,
+                        page.title()).usingPages(total=1)))
                     if is_used and self.getOption('replaceloose'):
                         bot = ImageBot(
                             pg.FileLinksGenerator(
@@ -329,8 +332,8 @@
                 if len(localImagePage.getFileVersionHistory()) > 1:
                     pywikibot.output(
                         'This image has a version history. Please '
-                        'delete it manually after making sure that the '
-                        'old versions are not worth keeping.')
+                        'delete it manually after making sure that '
+                        'the old versions are not worth keeping.')
                     continue
                 if self.getOption('always') is False:
                     format_str = color_format(
diff --git a/scripts/pagefromfile.py b/scripts/pagefromfile.py
index 52f1d44..a62c801 100755
--- a/scripts/pagefromfile.py
+++ b/scripts/pagefromfile.py
@@ -296,7 +296,7 @@
     @param args: command line arguments
     @type args: unicode
     """
-    filename = "dict.txt"
+    filename = 'dict.txt'
     options = {}
     r_options = {}
diff --git a/scripts/reflinks.py b/scripts/reflinks.py
index 41e3b2a..52d33a5 100755
--- a/scripts/reflinks.py
+++ b/scripts/reflinks.py
@@ -143,7 +143,8 @@
     # anywhere
     |.*(
         403[ ]forbidden
-        |(404|page|file|information|resource).*not([ ]*be)?[ ]*(available|found)
+        |(404|page|file|information|resource).*not([ ]*be)?[ ]*
+        (available|found)
         |site.*disabled
         |error[ ]404
         |error.+not[ ]found
@@ -247,7 +248,7 @@
         # avoid multiple } being interpreted as a template inclusion
         self.title = self.title.replace('}}', '}&#125;')
         # prevent multiple quotes being interpreted as '' or '''
-        self.title = self.title.replace('\'\'', '&#39;&#39;')
+        self.title = self.title.replace("''", "&#39;&#39;")
         self.title = pywikibot.unicode2html(self.title, self.site.encoding())
         # TODO : remove HTML when both opening and closing tags are included

@@ -640,13 +641,13 @@
             tmp = s.group('enc').strip("\"' ").lower()
             naked = re.sub(r'[ _-]', '', tmp)
             # Convert to python correct encoding names
-            if naked == "gb2312":
-                enc.append("gbk")
-            elif naked == "shiftjis":
-                enc.append("shift jis 2004")
-                enc.append("cp932")
-            elif naked == "xeucjp":
-                enc.append("euc-jp")
+            if naked == 'gb2312':
+                enc.append('gbk')
+            elif naked == 'shiftjis':
+                enc.append('shift jis 2004')
+                enc.append('cp932')
+            elif naked == 'xeucjp':
+                enc.append('euc-jp')
             else:
                 enc.append(tmp)
         else:
diff --git a/scripts/replace.py b/scripts/replace.py
index dd6924d..3fb4157 100755
--- a/scripts/replace.py
+++ b/scripts/replace.py
@@ -859,7 +859,7 @@
     pattern = pattern.replace(r'\d', '[:digit:]')
     pattern = pattern.replace(r'\w', '[:alnum:]')

-    pattern = pattern.replace("'", "\\" + "'")
+    pattern = pattern.replace("'", '\\' + "'")
     # pattern = pattern.replace('\\', '\\\\')
     # for char in ['[', ']', "'"]:
     #     pattern = pattern.replace(char, '\%s' % char)
diff --git a/scripts/script_wui.py b/scripts/script_wui.py
index b327274..efeac37 100755
--- a/scripts/script_wui.py
+++ b/scripts/script_wui.py
@@ -279,21 +279,21 @@
     logHandler.flush()
     buffer.flush()
-    pywikibot.output(u'--- ' * 20)
+    pywikibot.output('--- ' * 20)

     # safety; restore settings
     pywikibot.config.simulate = __simulate
     sys.argv = __sys_argv
     if resource:
         pywikibot.output(
-            u'environment: garbage; %s / memory; %s / members; %s' % (
+            'environment: garbage; %s / memory; %s / members; %s' % (
                 gc.collect(),
                 resource.getrusage(resource.RUSAGE_SELF).ru_maxrss *
                 resource.getpagesize(),
                 len(dir())))
     else:
         pywikibot.output(
-            u'environment: garbage; %s / members; %s' % (
+            'environment: garbage; %s / members; %s' % (
                 gc.collect(), len(dir())))
     # 'len(dir())' is equivalent to 'len(inspect.getmembers(__main__))'

@@ -352,7 +352,7 @@
         if hasattr(value, 'format'):
             bot_config[key] = value.format(username=bot_user_name)

-    bot = ScriptWUIBot(site, chan, site.user() + "_WUI", "irc.wikimedia.org")
+    bot = ScriptWUIBot(site, chan, site.user() + '_WUI', 'irc.wikimedia.org')
     try:
         bot.start()
     except BaseException:
@@ -360,5 +360,5 @@
         raise

-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/scripts/solve_disambiguation.py b/scripts/solve_disambiguation.py
index 9196e66..5385413 100755
--- a/scripts/solve_disambiguation.py
+++ b/scripts/solve_disambiguation.py
@@ -522,7 +522,7 @@

     def result(self, value):
         """Add the alternative and then list them."""
-        newAlternative = pywikibot.input(u'New alternative:')
+        newAlternative = pywikibot.input('New alternative:')
         self._outputter.sequence.append(newAlternative)
         super(AddAlternativeOption, self).result(value)

diff --git a/scripts/table2wiki.py b/scripts/table2wiki.py
index cd2d7b6..91bd44e 100644
--- a/scripts/table2wiki.py
+++ b/scripts/table2wiki.py
@@ -164,7 +164,7 @@
         # <th> often people don't write them within <tr>, be warned!
         # <th> with attributes
         new_table = re.sub(
-            r"(?i)[\r\n]+<th(?P<attr> [^>]*?)>(?P<header>[\w\W]*?)</th>",
+            r'(?i)[\r\n]+<th(?P<attr> [^>]*?)>(?P<header>[\w\W]*?)</th>',
             r'\r\n!\g<attr> | \g<header>\r\n', new_table)

         # <th> without attributes
diff --git a/scripts/transferbot.py b/scripts/transferbot.py
index b26a67f..a858e72 100755
--- a/scripts/transferbot.py
+++ b/scripts/transferbot.py
@@ -139,7 +139,7 @@
                      + page.title(with_ns=False))
     targetpage = pywikibot.Page(tosite, target_title)
     edithistpage = pywikibot.Page(tosite, target_title + '/edithistory')
-    summary = 'Moved page from {old} ([[{new}/edithistory|history]])'\
+    summary = 'Moved page from {old} ([[{new}/edithistory|history]])' \
        .format(old=page.title(as_link=True, insite=tosite),
               new=targetpage.title() if not
               targetpage.namespace().subpages else '')
diff --git a/scripts/weblinkchecker.py b/scripts/weblinkchecker.py
index 60add89..1089399 100755
--- a/scripts/weblinkchecker.py
+++ b/scripts/weblinkchecker.py
@@ -910,9 +910,9 @@
         except threading.ThreadError:
             pywikibot.warning(
                 "Can't start a new thread.\nPlease decrease "
-                "max_external_links in your user-config.py or use\n"
+                'max_external_links in your user-config.py or use\n'
                 "'-max_external_links:' option with a smaller value. "
-                "Default is 50.")
+                'Default is 50.')
             raise

diff --git a/scripts/welcome.py b/scripts/welcome.py
index 79da206..39e6179 100755
--- a/scripts/welcome.py
+++ b/scripts/welcome.py
@@ -288,7 +288,7 @@
 # The page where the bot will report users with a possibly bad username.
 report_page = {
     'commons': ("Project:Administrators'noticeboard/User problems/Usernames"
-                "to be checked"),
+                'to be checked'),
     'wikipedia': {
         'am': 'User:Beria/Report',
         'ar': 'Project:إخطار الإداريين/أسماء مستخدمين للفحص',
@@ -517,8 +517,8 @@
         'zoccola',
     ]
     elenco_others = [
-        '@', ".com", ".sex", ".org", ".uk", ".en", ".it", "admin",
-        "administrator", "amministratore", '@yahoo.com', '@alice.com',
+        '@', '.com', '.sex', '.org', '.uk', '.en', '.it', 'admin',
+        'administrator', 'amministratore', '@yahoo.com', '@alice.com',
         'amministratrice', 'burocrate', 'checkuser', 'developer',
         'http://', 'jimbo', 'mediawiki', 'on wheals', 'on wheal', 'on wheel',
         'planante', 'razinger', 'sysop', 'troll', 'vandal',
@@ -653,7 +653,7 @@
         if not globalvar.makeWelcomeLog or len(queue) == 0:
             return

-        text = u''
+        text = ''
         logg = i18n.translate(self.site, logbook)
         if not logg:
             return
@@ -713,7 +713,7 @@
             return self._randomSignature

         sign_text = ''
-        creg = re.compile(r"^\* ?(.*?)$", re.M)
+        creg = re.compile(r'^\* ?(.*?)$', re.M)
         if not globalvar.signFileName:
             sign_page_name = i18n.translate(self.site, random_sign)
             if not sign_page_name:
@@ -931,10 +931,10 @@
             except ValueError:
                 # upon request, we could check for software version here
                 raise ValueError(
-                    "Mediawiki has changed, -offset:# is not supported "
-                    "anymore, but -offset:TIMESTAMP is, assuming TIMESTAMP "
-                    "is yyyymmddhhmmss. -timeoffset is now also supported. "
-                    "Please read this script source header for documentation.")
+                    'Mediawiki has changed, -offset:# is not supported '
+                    'anymore, but -offset:TIMESTAMP is, assuming TIMESTAMP '
+                    'is yyyymmddhhmmss. -timeoffset is now also supported. '
+                    'Please read this script source header for documentation.')
         elif arg == '-file':
             globalvar.randomSign = True
             globalvar.signFileName = val or pywikibot.input(
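Beyond the quote changes, the long-line fixes above rely on two standard techniques, shown here in a minimal sketch (the message text is made up, not taken from the patch): implicit concatenation of adjacent string literals inside parentheses, and a backslash continuation where the right-hand side is one long expression.

    # implicit concatenation keeps each physical line within the limit
    summary = ('Robot - Moving category per [[WP:CFD|CFD]] '
               'at the linked discussion page.')

    # backslash continuation splits a long assignment
    action_summary = \
        'Robot - Result of [[WP:CFD|CFD]] at the linked discussion page.'
    print(summary, action_summary)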