jenkins-bot has submitted this change and it was merged. ( https://gerrit.wikimedia.org/r/222708 )
Change subject: Use existing site attribute
......................................................................
Use existing site attribute
Also keep lines below 80 chars
Change-Id: I54410460a314126ad69f1d11beb98815daddae91
---
M scripts/editarticle.py
M scripts/isbn.py
M scripts/reflinks.py
3 files changed, 46 insertions(+), 30 deletions(-)
Approvals:
  Xqt: Looks good to me, approved
  jenkins-bot: Verified
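At a glance, the diff below applies one pattern to all three scripts: instead of rebuilding a Site object with pywikibot.Site() at each call site, the code reuses the site attribute that pywikibot's bot classes already carry. A minimal sketch of the pattern, assuming a bot derived from pywikibot.Bot (SketchBot and its summary helper are illustrative, not code from these scripts):

    import pywikibot
    from pywikibot import i18n


    class SketchBot(pywikibot.Bot):

        """Illustrative bot; pywikibot bot classes expose self.site."""

        def summary(self, changes):
            # Before: i18n.twtranslate(pywikibot.Site(), ...) rebuilt a
            # Site from the user config on every call.
            # After: reuse the site the bot already holds.
            return i18n.twtranslate(self.site, 'editarticle-edit',
                                    {'description': changes})

Besides avoiding redundant lookups, this keeps a bot working on whatever site it was constructed for, rather than silently falling back to the configured default.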
diff --git a/scripts/editarticle.py b/scripts/editarticle.py
index b51fb07..938bde2 100755
--- a/scripts/editarticle.py
+++ b/scripts/editarticle.py
@@ -23,7 +23,7 @@
 """
 #
 # (C) Gerrit Holl, 2004
-# (C) Pywikibot team, 2004-2017
+# (C) Pywikibot team, 2004-2018
 #
 # Distributed under the terms of the MIT license.
 #
@@ -77,9 +77,8 @@

     def setpage(self):
         """Set page and page title."""
-        site = pywikibot.Site()
         pageTitle = self.options.page or pywikibot.input("Page to edit:")
-        self.page = pywikibot.Page(pywikibot.Link(pageTitle, site))
+        self.page = pywikibot.Page(pywikibot.Link(pageTitle, self.site))
         if not self.options.edit_redirect and self.page.isRedirectPage():
             self.page = self.page.getRedirectTarget()

@@ -105,7 +104,7 @@
         if new and old != new:
             pywikibot.showDiff(old, new)
             changes = pywikibot.input("What did you change?")
-            comment = i18n.twtranslate(pywikibot.Site(), 'editarticle-edit',
+            comment = i18n.twtranslate(self.site, 'editarticle-edit',
                                        {'description': changes})
             try:
                 self.page.put(new, summary=comment, minorEdit=False,
diff --git a/scripts/isbn.py b/scripts/isbn.py
index ee7b9ad..94cd33d 100755
--- a/scripts/isbn.py
+++ b/scripts/isbn.py
@@ -35,7 +35,7 @@
""" # -# (C) Pywikibot team, 2009-2017 +# (C) Pywikibot team, 2009-2018 # # Distributed under the terms of the MIT license. # @@ -64,7 +64,7 @@ '¶ms;': pagegenerators.parameterHelp, }
-# Maps each group number to the list of its publisher number ranges. Taken from: +# Maps each group number to the list of its publisher number ranges. Source: # https://web.archive.org/web/20090823122028/http://www.isbn-international.org... ranges = { '0': [ # English speaking area @@ -267,7 +267,8 @@ ('85000', '94999'), ('970000', '999999'), ], - '92': [ # International Publishers (Unesco, EU), European Community Organizations + # International Publishers (Unesco, EU), European Community Organizations + '92': [ ('0', '5'), ('60', '79'), ('800', '899'), @@ -1492,7 +1493,7 @@
self.generator = generator self.isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d-]+[Xx]?)') - self.comment = i18n.twtranslate(pywikibot.Site(), 'isbn-formatting') + self.comment = i18n.twtranslate(self.site, 'isbn-formatting')
def treat(self, page): """Treat a page.""" @@ -1558,7 +1559,7 @@ self.isbn_10_prop_id = self.get_property_by_name('ISBN-10') if self.isbn_13_prop_id is None: self.isbn_13_prop_id = self.get_property_by_name('ISBN-13') - self.comment = i18n.twtranslate(pywikibot.Site(), 'isbn-formatting') + self.comment = i18n.twtranslate(self.site, 'isbn-formatting')
def treat_page_and_item(self, page, item): """Treat a page.""" @@ -1596,7 +1597,8 @@ if old_isbn == new_isbn: continue # remove 'ISBN ' prefix - assert new_isbn.startswith('ISBN '), 'ISBN should start with "ISBN"' + assert new_isbn.startswith('ISBN '), \ + 'ISBN should start with "ISBN"' new_isbn = new_isbn[5:] claim.setTarget(new_isbn) change_messages.append('Changing %s (%s --> %s)' % diff --git a/scripts/reflinks.py b/scripts/reflinks.py index d713f9f..3e73007 100755 --- a/scripts/reflinks.py +++ b/scripts/reflinks.py @@ -120,7 +120,8 @@ re.IGNORECASE) # matches an URL at the index of a website dirIndex = re.compile( - r'^\w+://[^/]+/((default|index).(asp|aspx|cgi|htm|html|phtml|mpx|mspx|php|shtml|var))?$', + r'^\w+://[^/]+/((default|index).' + r'(asp|aspx|cgi|htm|html|phtml|mpx|mspx|php|shtml|var))?$', re.IGNORECASE) # Extracts the domain name domain = re.compile(r'^(\w+)://(?:www.|)([^/]+)') @@ -179,7 +180,8 @@ # unbracketed with() r'^[]\s<>"]+([^[]\s<>"]+[^[]\s.:;\,<>?"]+|' + # unbracketed without () - r'[^[]\s<>"]+[^[]\s).:;\,<>?"]+|[^[]\s<>"]+))[!?,\s]*]?\s*</ref>') + r'[^[]\s<>"]+[^[]\s).:;\,<>?"]+|[^[]\s<>"]+))' + r'[!?,\s]*]?\s*</ref>')

 # Download this file :
 # http://www.twoevils.org/files/wikipedia/404-links.txt.gz
@@ -194,11 +196,11 @@

     """Container to handle a single bare reference."""

-    def __init__(self, link, name):
+    def __init__(self, link, name, site=None):
         """Constructor."""
         self.refname = name
         self.link = link
-        self.site = pywikibot.Site()
+        self.site = site or pywikibot.Site()
         self.linkComment = i18n.twtranslate(self.site, 'reflinks-comment')
         self.url = re.sub(u'#.*', '', self.link)
         self.title = None
@@ -278,7 +280,11 @@
     name the first, and remove the content of the others
     """

-    def __init__(self):
+    def __init__(self, site=None):
+        """Constructor."""
+        if not site:
+            site = pywikibot.Site()
+
         """Constructor."""
         # Match references
         self.REFS = re.compile(
@@ -287,7 +293,7 @@
             r'(?i).*name\s*=\s*(?P<quote>"?)\s*(?P<name>.+)\s*(?P=quote).*')
         self.GROUPS = re.compile(
             r'(?i).*group\s*=\s*(?P<quote>"?)\s*(?P<group>.+)\s*(?P=quote).*')
-        self.autogen = i18n.twtranslate(pywikibot.Site(), 'reflinks-autogen')
+        self.autogen = i18n.twtranslate(site, 'reflinks-autogen')

     def process(self, text):
         """Process the page."""
@@ -380,7 +386,8 @@
             if v[1]:
                 name = u'"%s"' % name
             text = re.sub(
-                u'<ref name\s*=\s*(?P<quote>"?)\s*%s\s*(?P=quote)\s*/>' % k,
+                '<ref name\s*=\s*(?P<quote>"?)\s*{}\s*(?P=quote)\s*/>'
+                .format(k),
                 u'<ref name=%s />' % name,
                 text)
         return text

@@ -400,7 +407,8 @@
         super(ReferencesRobot, self).__init__(**kwargs)
         self.generator = generator
         self.site = pywikibot.Site()
-        self._use_fake_user_agent = config.fake_user_agent_default.get('reflinks', False)
+        self._use_fake_user_agent = config.fake_user_agent_default.get(
+            'reflinks', False)
         # Check
         manual = 'mw:Manual:Pywikibot/refLinks'
         code = None
@@ -422,7 +430,7 @@
             bad = globalbadtitles
         self.titleBlackList = re.compile(bad, re.I | re.S | re.X)
         self.norefbot = noreferences.NoReferencesBot(None, verbose=False)
-        self.deduplicator = DuplicateReferences()
+        self.deduplicator = DuplicateReferences(self.site)

         self.site_stop_page = i18n.translate(self.site, stop_page)
         if self.site_stop_page:
@@ -466,7 +474,8 @@
         try:
             pdfinfo_out = subprocess.Popen([r"pdfinfo", "/dev/stdin"],
-                                           stdin=urlobj, stdout=subprocess.PIPE,
+                                           stdin=urlobj,
+                                           stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            shell=False).communicate()[0]
             for aline in pdfinfo_out.splitlines():
@@ -508,7 +517,8 @@
                     % page.title(asLink=True))
                 continue
             except pywikibot.NoPage:
-                pywikibot.output(u'Page %s not found' % page.title(asLink=True))
+                pywikibot.output('Page {} not found'
+                                 .format(page.title(asLink=True)))
                 continue
             except pywikibot.IsRedirectPage:
                 pywikibot.output(u'Page %s is a redirect'
@@ -526,7 +536,7 @@
                 # TODO: Clean URL blacklist
                 continue

-            ref = RefLink(link, match.group('name'))
+            ref = RefLink(link, match.group('name'), site=self.site)
             f = None

             try:
@@ -582,17 +592,19 @@
                         % (f.status, ref.url, page.title(asLink=True)),
                         toStdout=True)

-                    # 410 Gone, indicates that the resource has been purposely
-                    # removed
+                    # 410 Gone, indicates that the resource has been
+                    # purposely removed
                     if f.status == 410 or \
-                       (f.status == 404 and (u'\t%s\t' % ref.url in deadLinks)):
+                       (f.status == 404 and ('\t{}\t'.format(ref.url)
+                                             in deadLinks)):
                         repl = ref.refDead()
                         new_text = new_text.replace(match.group(), repl)
                         continue

                     linkedpagetext = f.raw
                 except UnicodeError:
-                    # example : http://www.adminet.com/jo/20010615%C2%A6/ECOC0100037D.html
+                    # example:
+                    # http://www.adminet.com/jo/20010615%C2%A6/ECOC0100037D.html
                     # in [[fr:Cyanure]]
                     pywikibot.output(color_format(
                         '{lightred}Bad link{default} : {0} in {1}',
@@ -641,7 +653,7 @@
                 else:
                     pywikibot.output(u'No charset found for %s' % ref.link)
                 if not contentType:
-                    pywikibot.output(u'No content-type found for %s' % ref.link)
+                    pywikibot.output('No content-type found for %s' % ref.link)
                     continue
                 elif not self.MIME.search(contentType):
                     pywikibot.output(color_format(
@@ -672,7 +684,8 @@
                 try:
                     u = linkedpagetext.decode(enc[0])   # Bug T69410
                 except (UnicodeDecodeError, LookupError) as e:
-                    pywikibot.output(u'%s : Decoding error - %s' % (ref.link, e))
+                    pywikibot.output('{} : Decoding error - {}'
+                                     .format(ref.link, e))
                     continue

                 # Retrieves the first non empty string inside <title> tags
@@ -729,8 +742,10 @@
             else:
                 editedpages += 1

-            if self.getOption('limit') and editedpages >= self.getOption('limit'):
-                pywikibot.output('Edited %s pages, stopping.' % self.getOption('limit'))
+            if self.getOption('limit') \
+               and editedpages >= self.getOption('limit'):
+                pywikibot.output('Edited {} pages, stopping.'
+                                 .format(self.getOption('limit')))
                 return

             if self.site_stop_page and editedpages % 20 == 0:
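Two idioms recur in the reflinks.py hunks above. First, helper constructors now accept an optional site and only fall back to the configured one when none is passed, so standalone use keeps working; a minimal sketch of that signature (ExampleHelper is illustrative, not part of the diff):

    import pywikibot


    class ExampleHelper(object):

        """Helper mirroring the new RefLink/DuplicateReferences signature."""

        def __init__(self, site=None):
            # Reuse the caller's site when given; otherwise fall back to
            # the site built from the user's configuration.
            self.site = site or pywikibot.Site()

A caller that already holds a bot passes site=self.site, as ReferencesRobot now does for both RefLink and DuplicateReferences. Second, the 80-character fixes rely on Python's implicit concatenation of adjacent string literals: splitting a long r'...' pattern over two literals compiles to exactly the same regex as the original single line.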