jenkins-bot has submitted this change and it was merged.
Change subject: [L10N] remove wrong templates which do not exist on target wikipedia
......................................................................
[L10N] remove wrong templates which do not exist on target wikipedia
Change-Id: Ifab9ee9dac3f2558fc9e813f9d33e1febbe5adb7
---
M redirect.py
1 file changed, 1 insertion(+), 14 deletions(-)
Approvals:
Merlijn van Deen: Looks good to me, approved
jenkins-bot: Verified
diff --git a/redirect.py b/redirect.py
index ca9c5bc..cfe3dd2 100644
--- a/redirect.py
+++ b/redirect.py
@@ -20,7 +20,7 @@
'redirect-fix-broken-moved': u'Edit summary when the bot fixes a broken redirect to a moved page whose origin has been deleted.\nParameters:\n* <code>%(to)s</code>: the new redirect target, as a wiki link.',
'redirect-fix-loop': u'Edit summary when the bot fixes redirect loops. <code>%(to)s</code> displays the new redirect target as a wiki link.',
'redirect-remove-loop': u'Edit summary when the bot tags a redirect loop for speedy deletion. The internal links are to pages on the English Wikipedia, [http://en.wikipedia.org/wiki/Wikipedia:CSD#G8 here] and [http://en.wikipedia.org/wiki/Wikipedia:Redirect here]. They won\'t work anywhere except on the English Wikipedia, as they stand.',
- 'redirect-broken-redirect-template': u'Template for speedy deletion of broken redirect or redirect loops which the bot tags onto the redirect page. This message may contain additional informations like template parameters or reasons for the deletion request.\n\nNOTE: If this system message is not given for a language code, speedy deletion request by a bot is not supported on your site except there is a bot with sysop flag.\n\n{{doc-important|Only use your deletion template like <code><nowiki>{{delete}}</nowiki></code> which exist on your local project.}}',
+ 'redirect-broken-redirect-template': u'NOTE TO TRANSLATOR: This should only be translated by someone on the Wikipedia of your language code. Thank you.\n\nTemplate for speedy deletion of broken redirect or redirect loops which the bot tags onto the redirect page. This message may contain additional informations like template parameters or reasons for the deletion request.\n\nNOTE: If this system message is not given for a language code, speedy deletion request by a bot is not supported on your site except there is a bot with sysop flag.\n\n{{doc-important|Only use your deletion template like <code><nowiki>{{delete}}</nowiki></code> which exist on your local project.}}',
},
# Author: Csisc
'aeb': {
@@ -28,7 +28,6 @@
'redirect-remove-broken': u'تحويلة إلى صفحة محذوفة أو غير موجودة',
'redirect-fix-loop': u'روبوت: تعديل حلقة إعادة التوجيه إلى %(to)s',
'redirect-remove-loop': u'هدف التحويلة يصنع عقدة تحويل: Robot',
- 'redirect-broken-redirect-template': u'{{شطب|تحويلة مكسورة}}',
},
# Author: Naudefj
# Author: Xqt
@@ -37,7 +36,6 @@
'redirect-remove-broken': u'Robot: Aanstuur na \'n geskrapte of nie-bestaande bladsy',
'redirect-fix-loop': u'Robot: sirkulêre aanstuur na %(to)s reggemaak',
'redirect-remove-loop': u'Robot: Aanstuur vorm \'n sirkulêre lus',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: Als-Holder
# Author: Xqt
@@ -84,7 +82,6 @@
'redirect-fix-broken-moved': u'Bot: İşləməyən yönləndirilmənin yeri dəyişdirilmiş hədəf səhifəyə %(to)s düzəldilməsi',
'redirect-fix-loop': u'Bot: Sonsuz yönləndirilmənin %(to)s düzəldilməsi',
'redirect-remove-loop': u'Bot: Yönləndirilmə sonsuz yönləndirilmə formalaşdırır',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: Amir a57
# Author: E THP
@@ -93,7 +90,6 @@
'redirect-remove-broken': u'[[ویکیپئدییا:سیل#یستیقامتلندیرمه|وپ:سیل]]: سیلینئن یا دا وار اولمایان صحیفهیه اولان ایستیقامیلندیرمه',
'redirect-fix-loop': u'روبوت: فیخینگ اوزوک اولان%(to)s یؤنلندیرن',
'redirect-remove-loop': u'بوت: ایستیقامتلندیرمه هدفی بیر ایستیقامتلندیرمه دؤورو تشکیل ائدیر',
- 'redirect-broken-redirect-template': u'{{سیل|y1}}',
},
# Author: Haqmar
# Author: Sagan
@@ -102,7 +98,6 @@
'redirect-remove-broken': u'Робот: булмаған йәки юйылған биткә йүнәлтеү',
'redirect-fix-loop': u'Робот: %(to)s битенә йүнәлтеүҙе төҙәтеү',
'redirect-remove-loop': u'Робот: бер ҡайҙа ла йүнәлтелмәгән',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: Mucalexx
# Author: Xqt
@@ -114,10 +109,6 @@
},
'bat-smg': {
'redirect-fix-double': u'Robots: Taisuoms dvėgobs paradresavėms → %(to)s',
- },
- # Author: Stephensuleeman
- 'bbc-latn': {
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: EugeneZelenko
# Author: Jim-by
@@ -140,7 +131,6 @@
'redirect-fix-double': u'Robot: Pamasangan paugahan ganda ka %(to)s',
'redirect-remove-broken': u'[[WP:CSD#G8|G8]]: [[Wikipedia:Redirect|Paalihan]] ka tungkaran nang dihapus atawa kada ada',
'redirect-remove-loop': u'[[WP:CSD#G8|G8]]: Bidikan [[Wikipedia:Redirect|paalihan]] mahasilakan paalihan siklik',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: Wikitanvir
'bn': {
@@ -156,7 +146,6 @@
'redirect-fix-broken-moved': u'Robot : O reizhañ an adkasoù torret war-zu ar bajenn bal %(to)s',
'redirect-fix-loop': u'Robot : O kempenn al lagadenn adkas war-zu %(to)s',
'redirect-remove-loop': u'Robot: Stumm ur c\'helc\'h-tro born zo gant an [[Wikipedia:Redirect|adkas]]',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: CERminator
# Author: Edinwiki
@@ -208,7 +197,6 @@
'redirect-fix-double': u'Bot: Yn trwsio ailgyfeiriad dwbl i %(to)s',
'redirect-remove-broken': u'Bot: Yn ailgyfeirio i dudalen a ddilëwyd neu nad yw ar gael',
'redirect-remove-loop': u'Bot: Mae nod yr ailgyfeiriad yn ffurfio dolen ailgyfeirio',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: Christian List
# Author: Kaare
@@ -389,7 +377,6 @@
'redirect-fix-broken-moved': u'機械人:修復損壞个重定向頁到移動目標頁面 %(to)s',
'redirect-fix-loop': u'機械人:修復重定向迴圈至%(to)s',
'redirect-remove-loop': u'機械人:重定向目標構成循環',
- 'redirect-broken-redirect-template': u'{{db-r1}}',
},
# Author: Amire80
# Author: YaronSh
--
To view, visit https://gerrit.wikimedia.org/r/102071
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: Ifab9ee9dac3f2558fc9e813f9d33e1febbe5adb7
Gerrit-PatchSet: 2
Gerrit-Project: pywikibot/i18n
Gerrit-Branch: master
Gerrit-Owner: Xqt <info(a)gno.de>
Gerrit-Reviewer: Merlijn van Deen <valhallasw(a)arctus.nl>
Gerrit-Reviewer: Siebrand <siebrand(a)wikimedia.org>
Gerrit-Reviewer: jenkins-bot
jenkins-bot has submitted this change and it was merged.
Change subject: [PEP8] changes, code improvements, insert __version__ string
......................................................................
[PEP8] changes, code improvements, insert __version__ string
Change-Id: Icff281c4d659d40a527eeecac12de17afaef8201
---
M data_ingestion.py
1 file changed, 80 insertions(+), 51 deletions(-)
Approvals:
Merlijn van Deen: Looks good to me, approved
jenkins-bot: Verified
diff --git a/data_ingestion.py b/data_ingestion.py
index f5c8f3d..4098399 100644
--- a/data_ingestion.py
+++ b/data_ingestion.py
@@ -1,69 +1,85 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-'''
+"""
A generic bot to do data ingestion (batch uploading) to Commons
-'''
-import pywikibot, upload
-import posixpath, urlparse
+"""
+#
+# (C) Pywikibot team, 2011-2013
+#
+# Distributed under the terms of the MIT license.
+#
+__version__ = '$Id$'
+#
+
+import posixpath
+import urlparse
import urllib
-import hashlib, base64
+import hashlib
+import base64
import StringIO
try:
import json
except ImportError:
import simplejson as json
+import pywikibot
+import upload
+
class Photo(object):
- '''
+ """
Represents a Photo (or other file), with metadata, to upload to Commons.
The constructor takes two parameters: URL (string) and metadata (dict with str:str key:value pairs)
that can be referred to from the title & template generation.
-
- '''
+ """
def __init__(self, URL, metadata):
self.URL = URL
self.metadata = metadata
self.metadata["_url"] = URL
- self.metadata["_filename"] = filename = posixpath.split(urlparse.urlparse(URL)[2])[1]
+ self.metadata["_filename"] = filename = posixpath.split(
+ urlparse.urlparse(URL)[2])[1]
self.metadata["_ext"] = ext = filename.split(".")[-1]
if ext == filename:
self.metadata["_ext"] = ext = None
self.contents = None
def downloadPhoto(self):
- '''
+ """
Download the photo and store it in a StringIO.StringIO object.
TODO: Add exception handling
- '''
+
+ """
if not self.contents:
- imageFile=urllib.urlopen(self.URL).read()
+ imageFile = urllib.urlopen(self.URL).read()
self.contents = StringIO.StringIO(imageFile)
return self.contents
- def findDuplicateImages(self, site = pywikibot.getSite(u'commons', u'commons')):
- '''
- Takes the photo, calculates the SHA1 hash and asks the mediawiki api for a list of duplicates.
+ def findDuplicateImages(self,
+ site=pywikibot.getSite(u'commons', u'commons')):
+ """
+ Takes the photo, calculates the SHA1 hash and asks the mediawiki api
+ for a list of duplicates.
TODO: Add exception handling, fix site thing
- '''
+
+ """
hashObject = hashlib.sha1()
hashObject.update(self.downloadPhoto().getvalue())
return site.getFilesFromAnHash(base64.b16encode(hashObject.digest()))
def getTitle(self, fmt):
"""
- Given a format string with %(name)s entries, returns the string formatted with metadata
+ Given a format string with %(name)s entries, returns the string
+ formatted with metadata
+
"""
return fmt % self.metadata
def getDescription(self, template, extraparams={}):
- '''
- Generate a description for a file
- '''
+ """ Generate a description for a file """
params = {}
params.update(self.metadata)
@@ -72,13 +88,15 @@
for key in sorted(params.keys()):
value = params[key]
if not key.startswith("_"):
- description = description + (u'|%s=%s' % (key, self._safeTemplateValue(value))) + "\n"
- description = description + u'}}'
+ description += (u'|%s=%s'
+ % (key, self._safeTemplateValue(value))) + "\n"
+ description += u'}}'
return description
def _safeTemplateValue(self, value):
return value.replace("|", "{{!}}")
+
def CSVReader(fileobj, urlcolumn, *args, **kwargs):
import csv
@@ -88,30 +106,35 @@
yield Photo(line[urlcolumn], line)
-def JSONReader(baseurl, start=0, end=100, JSONBase=None, metadataFunction=None, fileurl=u'fileurl'):
- '''
+def JSONReader(baseurl, start=0, end=100, JSONBase=None, metadataFunction=None,
+ fileurl=u'fileurl'):
+ """
Loops over a bunch of json page and process them with processJSONPage().
Will yield Photo objects with metadata
- '''
+
+ """
if baseurl:
- for i in range(start , end):
+ for i in range(start, end):
url = baseurl % (i,)
- photo = processJSONPage(url, JSONBase=JSONBase, metadataFunction=metadataFunction, fileurl=u'fileurl')
+ photo = processJSONPage(url, JSONBase=JSONBase,
+ metadataFunction=metadataFunction,
+ fileurl=u'fileurl')
if photo:
yield photo
-
-def processJSONPage(url, JSONBase=None, metadataFunction=None, fileurl=u'fileurl'):
- '''
+def processJSONPage(url, JSONBase=None, metadataFunction=None,
+ fileurl=u'fileurl'):
+ """
Process a single JSON page.
For the JSON page you can rebase it to not get all the crap
You can apply a custom metadata function to do some modification on the metadata and checking
By default the field 'fileurl' is expected in the metadata to contain the file. You can change this.
Will a return Photo object with metadata or None if something is wrong
- '''
+
+ """
JSONPage = urllib.urlopen(url)
JSONData = json.load(JSONPage)
JSONPage.close()
@@ -130,17 +153,20 @@
if metadataFunction:
metadata = metadataFunction(metadata)
- # If the metadataFunction didn't return none (something was wrong). Return the photo
+ # If the metadataFunction didn't return none (something was wrong).
+ # Return the photo
if metadata:
return Photo(metadata.get(fileurl), metadata)
-
return False
+
def JSONRebase(JSONData, JSONBase):
- '''
+ """
Moves the base of the JSON object to the part you're intrested in.
- JSONBase is a list to crawl the tree. If one of the steps is not found, return None
- '''
+ JSONBase is a list to crawl the tree. If one of the steps is not found,
+ return None
+
+ """
for step in JSONBase:
if JSONData:
if type(JSONData) == dict:
@@ -148,21 +174,20 @@
elif type(JSONData) == list:
# FIXME: Needs error, length etc checking
JSONData = JSONData[step]
-
return JSONData
def JSONTree(metadata, fieldlist, record):
- '''
+ """
metadata: Dict with end result
key: The key we encountered
record: Record to work on
- '''
+ """
if type(record) == list:
for r in record:
metadata = JSONTree(metadata, fieldlist, r)
elif type(record) == dict:
- for k,v in record.items():
+ for k, v in record.items():
metadata = JSONTree(metadata, fieldlist + [k], v)
elif type(record) == unicode:
key = u'_'.join(fieldlist)
@@ -172,11 +197,13 @@
newkey = key + u'_2'
if not newkey in metadata:
metadata[newkey] = record
-
return metadata
+
class DataIngestionBot:
- def __init__(self, reader, titlefmt, pagefmt, site=pywikibot.getSite(u'commons', u'commons')):
+
+ def __init__(self, reader, titlefmt, pagefmt,
+ site=pywikibot.getSite(u'commons', u'commons')):
self.reader = reader
self.titlefmt = titlefmt
self.pagefmt = pagefmt
@@ -190,17 +217,16 @@
title = photo.getTitle(self.titlefmt)
description = photo.getDescription(self.pagefmt)
- bot = upload.UploadRobot(url = photo.URL,
- description = description,
- useFilename = title,
- keepFilename = True,
- verifyDescription = False,
+ bot = upload.UploadRobot(url=photo.URL,
+ description=description,
+ useFilename=title,
+ keepFilename=True,
+ verifyDescription=False,
ignoreWarning=True,
- targetSite = self.site)
+ targetSite=self.site)
bot._contents = photo.downloadPhoto().getvalue()
bot._retrieved = True
bot.run()
-
return title
def doSingle(self):
@@ -210,9 +236,12 @@
for photo in self.reader:
self._doUpload(photo)
-if __name__=="__main__":
+
+if __name__ == "__main__":
reader = CSVReader(open('tests/data/csv_ingestion.csv'), 'url')
- bot = DataIngestionBot(reader, "%(name)s - %(set)s.%(_ext)s", ":user:valhallasw/test_template", pywikibot.getSite('test', 'test'))
+ bot = DataIngestionBot(reader, "%(name)s - %(set)s.%(_ext)s",
+ ":user:valhallasw/test_template",
+ pywikibot.getSite('test', 'test'))
bot.run()
"""
--
To view, visit https://gerrit.wikimedia.org/r/103252
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: Icff281c4d659d40a527eeecac12de17afaef8201
Gerrit-PatchSet: 1
Gerrit-Project: pywikibot/compat
Gerrit-Branch: master
Gerrit-Owner: Xqt <info(a)gno.de>
Gerrit-Reviewer: Ladsgroup <ladsgroup(a)gmail.com>
Gerrit-Reviewer: Merlijn van Deen <valhallasw(a)arctus.nl>
Gerrit-Reviewer: Multichill <maarten(a)mdammers.nl>
Gerrit-Reviewer: jenkins-bot
Gerrit-Reviewer: saper <saper(a)saper.info>
jenkins-bot has submitted this change and it was merged.
Change subject: [PEP8] changes
......................................................................
[PEP8] changes
Change-Id: I0562f6b814e4d83f5094d4cd851354cb86aee493
---
M daemonize.py
1 file changed, 4 insertions(+), 2 deletions(-)
Approvals:
Xqt: Looks good to me, approved
jenkins-bot: Verified
diff --git a/daemonize.py b/daemonize.py
index 121d3a3..0eb9ffb 100644
--- a/daemonize.py
+++ b/daemonize.py
@@ -1,16 +1,18 @@
# -*- coding: utf-8 -*-
#
-# (C) Pywikipedia bot team, 2007-2008, 2010
+# (C) Pywikibot team, 2007-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
#
-import sys, os
+import sys
+import os
is_daemon = False
+
def daemonize(close_fd=True, chdir=True, write_pid=False, redirect_std=None):
""" Daemonize the current process. Only works on POSIX compatible operating
systems. The process will fork to the background and return control to
--
To view, visit https://gerrit.wikimedia.org/r/103250
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: I0562f6b814e4d83f5094d4cd851354cb86aee493
Gerrit-PatchSet: 1
Gerrit-Project: pywikibot/compat
Gerrit-Branch: master
Gerrit-Owner: Xqt <info(a)gno.de>
Gerrit-Reviewer: Ladsgroup <ladsgroup(a)gmail.com>
Gerrit-Reviewer: Xqt <info(a)gno.de>
Gerrit-Reviewer: jenkins-bot
jenkins-bot has submitted this change and it was merged.
Change subject: [PEP8] changes, insert __version__ id
......................................................................
[PEP8] changes, insert __version__ id
Change-Id: Iab25774e256c0cfdd7ff1446ce5445320c6ed2fe
---
M logindata.py
1 file changed, 25 insertions(+), 19 deletions(-)
Approvals:
Ladsgroup: Looks good to me, approved
jenkins-bot: Verified
diff --git a/logindata.py b/logindata.py
index 152163b..7072354 100644
--- a/logindata.py
+++ b/logindata.py
@@ -1,31 +1,37 @@
# -*- coding: utf-8 -*-
+"""
+Usable example module: Use of pywikipedia as a
+library.
-# Usable example module: Use of pywikipedia as a
-# library.
-#
-# Looks up the path to pywikipedia (pywikipedia_path)
-# in a settings.py file. You'll need to provide that,
-# and/or refactor.
-
+Looks up the path to pywikipedia (pywikipedia_path)
+in a settings.py file. You'll need to provide that,
+and/or refactor.
+"""
# (C) Kim Bruning for Wikiation, sponsored by Kennisnet, 2009
+# (C) Pywikipedia bot team, 2009-2013
#
# Distributed under the terms of the MIT license.
#
+__version__ = '$Id$'
+#
-import sys, os
+import sys
+import os
import settings
+
if settings.pywikipedia_path not in sys.path:
sys.path.append(settings.pywikipedia_path)
# pywikipedia can only set itself up if everything is
# done in its own directory. This needs fixing sometime.
# for now, we live with it.
-cwd=os.getcwd()
+cwd = os.getcwd()
os.chdir(settings.pywikipedia_path)
import wikipedia as pywikibot
import login
from simple_family import Family
os.chdir(cwd)
+
class LoginData:
"""An example class that uses pywikipedia as a library.
@@ -50,7 +56,7 @@
password='MY_SECRET_PASSWORD',
RversionTab=None,
api_supported=False
- ):
+ ):
"""
paramaters:
name: arbitrary name. Pick something easy to remember
@@ -67,10 +73,10 @@
password: password for this user
"""
- self.lang=lang
- self.user=user
- self.password=password
- self.family=base_family.Family(
+ self.lang = lang
+ self.user = user
+ self.password = password
+ self.family = base_family.Family(
name=name,
protocol=protocol,
server=server,
@@ -80,20 +86,20 @@
encoding=encoding,
RversionTab=RversionTab,
api_supported=api_supported)
- self.site=None
+ self.site = None
def login(self):
"""Attempt to log in on the site described
by this class. Returns a pywikipedia site object"""
- self.site=pywikibot.Site(
+ self.site = pywikibot.Site(
code=self.lang,
fam=self.family,
user=self.user
- )
- loginManager=login.LoginManager(
+ )
+ loginManager = login.LoginManager(
password=self.password,
site=self.site,
username=self.user
- )
+ )
loginManager.login()
return self.site
--
To view, visit https://gerrit.wikimedia.org/r/103235
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: Iab25774e256c0cfdd7ff1446ce5445320c6ed2fe
Gerrit-PatchSet: 1
Gerrit-Project: pywikibot/compat
Gerrit-Branch: master
Gerrit-Owner: Xqt <info(a)gno.de>
Gerrit-Reviewer: Ladsgroup <ladsgroup(a)gmail.com>
Gerrit-Reviewer: jenkins-bot
Xqt has submitted this change and it was merged.
Change subject: [PEP8] changes
......................................................................
[PEP8] changes
Change-Id: Ie759c4d8253e270e8ad7071a23a4eff4dd56f6ed
---
M misspelling.py
M ndashredir.py
M nowcommons.py
M statistics_in_wikitable.py
4 files changed, 75 insertions(+), 56 deletions(-)
Approvals:
Xqt: Looks good to me, approved
diff --git a/misspelling.py b/misspelling.py
index 8c891c3..287b4b4 100644
--- a/misspelling.py
+++ b/misspelling.py
@@ -20,14 +20,16 @@
-main only check pages in the main namespace, not in the talk,
wikipedia, user, etc. namespaces.
"""
-__version__ = '$Id$'
# (C) Daniel Herding, 2007
-# (C) Pywikipedia bot team 2007-2013
+# (C) Pywikibot team, 2007-2013
#
# Distributed under the terms of the MIT license.
+#
+__version__ = '$Id$'
+#
-import wikipedia as pywikibot
+import pywikibot
import catlib
import pagegenerators as pg
import solve_disambiguation
@@ -73,7 +75,8 @@
def createPageGenerator(self, firstPageTitle):
if pywikibot.getSite().lang in self.misspellingCategory:
- misspellingCategoryTitle = self.misspellingCategory[pywikibot.getSite().lang]
+ misspellingCategoryTitle = self.misspellingCategory[
+ pywikibot.getSite().lang]
misspellingCategory = catlib.Category(pywikibot.getSite(),
misspellingCategoryTitle)
generator = pg.CategorizedPageGenerator(misspellingCategory,
@@ -81,14 +84,16 @@
start=firstPageTitle)
else:
misspellingTemplateName = 'Template:%s' \
- % self.misspellingTemplate[pywikibot.getSite().lang]
+ % self.misspellingTemplate[
+ pywikibot.getSite().lang]
misspellingTemplate = pywikibot.Page(pywikibot.getSite(),
misspellingTemplateName)
generator = pg.ReferringPageGenerator(misspellingTemplate,
onlyTemplateInclusion=True)
if firstPageTitle:
pywikibot.output(
- u'-start parameter unsupported on this wiki because there is no category for misspellings.')
+ u'-start parameter unsupported on this wiki because there '
+ u'is no category for misspellings.')
preloadingGen = pg.PreloadingGenerator(generator)
return preloadingGen
@@ -119,8 +124,8 @@
dn=False):
# TODO: setSummaryMessage() in solve_disambiguation now has parameters
# new_targets and unlink. Make use of these here.
- comment = pywikibot.translate(self.mysite, self.msg) \
- % disambPage.title()
+ comment = pywikibot.translate(
+ self.mysite, self.msg) % disambPage.title()
pywikibot.setAction(comment)
@@ -146,6 +151,7 @@
bot = MisspellingRobot(always, firstPageTitle, main_only)
bot.run()
+
if __name__ == "__main__":
try:
main()
diff --git a/ndashredir.py b/ndashredir.py
index 61759ea..4ffed75 100644
--- a/ndashredir.py
+++ b/ndashredir.py
@@ -40,31 +40,35 @@
"""
#
-# (C) Bináris, 2012
+# (c) Bináris, 2012
+# (c) pywikibot team, 2012-2013
#
# Distributed under the terms of the MIT license.
#
-__version__='$Id$'
+__version__ = '$Id$'
+#
-import codecs, re
-import wikipedia as pywikibot
+import codecs
+import re
+import pywikibot
from pagegenerators import RegexFilterPageGenerator as RPG
from pywikibot import i18n
+
def main(*args):
- regex = ur'.*[–—]' # Alt 0150 (n dash), alt 0151 (m dash), respectively.
+ regex = ur'.*[–—]' # Alt 0150 (n dash), alt 0151 (m dash), respectively.
ns = 0
start = '!'
- filename = None # The name of the file to save titles
- titlefile = None # The file object itself
- ignorefilename = None # The name of the ignore file
- ignorelist = [] # A list to ignore titles that redirect to somewhere else
+ filename = None # The name of the file to save titles
+ titlefile = None # The file object itself
+ ignorefilename = None # The name of the ignore file
+ ignorelist = [] # A list to ignore titles that redirect to somewhere else
# Handling parameters:
for arg in pywikibot.handleArgs(*args):
if arg == '-start':
start = pywikibot.input(
- u'From which title do you want to continue?')
+ u'From which title do you want to continue?')
elif arg.startswith('-start:'):
start = arg[7:]
elif arg in ['-ns', '-namespace']:
@@ -98,42 +102,42 @@
ignorelist = re.findall(ur'\[\[:?(.*?)\]\]', igfile.read())
igfile.close()
except IOError:
- pywikibot.output("%s cannot be opened for reading." % ignorefilename)
+ pywikibot.output("%s cannot be opened for reading."
+ % ignorefilename)
return
# Ready to initialize
site = pywikibot.getSite()
redirword = site.redirect()
gen = RPG(site.allpages(
- start=start, namespace=ns, includeredirects=False), [regex])
+ start=start, namespace=ns, includeredirects=False), [regex])
# Processing:
for page in gen:
title = page.title()
editSummary = i18n.twtranslate(site, 'ndashredir-create',
{'title': title})
- newtitle = title.replace(u'–','-').replace(u'—','-')
+ newtitle = title.replace(u'–', '-').replace(u'—', '-')
# n dash -> hyphen, m dash -> hyphen, respectively
redirpage = pywikibot.Page(site, newtitle)
if redirpage.exists():
if redirpage.isRedirectPage() and \
- redirpage.getRedirectTarget() == page:
- pywikibot.output(
- u'[[%s]] already redirects to [[%s]], nothing to do with it.'
- % (newtitle, title))
+ redirpage.getRedirectTarget() == page:
+ pywikibot.output(u'[[%s]] already redirects to [[%s]], nothing '
+ u'to do with it.' % (newtitle, title))
elif newtitle in ignorelist:
pywikibot.output(
u'Skipping [[%s]] because it is on your ignore list.'
% newtitle)
else:
pywikibot.output(
- (u'\03{lightyellow}Skipping [[%s]] because it exists '
- u'already with a different content.\03{default}')
+ u'\03{lightyellow}Skipping [[%s]] because it exists '
+ u'already with a different content.\03{default}'
% newtitle)
if titlefile:
- s = u'\n#%s does not redirect to %s.' %\
- (redirpage.title(asLink=True, textlink=True),
- page.title(asLink=True, textlink=True))
+ s = u'\n#%s does not redirect to %s.' % (
+ redirpage.title(asLink=True, textlink=True),
+ page.title(asLink=True, textlink=True))
# For the unlikely case if someone wants to run it in
# file namespace.
titlefile.write(s)
@@ -154,7 +158,8 @@
# RegexFilterPageGenerator or throttle.py or anything else and cannot
# be catched in this loop.)
if titlefile:
- titlefile.close() # For the spirit of programming (it was flushed)
+ titlefile.close() # For the spirit of programming (it was flushed)
+
if __name__ == "__main__":
try:
diff --git a/nowcommons.py b/nowcommons.py
index fe54bad..e06a0c2 100644
--- a/nowcommons.py
+++ b/nowcommons.py
@@ -48,16 +48,19 @@
#
# (C) Wikipedian, 2006-2007
# (C) Siebrand Mazeland, 2007-2008
-# (C) xqt, 2010-2012
-# (C) Pywikipedia bot team, 2006-2013
+# (C) xqt, 2010-2013
+# (C) Pywikibot team, 2006-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
#
-import sys, re, webbrowser, urllib
-import wikipedia as pywikibot
+import sys
+import re
+import webbrowser
+import urllib
+import pywikibot
import pagegenerators as pg
import image
# only for nowCommonsMessage
@@ -119,7 +122,7 @@
'it': [
u'NowCommons',
],
- 'ja':[
+ 'ja': [
u'NowCommons',
],
'ko': [
@@ -128,7 +131,7 @@
u'공용 중복',
u'Nowcommons',
],
- 'nds-nl' : [
+ 'nds-nl': [
u'NoenCommons',
u'NowCommons',
],
@@ -152,7 +155,7 @@
u'Перенесено на Викисклад',
u'На Викискладе',
],
- 'zh':[
+ 'zh': [
u'NowCommons',
u'Nowcommons',
u'NCT',
@@ -175,7 +178,7 @@
word_to_skip = {
'en': [],
'it': ['stemma', 'stub', 'hill40 '],
- }
+}
#nowCommonsMessage = imagetransfer.nowCommonsMessage
@@ -200,7 +203,7 @@
images_processed = list()
while 1:
url = 'http://toolserver.org/~multichill/nowcommons.php?language=%s&page=%s&filter=' % (lang, num_page)
- HTML_text = self.site.getUrl(url, no_hostname = True)
+ HTML_text = self.site.getUrl(url, no_hostname=True)
reg = r'<[Aa] href="(?P<urllocal>.*?)">(?P<imagelocal>.*?)</[Aa]> +?</td><td>\n\s*?'
reg += r'<[Aa] href="(?P<urlcommons>http://commons.wikimedia.org/.*?)">Image:(?P<imagecommons>.*?)</[Aa]> +?</td><td>'
regex = re.compile(reg, re.UNICODE)
@@ -309,7 +312,7 @@
for page in self.getPageGenerator():
if use_hash:
# Page -> Has the namespace | commons image -> Not
- images_list = page # 0 -> local image, 1 -> commons image
+ images_list = page # 0 -> local image, 1 -> commons image
page = pywikibot.Page(self.site, images_list[0])
else:
# If use_hash is true, we have already print this before, no need
@@ -345,7 +348,7 @@
u'\"\03{lightred}%s\03{default}\" is still used in %i pages.'
% (localImagePage.title(withNamespace=False),
len(usingPages)))
- if replace == True:
+ if replace:
pywikibot.output(
u'Replacing \"\03{lightred}%s\03{default}\" by \"\03{lightgreen}%s\03{default}\".'
% (localImagePage.title(withNamespace=False),
@@ -358,9 +361,10 @@
oImageRobot.run()
# If the image is used with the urlname the
# previous function won't work
- if len(list(pywikibot.ImagePage(self.site,
- page.title()).usingPages())) > 0 and \
- replaceloose:
+ if len(list(pywikibot.ImagePage(
+ self.site,
+ page.title()).usingPages())) > 0 and \
+ replaceloose:
oImageRobot = image.ImageRobot(
pg.FileLinksGenerator(
localImagePage),
@@ -377,7 +381,9 @@
if usingPages > 0 and use_hash:
# just an enter
pywikibot.input(
- u'There are still %s pages with this image, confirm the manual removal from them please.'
+ u'There are still %s pages with this '
+ u'image, confirm the manual removal '
+ u'from them please.'
% usingPages)
else:
@@ -388,15 +394,17 @@
u'No page is using \"\03{lightgreen}%s\03{default}\" anymore.'
% localImagePage.title(withNamespace=False))
commonsText = commonsImagePage.get()
- if replaceonly == False:
+ if not replaceonly:
if md5 == commonsImagePage.getFileMd5Sum():
pywikibot.output(
u'The image is identical to the one on Commons.')
- if len(localImagePage.getFileVersionHistory()) > 1 and not use_hash:
- pywikibot.output(
- u"This image has a version history. Please delete it manually after making sure that the old versions are not worth keeping.""")
+ if len(localImagePage.getFileVersionHistory()) > 1 and \
+ not use_hash:
+ pywikibot.output(u"""
+This image has a version history. Please delete it manually after
+making sure that the old versions are not worth keeping.""")
continue
- if autonomous == False:
+ if not autonomous:
pywikibot.output(
u'\n\n>>>> Description on \03{lightpurple}%s\03{default} <<<<\n'
% page.title())
@@ -406,17 +414,17 @@
% commonsImagePage.title())
pywikibot.output(commonsText)
choice = pywikibot.inputChoice(
-u'Does the description on Commons contain all required source and license\n'
- u'information?',
+ u'Does the description on Commons contain all '
+ u'required source and license\ninformation?',
['yes', 'no'], ['y', 'N'], 'N')
if choice.lower() in ['y', 'yes']:
localImagePage.delete(
comment + ' [[:commons:Image:%s]]'
- % filenameOnCommons, prompt = False)
+ % filenameOnCommons, prompt=False)
else:
localImagePage.delete(
comment + ' [[:commons:Image:%s]]'
- % filenameOnCommons, prompt = False)
+ % filenameOnCommons, prompt=False)
else:
pywikibot.output(
u'The image is not identical to the one on Commons.')
diff --git a/statistics_in_wikitable.py b/statistics_in_wikitable.py
index e2604c8..73e2204 100644
--- a/statistics_in_wikitable.py
+++ b/statistics_in_wikitable.py
@@ -38,7 +38,7 @@
class StatisticsBot:
-
+
def __init__(self, screen, your_page):
"""
Constructor. Parameter:
--
To view, visit https://gerrit.wikimedia.org/r/103246
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: Ie759c4d8253e270e8ad7071a23a4eff4dd56f6ed
Gerrit-PatchSet: 1
Gerrit-Project: pywikibot/compat
Gerrit-Branch: master
Gerrit-Owner: Xqt <info(a)gno.de>
Gerrit-Reviewer: Xqt <info(a)gno.de>
Gerrit-Reviewer: jenkins-bot
jenkins-bot has submitted this change and it was merged.
Change subject: [PEP8] changes, remove obsolete imports
......................................................................
[PEP8] changes, remove obsolete imports
Change-Id: Iea4c84c9eebe99ba5d0d722d9e12601bcfb7ff56
---
M imageharvest.py
1 file changed, 35 insertions(+), 26 deletions(-)
Approvals:
Ladsgroup: Looks good to me, approved
jenkins-bot: Verified
diff --git a/imageharvest.py b/imageharvest.py
index 49c8792..7b3203e 100644
--- a/imageharvest.py
+++ b/imageharvest.py
@@ -15,15 +15,19 @@
-shown Choose images shown on the page as well as linked from it
-justshown Choose _only_ images shown on the page, not those linked
"""
+# (C) Pywikibot team, 2004-2013
+#
+# Distributed under the terms of the MIT license.
+#
+__version__ = '$Id$'
+#
-__version__='$Id$'
-
-import re, sys, os
-import wikipedia as pywikibot
-import externals # check for and install needed
+import os
import urllib
+import pywikibot
import BeautifulSoup
import upload
+
def get_imagelinks(url):
"""Given a URL, get all images linked to by the page at that URL."""
@@ -45,35 +49,36 @@
if link:
ext = os.path.splitext(link)[1].lower().strip('.')
if ext in fileformats:
- links.append(urllib.basejoin(url, link))
+ links.append(urllib.basejoin(url, link))
return links
+
def main(give_url, image_url, desc):
url = give_url
if url == '':
if image_url:
- url = pywikibot.input(
- u"What URL range should I check (use $ for the part that is changeable)")
+ url = pywikibot.input(u"What URL range should I check "
+ u"(use $ for the part that is changeable)")
else:
- url = pywikibot.input(
- u"From what URL should I get the images?")
+ url = pywikibot.input(u"From what URL should I get the images?")
if image_url:
- minimum=1
- maximum=99
- answer= pywikibot.input(
+ minimum = 1
+ maximum = 99
+ answer = pywikibot.input(
u"What is the first number to check (default: 1)")
if answer:
- minimum=int(answer)
- answer= pywikibot.input(
+ minimum = int(answer)
+ answer = pywikibot.input(
u"What is the last number to check (default: 99)")
if answer:
- maximum=int(answer)
+ maximum = int(answer)
if not desc:
basicdesc = pywikibot.input(
- u"What text should be added at the end of the description of each image from this url?")
+ u"What text should be added at the end of "
+ u"the description of each image from this url?")
else:
basicdesc = desc
@@ -81,7 +86,7 @@
ilinks = []
i = minimum
while i <= maximum:
- ilinks += [url.replace("$",str(i))]
+ ilinks += [url.replace("$", str(i))]
i += 1
else:
ilinks = get_imagelinks(url)
@@ -94,19 +99,23 @@
desc = pywikibot.input(u"Give the description of this image:")
categories = []
while True:
- cat = pywikibot.input(
- u"Specify a category (or press enter to end adding categories)")
- if not cat.strip(): break
+ cat = pywikibot.input(u"Specify a category (or press enter to "
+ u"end adding categories)")
+ if not cat.strip():
+ break
if ":" in cat:
- categories.append("[["+cat+"]]")
+ categories.append(u"[[%s]]" % cat)
else:
- categories.append("[["+mysite.namespace(14)+":"+cat+"]]")
- desc = desc + "\r\n\r\n" + basicdesc + "\r\n\r\n" + \
- "\r\n".join(categories)
- uploadBot = upload.UploadRobot(image, description = desc)
+ categories.append(u"[[%s:%s]]"
+ % (mysite.namespace(14), cat))
+ desc += "\r\n\r\n" + basicdesc + "\r\n\r\n" + \
+ "\r\n".join(categories)
+ uploadBot = upload.UploadRobot(image, description=desc)
uploadBot.run()
elif answer == 's':
break
+
+
try:
url = u''
image_url = False
--
To view, visit https://gerrit.wikimedia.org/r/103242
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: Iea4c84c9eebe99ba5d0d722d9e12601bcfb7ff56
Gerrit-PatchSet: 1
Gerrit-Project: pywikibot/compat
Gerrit-Branch: master
Gerrit-Owner: Xqt <info(a)gno.de>
Gerrit-Reviewer: Andre Engels <andreengels(a)gmail.com>
Gerrit-Reviewer: DrTrigon <dr.trigon(a)surfeu.ch>
Gerrit-Reviewer: Ladsgroup <ladsgroup(a)gmail.com>
Gerrit-Reviewer: jenkins-bot