jenkins-bot has submitted this change and it was merged.
Change subject: [IMPROV] Missing Python 3 imports
......................................................................
[IMPROV] Missing Python 3 imports
Change-Id: Iac3864b7fc15fe59645acde4e92ab52330e56c8f
---
M pywikibot/compat/query.py
M pywikibot/diff.py
M pywikibot/page.py
M pywikibot/site.py
M pywikibot/userinterfaces/gui.py
M pywikibot/version.py
M scripts/category_redirect.py
M scripts/checkimages.py
M scripts/data_ingestion.py
M scripts/flickrripper.py
M scripts/imagerecat.py
M scripts/maintenance/wikimedia_sites.py
M scripts/makecat.py
M scripts/reflinks.py
M scripts/script_wui.py
M scripts/upload.py
M scripts/weblinkchecker.py
M scripts/welcome.py
18 files changed, 152 insertions(+), 70 deletions(-)
Approvals:
  John Vandenberg: Looks good to me, approved
  jenkins-bot: Verified
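The recurring pattern applied throughout this change is a version guard at import time: check sys.version_info once, bind the renamed Python 3 module to its old Python 2 name (or vice versa), and let the rest of the module use a single name unconditionally. A minimal, illustrative sketch of that pattern (not part of the diff below; the module names shown are real stdlib renames, but the selection here is just an example):

    import sys

    if sys.version_info[0] > 2:
        # Python 3: these stdlib modules were renamed or relocated
        from urllib.parse import urlencode
        from urllib.request import urlopen
        from io import StringIO
    else:
        # Python 2: the original names
        from urllib import urlencode, urlopen
        from StringIO import StringIO

    # Callers can now use urlopen(), urlencode() and StringIO()
    # unconditionally on either interpreter version.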
diff --git a/pywikibot/compat/query.py b/pywikibot/compat/query.py
index a23025a..42ff2c7 100644
--- a/pywikibot/compat/query.py
+++ b/pywikibot/compat/query.py
@@ -2,6 +2,12 @@
 from pywikibot.data import api
 from pywikibot.tools import deprecated, deprecate_arg
+import sys
+if sys.version_info[0] > 2:
+    import io as StringIO
+else:
+    import StringIO
+
 
 
 @deprecated("pywikibot.data.api.Request")
 @deprecate_arg("useAPI", None)
@@ -17,7 +23,6 @@
     if back_response:
         pywikibot.warning(u"back_response is no longer supported; an empty "
                           u"response object will be returned")
-        import StringIO
         res_dummy = StringIO.StringIO()
         res_dummy.__dict__.update({u'code': 0, u'msg': u''})
         return res_dummy, result
diff --git a/pywikibot/diff.py b/pywikibot/diff.py
index 490b0df..346fcba 100644
--- a/pywikibot/diff.py
+++ b/pywikibot/diff.py
@@ -11,7 +11,11 @@
 import difflib
-import itertools
+import sys
+if sys.version_info[0] > 2:
+    from itertools import zip_longest
+else:
+    from itertools import izip_longest as zip_longest
import pywikibot
@@ -129,7 +133,7 @@
         colored_line = u''
         state = 'Close'
-        for char, char_ref in itertools.izip_longest(line, line_ref.strip(), fillvalue=' '):
+        for char, char_ref in zip_longest(line, line_ref.strip(), fillvalue=' '):
             char_tagged = char
             if state == 'Close':
                 if char_ref != ' ':
diff --git a/pywikibot/page.py b/pywikibot/page.py
index d30e649..ca1d169 100644
--- a/pywikibot/page.py
+++ b/pywikibot/page.py
@@ -29,18 +29,18 @@
 if sys.version_info[0] == 2:
     import htmlentitydefs
     from urllib import quote as quote_from_bytes, unquote as unquote_to_bytes
+    from urllib import urlopen
 else:
     unicode = basestring = str
     from html import entities as htmlentitydefs
     from urllib.parse import quote_from_bytes, unquote_to_bytes
+    from urllib.request import urlopen
 
 import logging
 import re
 import sys
 import unicodedata
 import collections
-
-import urllib
logger = logging.getLogger("pywiki.wiki.page")
@@ -1878,7 +1878,7 @@
         """Return image file's MD5 checksum."""
         # FIXME: MD5 might be performed on incomplete file due to server disconnection
         # (see bug #1795683).
-        f = urllib.urlopen(self.fileUrl())
+        f = urlopen(self.fileUrl())
         # TODO: check whether this needs a User-Agent header added
         h = hashlib.md5()
         h.update(f.read())
diff --git a/pywikibot/site.py b/pywikibot/site.py
index 07a280c..14954d5 100644
--- a/pywikibot/site.py
+++ b/pywikibot/site.py
@@ -23,7 +23,6 @@
 from collections import Iterable, Container
 import threading
 import time
-import urllib
 import json
 import copy
@@ -45,8 +44,11 @@
 from pywikibot.echo import Notification
 if sys.version_info[0] > 2:
+    from urllib.parse import urlencode
     basestring = (str,)
     unicode = str
+else:
+    from urllib import urlencode
_logger = "wiki.site"
@@ -811,7 +813,7 @@
     @deprecated("urllib.urlencode()")
     def urlEncode(self, query):
         """DEPRECATED."""
-        return urllib.urlencode(query)
+        return urlencode(query)
@deprecated("pywikibot.comms.http.request") def getUrl(self, path, retry=True, sysop=False, data=None, @@ -825,7 +827,7 @@ from pywikibot.comms import http if data: if not isinstance(data, basestring): - data = urllib.urlencode(data) + data = urlencode(data) return http.request(self, path, method="PUT", body=data) else: return http.request(self, path) diff --git a/pywikibot/userinterfaces/gui.py b/pywikibot/userinterfaces/gui.py index abdbe76..db7f0c3 100644 --- a/pywikibot/userinterfaces/gui.py +++ b/pywikibot/userinterfaces/gui.py @@ -16,9 +16,15 @@ __version__ = '$Id$' #
-import Tkinter
-from ScrolledText import ScrolledText
-import tkSimpleDialog
+import sys
+if sys.version_info[0] > 2:
+    import tkinter as Tkinter
+    from tkinter.scrolledtext import ScrolledText
+    from tkinter import simpledialog as tkSimpleDialog
+else:
+    import Tkinter
+    from ScrolledText import ScrolledText
+    import tkSimpleDialog
 from idlelib import SearchDialog, ReplaceDialog, configDialog
 from idlelib.configHandler import idleConf
diff --git a/pywikibot/version.py b/pywikibot/version.py
index 9243910..8fc5685 100644
--- a/pywikibot/version.py
+++ b/pywikibot/version.py
@@ -14,6 +14,7 @@
 import time
 import datetime
 import subprocess
+import sys
import pywikibot.config2 as config
@@ -145,7 +146,10 @@
     @return: the git hash
     @rtype: str
     """
-    from StringIO import StringIO
+    if sys.version_info[0] > 2:
+        from io import StringIO
+    else:
+        from StringIO import StringIO
     import xml.dom.minidom
     from pywikibot.comms import http
diff --git a/scripts/category_redirect.py b/scripts/category_redirect.py
index 5eb105a..b2123b2 100755
--- a/scripts/category_redirect.py
+++ b/scripts/category_redirect.py
@@ -21,8 +21,7 @@
 #
 __version__ = '$Id$'
 #
-
-import cPickle
+import sys
 import re
 import time
 from datetime import datetime, timedelta
@@ -30,6 +29,11 @@
 from pywikibot import pagegenerators
 from pywikibot import i18n
+if sys.version_info[0] > 2:
+    import pickle as cPickle
+else:
+    import cPickle
+
class CategoryRedirectBot(object):
diff --git a/scripts/checkimages.py b/scripts/checkimages.py
index 1343c6e..8004226 100644
--- a/scripts/checkimages.py
+++ b/scripts/checkimages.py
@@ -95,7 +95,6 @@
 import time
 import datetime
 import locale
-import urllib
 import pywikibot
 from pywikibot import pagegenerators as pg
 from pywikibot import config, i18n
@@ -917,14 +916,8 @@
         return imageName
     def convert_to_url(self, page):
-        # Function stolen from wikipedia.py
-        """The name of the page this Page refers to, in a form suitable for the
-        URL of the page.
-
-        """
-        title = page.replace(u" ", u"_")
-        encodedTitle = title.encode(self.site.encoding())
-        return urllib.quote(encodedTitle)
+        """Return the page title suitable as for an URL."""
+        return page.title(asUrl=True)
 
     def countEdits(self, pagename, userlist):
         """Function to count the edit of a user or a list of users in a page."""
diff --git a/scripts/data_ingestion.py b/scripts/data_ingestion.py
index a791b0d..83ec1c0 100755
--- a/scripts/data_ingestion.py
+++ b/scripts/data_ingestion.py
@@ -10,14 +10,21 @@
 #
 import posixpath
-import urlparse
-import urllib
 import hashlib
 import base64
-import StringIO
+import sys
 import pywikibot
 import upload
+
+if sys.version_info[0] > 2:
+    from urllib.parse import urlparse
+    from urllib.request import urlopen
+    import io as StringIO
+else:
+    from urlparse import urlparse
+    from urllib import urlopen
+    import StringIO
 
 
 class Photo(object):
@@ -37,7 +44,7 @@
         self.metadata = metadata
         self.metadata["_url"] = URL
         self.metadata["_filename"] = filename = posixpath.split(
-            urlparse.urlparse(URL)[2])[1]
+            urlparse(URL)[2])[1]
         self.metadata["_ext"] = ext = filename.split(".")[-1]
         if ext == filename:
             self.metadata["_ext"] = ext = None
@@ -50,7 +57,7 @@
         TODO: Add exception handling
         """
         if not self.contents:
-            imageFile = urllib.urlopen(self.URL).read()
+            imageFile = urlopen(self.URL).read()
             self.contents = StringIO.StringIO(imageFile)
         return self.contents
@@ -183,7 +190,7 @@
         TODO: Add exception handling
         """
-        imageFile = urllib.urlopen(photoUrl).read()
+        imageFile = urlopen(photoUrl).read()
         return StringIO.StringIO(imageFile)
     def findDuplicateImages(self, photo=None, site=pywikibot.Site(u'commons', u'commons')):
diff --git a/scripts/flickrripper.py b/scripts/flickrripper.py
index 37a8808..d5550d2 100644
--- a/scripts/flickrripper.py
+++ b/scripts/flickrripper.py
@@ -32,12 +32,11 @@
 __version__ = '$Id$'
 #
-import urllib
 import re
-import StringIO
 import hashlib
 import base64
 import time
+import sys
 import pywikibot
 from pywikibot import config, textlib
 import upload
@@ -45,16 +44,26 @@
 try:
     import flickrapi  # see: http://stuvel.eu/projects/flickrapi
 except ImportError:
-    import sys
     pywikibot.error('This script requires the python flickrapi module')
     pywikibot.error('See: http://stuvel.eu/projects/flickrapi')
     pywikibot.exception()
     sys.exit()
-from Tkinter import (
-    Tk, Label, Entry, Scrollbar, Text, Button,
-    END, VERTICAL, NORMAL, WORD
-)
+if sys.version_info[0] > 2:
+    from urllib.parse import urlencode
+    from urllib.request import urlopen
+    import io as StringIO
+    from tkinter import (
+        Tk, Label, Entry, Scrollbar, Text, Button,
+        END, VERTICAL, NORMAL, WORD
+    )
+else:
+    from urllib import urlencode, urlopen
+    import StringIO
+    from Tkinter import (
+        Tk, Label, Entry, Scrollbar, Text, Button,
+        END, VERTICAL, NORMAL, WORD
+    )
 from PIL import Image, ImageTk  # see: http://www.pythonware.com/products/pil/
 
 flickr_allowed_license = {
@@ -118,7 +127,7 @@
     TODO: Add exception handling
""" - imageFile = urllib.urlopen(photoUrl).read() + imageFile = urlopen(photoUrl).read() return StringIO.StringIO(imageFile)
@@ -150,9 +159,9 @@
     TODO: Add exception handling, try a couple of times
     """
-    parameters = urllib.urlencode({'id': photo_id, 'raw': 'on'})
+    parameters = urlencode({'id': photo_id, 'raw': 'on'})
-    rawDescription = urllib.urlopen(
+    rawDescription = urlopen(
         "http://wikipedia.ramselehof.de/flinfo.php?%s" % parameters).read()
 
     return rawDescription.decode('utf-8')
diff --git a/scripts/imagerecat.py b/scripts/imagerecat.py
index fbeee07..295bb7f 100644
--- a/scripts/imagerecat.py
+++ b/scripts/imagerecat.py
@@ -34,13 +34,20 @@
 #
 import re
-import urllib
 import time
 import socket
 import xml.etree.ElementTree
+import sys
 import pywikibot
 from pywikibot import pagegenerators, textlib
+
+if sys.version_info[0] > 2:
+    from urllib.parse import urlencode
+    from urllib.request import urlopen
+else:
+    from urllib import urlencode, urlopen
+
 category_blacklist = []
 countries = []
@@ -119,14 +126,14 @@
     lang = site.language()
     family = site.family.name
     if lang == u'commons' and family == u'commons':
-        parameters = urllib.urlencode(
+        parameters = urlencode(
             {'i': imagepage.title(withNamespace=False).encode('utf-8'),
              'r': 'on',
              'go-clean': 'Find+Categories',
              'p': search_wikis,
              'cl': hint_wiki})
     elif family == u'wikipedia':
-        parameters = urllib.urlencode(
+        parameters = urlencode(
             {'i': imagepage.title(withNamespace=False).encode('utf-8'),
              'r': 'on',
              'go-move': 'Find+Categories',
@@ -147,7 +154,7 @@
     try:
         if tries < maxtries:
             tries += 1
-            commonsHelperPage = urllib.urlopen(
+            commonsHelperPage = urlopen(
                 "https://toolserver.org/~daniel/WikiSense/CommonSense.php?%s" % parameters)
             matches = commonsenseRe.search(
                 commonsHelperPage.read().decode('utf-8'))
@@ -210,10 +217,10 @@
     """
     result = []
     gotInfo = False
-    parameters = urllib.urlencode({'lat': latitude, 'lon': longitude, 'accept-language': 'en'})
+    parameters = urlencode({'lat': latitude, 'lon': longitude, 'accept-language': 'en'})
     while not gotInfo:
         try:
-            page = urllib.urlopen("https://nominatim.openstreetmap.org/reverse?format=xml&%s" % parameters)
+            page = urlopen("https://nominatim.openstreetmap.org/reverse?format=xml&%s" % parameters)
             et = xml.etree.ElementTree.parse(page)
             gotInfo = True
         except IOError:
@@ -366,11 +373,11 @@
     for cat in categories:
         cat = cat.replace('_', ' ')
         toFilter = toFilter + "[[Category:" + cat + "]]\n"
-    parameters = urllib.urlencode({'source': toFilter.encode('utf-8'),
+    parameters = urlencode({'source': toFilter.encode('utf-8'),
                                    'bot': '1'})
     filterCategoriesRe = re.compile('\[\[Category:([^\]]*)\]\]')
     try:
-        filterCategoriesPage = urllib.urlopen(
+        filterCategoriesPage = urlopen(
             "https://toolserver.org/~multichill/filtercats.php?%s" % parameters)
         result = filterCategoriesRe.findall(
             filterCategoriesPage.read().decode('utf-8'))
diff --git a/scripts/maintenance/wikimedia_sites.py b/scripts/maintenance/wikimedia_sites.py
index 76a9361..fa94029 100644
--- a/scripts/maintenance/wikimedia_sites.py
+++ b/scripts/maintenance/wikimedia_sites.py
@@ -14,11 +14,16 @@
 import re
 import codecs
-import urllib
 from xml.etree import cElementTree
+import sys
 import pywikibot
 from pywikibot.site import Family
+
+if sys.version_info[0] > 2:
+    from urllib.request import urlopen
+else:
+    from urllib import urlopen
URL = 'https://wikistats.wmflabs.org/api.php?action=dump&table=%s&format=xm...'
@@ -44,7 +49,7 @@
     original = Family(family).languages_by_size
     obsolete = Family(family).obsolete
-    feed = urllib.urlopen(URL % familiesDict[family])
+    feed = urlopen(URL % familiesDict[family])
     tree = cElementTree.parse(feed)
     new = []
diff --git a/scripts/makecat.py b/scripts/makecat.py
index f1c8a7e..e523cff 100644
--- a/scripts/makecat.py
+++ b/scripts/makecat.py
@@ -122,6 +122,8 @@
             pywikibot.output(u'')
             pywikibot.output(u"==%s==" % pl.title())
             while True:
+                # TODO: Use pywikibot.inputChoice?
+                # (needs the support for 'other options')
                 answer = pywikibot.input("[y]es/[n]o/[i]gnore/[o]ther options?")
                 if answer == 'y':
                     include(pl)
diff --git a/scripts/reflinks.py b/scripts/reflinks.py
index 2d35316..11031fd 100644
--- a/scripts/reflinks.py
+++ b/scripts/reflinks.py
@@ -43,19 +43,29 @@
 #
 import re
-import urllib2
-import httplib
 import socket
 import codecs
 import subprocess
 import tempfile
 import os
 import gzip
-import StringIO
+import sys
 import pywikibot
 from pywikibot import i18n, pagegenerators, textlib, xmlreader, Bot
 import noreferences
+
+# TODO: Convert to httlib2
+if sys.version_info[0] > 2:
+    from urllib.parse import quote
+    from urllib.request import urlopen
+    from urllib.error import HTTPError, URLError
+    import http.client as httplib
+    import io as StringIO
+else:
+    from urllib2 import quote, urlopen, HTTPError, URLError
+    import httplib
+    import StringIO
 
 docuReplacements = {
     '&params;': pagegenerators.parameterHelp
 }
@@ -528,10 +538,10 @@
         try:
             socket.setdefaulttimeout(20)
             try:
-                f = urllib2.urlopen(ref.url.decode("utf8"))
+                f = urlopen(ref.url.decode("utf8"))
             except UnicodeError:
-                ref.url = urllib2.quote(ref.url.encode("utf8"), "://")
-                f = urllib2.urlopen(ref.url)
+                ref.url = quote(ref.url.encode("utf8"), "://")
+                f = urlopen(ref.url)
             # Try to get Content-Type from server
             headers = f.info()
             contentType = headers.getheader('Content-Type')
@@ -596,7 +606,7 @@
                     u'\03{lightred}Bad link\03{default} : %s in %s'
                     % (ref.url, page.title(asLink=True)))
                 continue
-            except urllib2.HTTPError as e:
+            except HTTPError as e:
                 pywikibot.output(u'HTTP error (%s) for %s on %s'
                                  % (e.code, ref.url,
                                     page.title(asLink=True)),
@@ -608,7 +618,7 @@
                 repl = ref.refDead()
                 new_text = new_text.replace(match.group(), repl)
                 continue
-            except (urllib2.URLError,
+            except (URLError,
                     socket.error,
                     IOError,
                     httplib.error) as e:
diff --git a/scripts/script_wui.py b/scripts/script_wui.py
index ab2a595..1e2ef0c 100755
--- a/scripts/script_wui.py
+++ b/scripts/script_wui.py
@@ -62,7 +62,6 @@
 import datetime
-import thread
 import threading
 import sys
 import os
@@ -84,6 +83,11 @@
 import pywikibot
 # pywikibot.botirc depends on https://pypi.python.org/pypi/irc
 import pywikibot.botirc
+
+if sys.version_info[0] > 2:
+    import _thread as thread
+else:
+    import thread
 
 bot_config = {
@@ -203,7 +207,10 @@
 def main_script(page, rev=None, params=None):
     # http://opensourcehacker.com/2011/02/23/temporarily-capturing-python-logging-...
     # https://docs.python.org/release/2.6/library/logging.html
-    from StringIO import StringIO
+    if sys.version_info[0] > 2:
+        from io import StringIO
+    else:
+        from StringIO import StringIO
     import logging
     # safety; default mode is safe (no writing)
diff --git a/scripts/upload.py b/scripts/upload.py
index 9261661..17ae914 100755
--- a/scripts/upload.py
+++ b/scripts/upload.py
@@ -40,16 +40,22 @@
 import os
 import time
-import urllib
-import urlparse
 import tempfile
 import re
 import math
+import sys
 import pywikibot
 import pywikibot.data.api
 from pywikibot import config
 from pywikibot.bot import QuitKeyboardInterrupt
+
+if sys.version_info[0] > 2:
+    from urllib.parse import urlparse
+    from urllib.request import URLopener
+else:
+    from urlparse import urlparse
+    from urllib import URLopener
 
 
 class UploadRobot:
@@ -91,7 +97,7 @@
         rlen = 0
         _contents = None
         dt = 15
-        uo = urllib.URLopener()
+        uo = URLopener()
         retrieved = False
         while not retrieved:
@@ -149,7 +155,7 @@
         # Filename may be either a local file path or a URL
         if "://" in filename:
             # extract the path portion of the URL
-            filename = urlparse.urlparse(filename).path
+            filename = urlparse(filename).path
             filename = os.path.basename(filename)
         if self.useFilename:
diff --git a/scripts/weblinkchecker.py b/scripts/weblinkchecker.py
index 1ead1d9..262d695 100644
--- a/scripts/weblinkchecker.py
+++ b/scripts/weblinkchecker.py
@@ -99,16 +99,24 @@
 import re
 import codecs
 import pickle
-import httplib
 import socket
-import urlparse
-import urllib
 import threading
 import time
+import sys
 
 import pywikibot
 from pywikibot import i18n, config, pagegenerators, textlib, xmlreader, weblib
+# TODO: Convert to httlib2
+if sys.version_info[0] > 2:
+    import urllib.parse as urlparse
+    import urllib.request as urllib
+    import http.client as httplib
+else:
+    import urlparse
+    import urllib
+    import httplib
+
 docuReplacements = {
     '&params;': pagegenerators.parameterHelp
 }
diff --git a/scripts/welcome.py b/scripts/welcome.py
index 0456bf6..1de697e 100644
--- a/scripts/welcome.py
+++ b/scripts/welcome.py
@@ -186,7 +186,7 @@
 import locale
 import codecs
 from random import choice
-from string import capitalize
+import sys
 import pywikibot
 from pywikibot import i18n
 from pywikibot import config
@@ -651,7 +651,7 @@
                 'Log page is not exist, getting information for page creation')
             text = i18n.translate(self.site, logpage_header)
             text += u'\n!%s' % self.site.namespace(2)
-            text += u'\n!%s' % capitalize(
+            text += u'\n!%s' % str.capitalize(
                 self.site.mediawiki_message('contribslink'))
 
             for result in queue:
@@ -992,7 +992,10 @@
     # If there is the savedata, the script must save the number_user.
     if globalvar.randomSign and globalvar.saveSignIndex and \
        bot.welcomed_users:
-        import cPickle
+        if sys.version_info[0] > 2:
+            import pickle as cPickle
+        else:
+            import cPickle
         with open(filename, 'w') as f:
             cPickle.dump(bot.welcomed_users, f)