http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9903
Revision: 9903
Author: drtrigon
Date: 2012-02-17 12:19:57 +0000 (Fri, 17 Feb 2012)
Log Message:
-----------
Add capabilities from the DrTrigonBot 'textlib' script: 'glue_template_and_params'
(the inverse of 'extract_templates_and_params')
Modified Paths:
--------------
trunk/pywikipedia/pywikibot/textlib.py
Modified: trunk/pywikipedia/pywikibot/textlib.py
===================================================================
--- trunk/pywikipedia/pywikibot/textlib.py 2012-02-17 11:34:20 UTC (rev 9902)
+++ trunk/pywikipedia/pywikibot/textlib.py 2012-02-17 12:19:57 UTC (rev 9903)
@@ -880,3 +880,19 @@
# Add it to the result
result.append((name, params))
return result
+
+
+def glue_template_and_params(template_and_params):
+ """Return wiki text of template glued from params.
+
+ You can pass items returned by extract_templates_and_params here to
+ build equivalent template wiki text (note that the order of the
+ params may change, since they are stored in a dict).
+ """
+ (template, params) = template_and_params
+
+ text = u''
+ for item in params:
+ text += u'|%s=%s\n' % (item, params[item])
+
+ return u'{{%s\n%s}}' % (template, text)
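For illustration, a minimal round-trip sketch; the wikitext and the import path are illustrative assumptions, not part of the commit:

    from pywikibot import textlib

    # Hypothetical wikitext containing a single template call:
    text = u'{{Infobox person\n|name=Ada Lovelace\n|born=1815\n}}'
    # extract_templates_and_params returns a list of (name, params) tuples,
    # e.g. [(u'Infobox person', {u'name': u'Ada Lovelace', u'born': u'1815'})]
    pairs = textlib.extract_templates_and_params(text)
    # Gluing one tuple back yields equivalent wikitext; because params is a
    # dict, the parameter order is not guaranteed to survive the round-trip.
    print textlib.glue_template_and_params(pairs[0])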
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9902
Revision: 9902
Author: drtrigon
Date: 2012-02-17 11:34:20 +0000 (Fri, 17 Feb 2012)
Log Message:
-----------
Add capabilities from the DrTrigonBot 'wikipedia' script: 'getParsedString' and 'getExpandedString'
Modified Paths:
--------------
trunk/pywikipedia/wikipedia.py
Modified: trunk/pywikipedia/wikipedia.py
===================================================================
--- trunk/pywikipedia/wikipedia.py 2012-02-16 22:44:36 UTC (rev 9901)
+++ trunk/pywikipedia/wikipedia.py 2012-02-17 11:34:20 UTC (rev 9902)
@@ -4807,8 +4807,11 @@
versionnumber: Return int identifying the MediaWiki version.
live_version: Return version number read from Special:Version.
checkCharset(charset): Warn if charset doesn't match family file.
- server_time : returns server time (currently userclock depending)
+ server_time: returns server time (currently depends on the user clock)
+ getParsedString: Parses the string with the API and returns HTML content.
+ getExpandedString: Expands the string with the API and returns wiki content.
+
linktrail: Return regex for trailing chars displayed as part of a link.
disambcategory: Category in which disambiguation pages are listed.
@@ -7581,6 +7584,67 @@
files.append(image)
return files
+ def getParsedString(self, string, keeptags = [u'*']):
+ """Parses the string with API and returns html content.
+
+ @param string: String that should be parsed.
+ @type string: string
+ @param keeptags: Defines which tags (wiki, HTML) should NOT be removed.
+ @type keeptags: list
+
+ Returns the string given, parsed through the wiki parser.
+ """
+
+ if not self.has_api():
+ raise Exception('parse: no API: not implemented')
+
+ # call the wiki to get info
+ params = {
+ u'action' : u'parse',
+ u'text' : string,
+ }
+
+ pywikibot.get_throttle()
+ pywikibot.output(u"Parsing string through the wiki parser via API.")
+
+ result = query.GetData(params, self)
+ r = result[u'parse'][u'text'][u'*']
+
+ # disable/remove comments
+ r = pywikibot.removeDisabledParts(r, tags = ['comments']).strip()
+
+ # disable/remove ALL tags
+ if not (keeptags == [u'*']):
+ r = removeHTMLParts(r, keeptags = keeptags).strip()
+
+ return r
+
+ def getExpandedString(self, string):
+ """Expands the string with API and returns wiki content.
+
+ @param string: String that should be expanded.
+ @type string: string
+
+ Returns the string given, expanded through the wiki parser.
+ """
+
+ if not self.has_api():
+ raise Exception('expandtemplates: no API: not implemented')
+
+ # call the wiki to get info
+ params = {
+ u'action' : u'expandtemplates',
+ u'text' : string,
+ }
+
+ pywikibot.get_throttle()
+ pywikibot.output(u"Expanding string through the wiki parser via API.")
+
+ result = query.GetData(params, self)
+ r = result[u'expandtemplates'][u'*']
+
+ return r
+
# Caches to provide faster access
_sites = {}
_namespaceCache = {}
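A minimal usage sketch of the two new methods; it assumes a configured user-config.py, and the wikitext and the keeptags value are illustrative:

    import wikipedia as pywikibot

    site = pywikibot.getSite()
    # Rendered HTML of the given wikitext (action=parse):
    html = site.getParsedString(u"'''bold''' text")
    # The same, but afterwards stripping every tag except <a>; the default
    # keeptags value [u'*'] keeps everything:
    plain = site.getParsedString(u"'''bold''' text", keeptags=[u'a'])
    # Template-expanded wikitext (action=expandtemplates):
    wiki = site.getExpandedString(u'{{CURRENTYEAR}}')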
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9901
Revision: 9901
Author: drtrigon
Date: 2012-02-16 22:44:36 +0000 (Thu, 16 Feb 2012)
Log Message:
-----------
updated analogously to the rewrite branch: 'getUrl' moved/split into 'pywikibot.comms.http.request';
the generic routine does not attempt to (re)log in to the target if it is a wiki
Modified Paths:
--------------
trunk/pywikipedia/wikipedia.py
Added Paths:
-----------
trunk/pywikipedia/pywikibot/comms/
trunk/pywikipedia/pywikibot/comms/__init__.py
trunk/pywikipedia/pywikibot/comms/http.py
Added: trunk/pywikipedia/pywikibot/comms/__init__.py
===================================================================
--- trunk/pywikipedia/pywikibot/comms/__init__.py (rev 0)
+++ trunk/pywikipedia/pywikibot/comms/__init__.py 2012-02-16 22:44:36 UTC (rev 9901)
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+#
+# (C) Pywikipedia bot team, 2012
+#
+# Distributed under the terms of the MIT license.
+#
+__version__ = '$Id$'
Property changes on: trunk/pywikipedia/pywikibot/comms/__init__.py
___________________________________________________________________
Added: svn:keywords
+ Id
Added: svn:eol-style
+ native
Added: trunk/pywikipedia/pywikibot/comms/http.py
===================================================================
--- trunk/pywikipedia/pywikibot/comms/http.py (rev 0)
+++ trunk/pywikipedia/pywikibot/comms/http.py 2012-02-16 22:44:36 UTC (rev 9901)
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+"""
+Basic HTTP access interface.
+
+This module handles communication between the bot and the HTTP threads.
+
+This module is responsible for
+ - Providing a (blocking) interface for HTTP requests
+ - Urlencoding all data
+ - Basic HTTP error handling
+"""
+
+#
+# (C) Pywikipedia bot team, 2012
+#
+# Distributed under the terms of the MIT license.
+#
+
+__version__ = '$Id$'
+
+import urllib2
+
+import config
+from pywikibot import *
+import wikipedia as pywikibot
+
+
+# global variables
+
+# import useragent and MyURLopener from global namespace
+useragent = pywikibot.useragent
+MyURLopener = pywikibot.MyURLopener
+
+def request(site, uri, retry = None, sysop = False, data = None, compress = True,
+ no_hostname = False, cookie_only=False, refer=None, back_response=False):
+ """
+ Low-level routine to get a URL from any source (may be the wiki).
+
+ Parameters:
+ @param site - The Site to connect to.
+ @param uri - The absolute uri, without the hostname.
+ @param retry - If True, retries loading the page when a network error
+ occurs.
+ @param sysop - If True, the sysop account's cookie will be used.
+ @param data - An optional dict providing extra post request
+ parameters.
+ @param cookie_only - Only return the cookie the server sent us back
+
+ @return: Returns the HTML text of the page converted to unicode.
+ """
+
+ if retry is None:
+ retry = config.retry_on_fail
+
+ headers = {
+ 'User-agent': useragent,
+ #'Accept-Language': config.mylang,
+ #'Accept-Charset': config.textfile_encoding,
+ #'Keep-Alive': '115',
+ #'Connection': 'keep-alive',
+ #'Cache-Control': 'max-age=0',
+ #'': '',
+ }
+
+ if not no_hostname and site.cookies(sysop = sysop):
+ headers['Cookie'] = site.cookies(sysop = sysop)
+ if compress:
+ headers['Accept-encoding'] = 'gzip'
+
+ if refer:
+ headers['Refer'] = refer
+
+ if no_hostname: # This allows users to also fetch toolserver scripts
+ url = uri # and other useful pages without extra helper functions.
+ else:
+ url = '%s://%s%s' % (site.protocol(), site.hostname(), uri)
+ data = site.urlEncode(data)
+
+ # Try to retrieve the page until it was successfully loaded (just in
+ # case the server is down or overloaded).
+ # Wait for retry_idle_time minutes (growing!) between retries.
+ retry_idle_time = 1
+ retry_attempt = 0
+ while True:
+ try:
+ req = urllib2.Request(url, data, headers)
+ f = MyURLopener.open(req)
+
+ # read & info can raise socket.error
+ text = f.read()
+ headers = f.info()
+ break
+ except KeyboardInterrupt:
+ raise
+ except urllib2.HTTPError, e:
+ if e.code in [401, 404]:
+ raise PageNotFound(
+u'Page %s could not be retrieved. Check your family file.'
+ % url)
+ elif e.code in [403]:
+ raise PageNotFound(
+u'Page %s could not be retrieved. Check your virus wall.'
+ % url)
+ elif e.code == 504:
+ output(u'HTTPError: %s %s' % (e.code, e.msg))
+ if retry:
+ retry_attempt += 1
+ if retry_attempt > config.maxretries:
+ raise MaxTriesExceededError()
+ output(
+u"WARNING: Could not open '%s'.Maybe the server or\n your connection is down. Retrying in %i minutes..."
+ % (url, retry_idle_time))
+ time.sleep(retry_idle_time * 60)
+ # Next time wait longer,
+ # but not longer than half an hour
+ retry_idle_time *= 2
+ if retry_idle_time > 30:
+ retry_idle_time = 30
+ continue
+ raise
+ else:
+ output(u"Result: %s %s" % (e.code, e.msg))
+ raise
+ except Exception, e:
+ output(u'%s' %e)
+ if retry:
+ retry_attempt += 1
+ if retry_attempt > config.maxretries:
+ raise MaxTriesExceededError()
+ output(
+u"WARNING: Could not open '%s'. Maybe the server or\n your connection is down. Retrying in %i minutes..."
+ % (url, retry_idle_time))
+ time.sleep(retry_idle_time * 60)
+ retry_idle_time *= 2
+ if retry_idle_time > 30:
+ retry_idle_time = 30
+ continue
+
+ raise
+ # Check whether the server sent cookies back; if so, update ours.
+ if hasattr(f, 'sheaders'):
+ ck = f.sheaders
+ else:
+ ck = f.info().getallmatchingheaders('set-cookie')
+ if not no_hostname and ck:
+ Reat=re.compile(': (.*?)=(.*?);')
+ tmpc = {}
+ for d in ck:
+ m = Reat.search(d)
+ if m: tmpc[m.group(1)] = m.group(2)
+ site.updateCookies(tmpc, sysop)
+
+ if cookie_only:
+ return headers.get('set-cookie', '')
+ contentType = headers.get('content-type', '')
+ contentEncoding = headers.get('content-encoding', '')
+
+ # Ensure that all sent data is received
+ # In rare cases we found a double Content-Length in the header.
+ # We need to split it to get a single value
+ content_length = int(headers.get('content-length', '0').split(',')[0])
+ if content_length != len(text) and 'content-length' in headers:
+ output(
+ u'Warning! len(text) does not match content-length: %s != %s'
+ % (len(text), content_length))
+ return request(site, uri, retry, sysop, data, compress, no_hostname,
+ cookie_only, refer, back_response)
+
+ if compress and contentEncoding == 'gzip':
+ text = pywikibot.decompress_gzip(text)
+
+ R = re.compile('charset=([^\'\";]+)')
+ m = R.search(contentType)
+ if m:
+ charset = m.group(1)
+ else:
+ if verbose:
+ output(u"WARNING: No character set found.")
+ # UTF-8 as default
+ charset = 'utf-8'
+ # Check if this is the charset we expected
+ site.checkCharset(charset)
+ # Convert HTML to Unicode
+ try:
+ text = unicode(text, charset, errors = 'strict')
+ except UnicodeDecodeError, e:
+ print e
+ if no_hostname:
+ output(u'ERROR: Invalid characters found on %s, replaced by \\ufffd.' % uri)
+ else:
+ output(u'ERROR: Invalid characters found on %s://%s%s, replaced by \\ufffd.' % (site.protocol(), site.hostname(), uri))
+ # We use error='replace' in case of bad encoding.
+ text = unicode(text, charset, errors = 'replace')
+
+ if back_response:
+ return f, text
+
+ return text
Property changes on: trunk/pywikipedia/pywikibot/comms/http.py
___________________________________________________________________
Added: svn:keywords
+ Id
Added: svn:eol-style
+ native
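On failure the new request() routine retries with a growing pause: the idle time doubles after every attempt and is capped at half an hour. A standalone sketch of the resulting schedule (illustration only, not part of the commit):

    retry_idle_time = 1
    schedule = []
    for attempt in range(8):
        schedule.append(retry_idle_time)
        # Next time wait longer, but never longer than half an hour.
        retry_idle_time = min(retry_idle_time * 2, 30)
    print schedule  # [1, 2, 4, 8, 16, 30, 30, 30] (minutes)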
Modified: trunk/pywikipedia/wikipedia.py
===================================================================
--- trunk/pywikipedia/wikipedia.py 2012-02-16 20:41:18 UTC (rev 9900)
+++ trunk/pywikipedia/wikipedia.py 2012-02-16 22:44:36 UTC (rev 9901)
@@ -5554,10 +5554,12 @@
return f, text
+ #@deprecated("pywikibot.comms.http.request") # in 'trunk' not yet...
def getUrl(self, path, retry = None, sysop = False, data = None, compress = True,
no_hostname = False, cookie_only=False, refer=None, back_response=False):
"""
- Low-level routine to get a URL from the wiki.
+ Low-level routine to get a URL from the wiki. Tries to log in if the
+ target is another wiki.
Parameters:
path - The absolute path, without the hostname.
@@ -5569,150 +5571,11 @@
Returns the HTML text of the page converted to unicode.
"""
+ from pywikibot.comms import http
- if retry is None:
- retry = config.retry_on_fail
+ f, text = http.request(self, path, retry, sysop, data, compress,
+ no_hostname, cookie_only, refer, back_response = True)
- headers = {
- 'User-agent': useragent,
- #'Accept-Language': config.mylang,
- #'Accept-Charset': config.textfile_encoding,
- #'Keep-Alive': '115',
- #'Connection': 'keep-alive',
- #'Cache-Control': 'max-age=0',
- #'': '',
- }
-
- if not no_hostname and self.cookies(sysop = sysop):
- headers['Cookie'] = self.cookies(sysop = sysop)
- if compress:
- headers['Accept-encoding'] = 'gzip'
-
- if refer:
- headers['Refer'] = refer
-
- if no_hostname: # This allow users to parse also toolserver's script
- url = path # and other useful pages without using some other functions.
- else:
- url = '%s://%s%s' % (self.protocol(), self.hostname(), path)
- data = self.urlEncode(data)
-
- # Try to retrieve the page until it was successfully loaded (just in
- # case the server is down or overloaded).
- # Wait for retry_idle_time minutes (growing!) between retries.
- retry_idle_time = 1
- retry_attempt = 0
- while True:
- try:
- request = urllib2.Request(url, data, headers)
- f = MyURLopener.open(request)
-
- # read & info can raise socket.error
- text = f.read()
- headers = f.info()
- break
- except KeyboardInterrupt:
- raise
- except urllib2.HTTPError, e:
- if e.code in [401, 404]:
- raise PageNotFound(
-u'Page %s could not be retrieved. Check your family file.'
- % url)
- elif e.code in [403]:
- raise PageNotFound(
-u'Page %s could not be retrieved. Check your virus wall.'
- % url)
- elif e.code == 504:
- output(u'HTTPError: %s %s' % (e.code, e.msg))
- if retry:
- retry_attempt += 1
- if retry_attempt > config.maxretries:
- raise MaxTriesExceededError()
- output(
-u"WARNING: Could not open '%s'.Maybe the server or\n your connection is down. Retrying in %i minutes..."
- % (url, retry_idle_time))
- time.sleep(retry_idle_time * 60)
- # Next time wait longer,
- # but not longer than half an hour
- retry_idle_time *= 2
- if retry_idle_time > 30:
- retry_idle_time = 30
- continue
- raise
- else:
- output(u"Result: %s %s" % (e.code, e.msg))
- raise
- except Exception, e:
- output(u'%s' %e)
- if retry:
- retry_attempt += 1
- if retry_attempt > config.maxretries:
- raise MaxTriesExceededError()
- output(
-u"WARNING: Could not open '%s'. Maybe the server or\n your connection is down. Retrying in %i minutes..."
- % (url, retry_idle_time))
- time.sleep(retry_idle_time * 60)
- retry_idle_time *= 2
- if retry_idle_time > 30:
- retry_idle_time = 30
- continue
-
- raise
- # check cookies return or not, if return, send its to update.
- if hasattr(f, 'sheaders'):
- ck = f.sheaders
- else:
- ck = f.info().getallmatchingheaders('set-cookie')
- if not no_hostname and ck:
- Reat=re.compile(': (.*?)=(.*?);')
- tmpc = {}
- for d in ck:
- m = Reat.search(d)
- if m: tmpc[m.group(1)] = m.group(2)
- self.updateCookies(tmpc, sysop)
-
- if cookie_only:
- return headers.get('set-cookie', '')
- contentType = headers.get('content-type', '')
- contentEncoding = headers.get('content-encoding', '')
-
- # Ensure that all sent data is received
- # In rare cases we found a douple Content-Length in the header.
- # We need to split it to get a value
- content_length = int(headers.get('content-length', '0').split(',')[0])
- if content_length != len(text) and 'content-length' in headers:
- output(
- u'Warning! len(text) does not match content-length: %s != %s'
- % (len(text), content_length))
- return self.getUrl(path, retry, sysop, data, compress, no_hostname,
- cookie_only, back_response)
-
- if compress and contentEncoding == 'gzip':
- text = decompress_gzip(text)
-
- R = re.compile('charset=([^\'\";]+)')
- m = R.search(contentType)
- if m:
- charset = m.group(1)
- else:
- if verbose:
- output(u"WARNING: No character set found.")
- # UTF-8 as default
- charset = 'utf-8'
- # Check if this is the charset we expected
- self.checkCharset(charset)
- # Convert HTML to Unicode
- try:
- text = unicode(text, charset, errors = 'strict')
- except UnicodeDecodeError, e:
- print e
- if no_hostname:
- output(u'ERROR: Invalid characters found on %s, replaced by \\ufffd.' % path)
- else:
- output(u'ERROR: Invalid characters found on %s://%s%s, replaced by \\ufffd.' % (self.protocol(), self.hostname(), path))
- # We use error='replace' in case of bad encoding.
- text = unicode(text, charset, errors = 'replace')
-
# If a wiki page, get user data
self._getUserDataOld(text, sysop = sysop)
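With this change, Site.getUrl() becomes a thin wrapper that delegates to the new module and then refreshes user data from the response. A minimal sketch of calling the generic routine directly; it assumes a configured site, and the path is an illustrative assumption:

    import wikipedia as pywikibot
    from pywikibot.comms import http

    site = pywikibot.getSite()
    # Generic fetch; unlike Site.getUrl(), this never tries to (re)log in:
    text = http.request(site, '/wiki/Special:Version')
    # Ask for the raw response object as well as the decoded text:
    f, text = http.request(site, '/wiki/Special:Version', back_response=True)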
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9900
Revision: 9900
Author: drtrigon
Date: 2012-02-16 20:41:18 +0000 (Thu, 16 Feb 2012)
Log Message:
-----------
bug fix: 'live_version' did not work with e.g. arwiki because an IndexError was raised
Modified Paths:
--------------
trunk/pywikipedia/wikipedia.py
Modified: trunk/pywikipedia/wikipedia.py
===================================================================
--- trunk/pywikipedia/wikipedia.py 2012-02-16 18:02:32 UTC (rev 9899)
+++ trunk/pywikipedia/wikipedia.py 2012-02-16 20:41:18 UTC (rev 9900)
@@ -7546,14 +7546,18 @@
versionpage = self.getUrl(self.get_address("Special:Version"))
htmldata = BeautifulSoup(versionpage, convertEntities="html")
# try to find the live version
+ versionlist = []
# 1st try is for mw < 1.17wmf1
- # 2nd try is for mw 1.17wmf1
+ versionlist.append(lambda: htmldata.findAll(
+ text="MediaWiki")[1].parent.nextSibling )
+ # 2nd try is for mw >=1.17wmf1
+ versionlist.append(lambda: htmldata.body.table.findAll(
+ 'td')[1].contents[0] )
# 3rd uses family file which is not live
- for versionstring in [htmldata.findAll(
- text="MediaWiki")[1].parent.nextSibling,
- htmldata.body.table.findAll(
- 'td')[1].contents[0],
- self.family.version(self.lang)]:
+ versionlist.append(lambda: self.family.version(self.lang) )
+ for versionfunc in versionlist:
+ try: versionstring = versionfunc()
+ except: continue
m = re.match(PATTERN, str(versionstring).strip())
if m: break
else:
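The fix wraps each candidate lookup in a lambda, so a lookup that raises no longer aborts the whole detection; the first callable that returns without raising wins. The same pattern as a standalone sketch (hypothetical helper, not part of the commit):

    def first_successful(callables):
        """Return the result of the first callable that does not raise."""
        for func in callables:
            try:
                return func()
            except Exception:
                continue
        raise RuntimeError('all candidates failed')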
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9899
Revision: 9899
Author: drtrigon
Date: 2012-02-16 18:02:32 +0000 (Thu, 16 Feb 2012)
Log Message:
-----------
Add capabilities from the DrTrigonBot 'clean_user_sandbox' script
Modified Paths:
--------------
trunk/pywikipedia/clean_sandbox.py
Modified: trunk/pywikipedia/clean_sandbox.py
===================================================================
--- trunk/pywikipedia/clean_sandbox.py 2012-02-16 17:04:35 UTC (rev 9898)
+++ trunk/pywikipedia/clean_sandbox.py 2012-02-16 18:02:32 UTC (rev 9899)
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
-This bot cleans a sandbox by replacing the current contents with predefined
-text.
+This bot cleans a (user) sandbox by replacing the current contents with
+predefined text.
This script understands the following command-line arguments:
@@ -14,14 +14,13 @@
hours and limits it between 5 and 15 minutes.
The minimum delay time is 5 minutes.
- -userlist Use this parameter to run the script in the user name-
+ -user Use this parameter to run the script in the user name-
space.
> ATTENTION: on most wiki THIS IS FORBIDEN FOR BOTS ! <
> (please talk with your admin first) <
Since it is considered bad style to edit user page with-
- out permission, you have to pass a page containing a
- list of user to process. Argument e.g. is given as
- "-userlist:Benutzer:DrTrigonBot/Diene_Mir\!".
+ out permission, 'user_sandboxTitle' for the given
+ language has to be set up (no fallback will be used).
Please be also aware that the rules when to clean the
user sandbox differ from those for project sandbox.
@@ -32,7 +31,7 @@
# (C) Andre Engels, 2007
# (C) Siebrand Mazeland, 2007
# (C) xqt, 2009
-# (C) Dr. Trigon, 2011
+# (C) Dr. Trigon, 2011-2012
#
# DrTrigonBot: http://de.wikipedia.org/wiki/Benutzer:DrTrigonBot
# Clean User Sandbox Robot (clean_user_sandbox.py)
@@ -115,19 +114,33 @@
'zh': u'Project:沙盒',
}
+user_content = {
+ 'de': u'{{Benutzer:DrTrigonBot/Spielwiese}}',
+ }
+
+user_sandboxTitle = {
+ 'de': u'User:DrTrigonBot/Spielwiese',
+ }
+
class SandboxBot:
- def __init__(self, hours, no_repeat, delay, userlist):
+ def __init__(self, hours, no_repeat, delay, user):
self.hours = hours
self.no_repeat = no_repeat
if delay == None:
self.delay = min(15, max(5, int(self.hours *60)))
else:
self.delay = max(5, delay)
+ self.user = user
self.site = pywikibot.getSite()
- if userlist == None:
- self.userlist = None
- else:
- self.userlist = [page.title().split(u'/')[0] for page in pywikibot.Page(self.site, userlist).linkedPages()]
+ if self.user:
+ localSandboxTitle = pywikibot.translate(self.site, user_sandboxTitle)
+ localSandbox = pywikibot.Page(self.site, localSandboxTitle)
+ content.update(user_content)
+ sandboxTitle[self.site.lang] = [item.title() \
+ for item in localSandbox.getReferences(onlyTemplateInclusion=True)]
+ if self.site.lang not in user_sandboxTitle:
+ sandboxTitle[self.site.lang] = []
pywikibot.output(u'Not properly set up to run in the user namespace!')
def run(self):
@@ -149,10 +162,6 @@
wait = False
now = time.strftime("%d %b %Y %H:%M:%S (UTC)", time.gmtime())
localSandboxTitle = pywikibot.translate(mySite, sandboxTitle)
- IsUserSandbox = (self.userlist is not None) # DrTrigonBot (Clean User Sandbox Robot)
- if IsUserSandbox:
- localSandboxTitle = u'%s/' + localSandboxTitle.split(u':')[-1]
- localSandboxTitle = [localSandboxTitle % user for user in self.userlist]
if type(localSandboxTitle) is list:
titles = localSandboxTitle
else:
@@ -172,7 +181,7 @@
elif subst and sandboxPage.userName() == mySite.loggedInAs():
pywikibot.output(u'The sandbox might be clean, no change necessary.')
elif pos <> 0 and not subst:
- if IsUserSandbox:
+ if self.user:
endpos = pos + len(translatedContent.strip())
if (pos < 0) or (endpos == len(text)):
pywikibot.output(u'The user sandbox is still clean or not set up, no change necessary.')
@@ -211,7 +220,7 @@
def main():
hours = 1
delay = None
- userlist = None
+ user = False
no_repeat = True
for arg in pywikibot.handleArgs():
if arg.startswith('-hours:'):
@@ -219,13 +228,13 @@
no_repeat = False
elif arg.startswith('-delay:'):
delay = int(arg[7:])
- elif arg.startswith('-userlist:'):
- userlist = arg[10:]
+ elif arg == '-user':
+ user = True
else:
pywikibot.showHelp('clean_sandbox')
return
- bot = SandboxBot(hours, no_repeat, delay, userlist)
+ bot = SandboxBot(hours, no_repeat, delay, user)
try:
bot.run()
except KeyboardInterrupt:
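To run in the new mode, the per-language tables must contain entries for the target wiki; the values below are hypothetical:

    user_content = {
        'en': u'{{User:ExampleBot/Sandbox}}',
    }
    user_sandboxTitle = {
        'en': u'User:ExampleBot/Sandbox',
    }

The script is then invoked with the new flag instead of the old page list, e.g.:

    python clean_sandbox.py -user -delay:10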