http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9048
Revision: 9048
Author: xqt
Date: 2011-03-13 11:29:45 +0000 (Sun, 13 Mar 2011)
Log Message:
-----------
stripped trailing whitespace
Modified Paths:
--------------
trunk/pywikipedia/pywikibot/textlib.py
Modified: trunk/pywikipedia/pywikibot/textlib.py
===================================================================
--- trunk/pywikipedia/pywikibot/textlib.py 2011-03-13 11:28:42 UTC (rev 9047)
+++ trunk/pywikipedia/pywikibot/textlib.py 2011-03-13 11:29:45 UTC (rev 9048)
@@ -273,14 +273,14 @@
# A family has by definition only one kind of interlanguage links:
# 1 - interlanguage links inside the own family.
# They go to a corresponding page in another language in the same
-# family, such as from 'en.wikipedia' to 'pt.wikipedia', or from
+# family, such as from 'en.wikipedia' to 'pt.wikipedia', or from
# 'es.wiktionary' to 'arz.wiktionary'.
# Families with this kind have several language-specific sites.
# They have their interwiki_forward attribute set to None
# 2 - language links forwarding to another family.
# They go to a corresponding page in another family, such as from
# 'commons' to 'zh.wikipedia', or from 'incubator' to 'en.wikipedia'.
-# Families having those have one member only, and do not have
+# Families having those have one member only, and do not have
# language-specific sites. The name of the target family of their
# interlanguage links is kept in their interwiki_forward attribute.
# These functions only deal with links of these two kinds. They
@@ -1042,7 +1042,7 @@
code = code.lang
# If xdict has a wikipedia key, the xdict covers multiple projects
- if 'wikipedia' in xdict:
+ if 'wikipedia' in xdict:
if pywikibot.default_family in xdict:
xdict = xdict[pywikibot.default_family]
else:
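For orientation: the comment block touched by the first hunk above describes the two family layouts pywikibot distinguishes. A minimal illustrative sketch (the classes and attribute values here are made up; only the interwiki_forward convention comes from the source):

    class WikipediaFamily:
        # Kind 1: several language-specific sites; interlanguage links
        # stay inside the family, so nothing is forwarded.
        name = 'wikipedia'
        langs = {'en': 'en.wikipedia.org', 'pt': 'pt.wikipedia.org'}
        interwiki_forward = None

    class CommonsFamily:
        # Kind 2: a single member; its language links go to another
        # family, whose name is kept in interwiki_forward.
        name = 'commons'
        langs = {'commons': 'commons.wikimedia.org'}
        interwiki_forward = 'wikipedia'

    def interwiki_target(family):
        # Language links resolve against the forwarded family, if any.
        return family.interwiki_forward or family.name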
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9046
Revision: 9046
Author: xqt
Date: 2011-03-13 11:26:01 +0000 (Sun, 13 Mar 2011)
Log Message:
-----------
stripped trailing whitespace
Modified Paths:
--------------
trunk/pywikipedia/wiktionary/headertest.py
trunk/pywikipedia/wiktionary/meaningtest.py
trunk/pywikipedia/wiktionary/termtest.py
trunk/pywikipedia/wiktionary/wiktionarypage.py
Modified: trunk/pywikipedia/wiktionary/headertest.py
===================================================================
--- trunk/pywikipedia/wiktionary/headertest.py 2011-03-13 11:24:32 UTC (rev 9045)
+++ trunk/pywikipedia/wiktionary/headertest.py 2011-03-13 11:26:01 UTC (rev 9046)
@@ -49,4 +49,4 @@
self.assertNotEqual(h,header.Header())
if __name__ == "__main__":
- unittest.main()
\ No newline at end of file
+ unittest.main()
Modified: trunk/pywikipedia/wiktionary/meaningtest.py
===================================================================
--- trunk/pywikipedia/wiktionary/meaningtest.py 2011-03-13 11:24:32 UTC (rev 9045)
+++ trunk/pywikipedia/wiktionary/meaningtest.py 2011-03-13 11:26:01 UTC (rev 9046)
@@ -59,4 +59,4 @@
if __name__ == "__main__":
unittest.main()
-
+
Modified: trunk/pywikipedia/wiktionary/termtest.py
===================================================================
--- trunk/pywikipedia/wiktionary/termtest.py 2011-03-13 11:24:32 UTC (rev 9045)
+++ trunk/pywikipedia/wiktionary/termtest.py 2011-03-13 11:26:01 UTC (rev 9046)
@@ -66,4 +66,4 @@
if __name__ == "__main__":
unittest.main()
-
+
Modified: trunk/pywikipedia/wiktionary/wiktionarypage.py
===================================================================
--- trunk/pywikipedia/wiktionary/wiktionarypage.py 2011-03-13 11:24:32 UTC (rev 9045)
+++ trunk/pywikipedia/wiktionary/wiktionarypage.py 2011-03-13 11:26:01 UTC (rev 9046)
@@ -127,7 +127,7 @@
if len(lang)>1 and len(lang)<4:
self.addLink(lang+':'+linkto)
continue
- # store empty lines literally, this is necessary for the blocks we don't parse
+ # store empty lines literally, this is necessary for the blocks we don't parse
# and will return literally
if len(line) <2:
templist.append(line)
@@ -293,7 +293,7 @@
self.addEntry(anentry)
# Then we can easily add this meaning to it.
anentry.addMeaning(ameaning)
-
+
pos=line.find('<!--')
if pos!=-1 and pos < 4:
# A html comment at the beginning of the line means this entry already has disambiguation labels, great
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9045
Revision: 9045
Author: xqt
Date: 2011-03-13 11:24:32 +0000 (Sun, 13 Mar 2011)
Log Message:
-----------
remove obsolete file, duplicate of sortonlanguagename.py
Removed Paths:
-------------
trunk/pywikipedia/wiktionary/sortonlanguagename
Deleted: trunk/pywikipedia/wiktionary/sortonlanguagename
===================================================================
--- trunk/pywikipedia/wiktionary/sortonlanguagename 2011-03-13 10:26:54 UTC (rev 9044)
+++ trunk/pywikipedia/wiktionary/sortonlanguagename 2011-03-13 11:24:32 UTC (rev 9045)
@@ -1,14 +0,0 @@
-# A big thanks to Rob Hooft for the following class:
-# It may not seem like much, but it magically allows the translations to be sorted on
-# the names of the languages. I would never have thought of doing it like this myself.
-
-class sortonlanguagename:
- '''
- This class sorts translations alphabetically on the name of the language,
- instead of on the iso abbreviation that is used internally.
- '''
- def __init__(self, lang):
- self.lang = lang
-
- def __call__(self, one, two):
- return cmp(self.lang[one], self.lang[two])
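The deleted class predates key-based sorting: it wrapped a language-name dict so list.sort(cmp=...) could compare ISO codes by display name. For reference, the same ordering with a key function, as a minimal sketch (langnames is a hypothetical stand-in for the lang mapping the class wrapped):

    langnames = {'de': u'German', 'fr': u'French', 'nl': u'Dutch'}
    codes = ['nl', 'de', 'fr']
    # Sort ISO codes by the display name of the language, as the old
    # comparator class did, but without cmp().
    codes.sort(key=lambda code: langnames[code])
    # codes is now ['nl', 'fr', 'de']  (Dutch, French, German)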
http://www.mediawiki.org/wiki/Special:Code/pywikipedia/9042
Revision: 9042
Author: xqt
Date: 2011-03-13 10:14:47 +0000 (Sun, 13 Mar 2011)
Log Message:
-----------
stripped trailing whitespace
Modified Paths:
--------------
trunk/pywikipedia/archivebot.py
trunk/pywikipedia/blockreview.py
trunk/pywikipedia/botlist.py
trunk/pywikipedia/capitalize_redirects.py
trunk/pywikipedia/category.py
trunk/pywikipedia/catlib.py
trunk/pywikipedia/censure.py
trunk/pywikipedia/checkimages.py
trunk/pywikipedia/clean_sandbox.py
trunk/pywikipedia/commonscat.py
trunk/pywikipedia/config.py
trunk/pywikipedia/copyright.py
trunk/pywikipedia/cosmetic_changes.py
trunk/pywikipedia/date.py
trunk/pywikipedia/deledpimage.py
trunk/pywikipedia/delinker.py
trunk/pywikipedia/djvutext.py
trunk/pywikipedia/family.py
trunk/pywikipedia/featured.py
trunk/pywikipedia/fixing_redirects.py
trunk/pywikipedia/flickrripper.py
trunk/pywikipedia/generate_family_file.py
trunk/pywikipedia/imagecopy.py
trunk/pywikipedia/imagecopy_enwp.py
trunk/pywikipedia/imagecopy_self.py
trunk/pywikipedia/imagerecat.py
trunk/pywikipedia/imageuncat.py
trunk/pywikipedia/logindata.py
trunk/pywikipedia/match_images.py
trunk/pywikipedia/pagegenerators.py
trunk/pywikipedia/panoramiopicker.py
trunk/pywikipedia/query.py
trunk/pywikipedia/redirect.py
trunk/pywikipedia/replace.py
trunk/pywikipedia/revertbot.py
trunk/pywikipedia/selflink.py
trunk/pywikipedia/simple_family.py
trunk/pywikipedia/standardize_notes.py
trunk/pywikipedia/statistics_in_wikitable.py
trunk/pywikipedia/titletranslate.py
trunk/pywikipedia/upload.py
trunk/pywikipedia/userlib.py
trunk/pywikipedia/version.py
trunk/pywikipedia/watchlist.py
trunk/pywikipedia/weblinkchecker.py
trunk/pywikipedia/welcome.py
trunk/pywikipedia/wikipedia.py
trunk/pywikipedia/xmlreader.py
Modified: trunk/pywikipedia/archivebot.py
===================================================================
--- trunk/pywikipedia/archivebot.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/archivebot.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -17,7 +17,7 @@
Transcluded template may contain the following parameters:
-{{TEMPLATE_PAGE
+{{TEMPLATE_PAGE
|archive =
|algo =
|counter =
@@ -253,13 +253,13 @@
}
if eicontinue:
qdata['eicontinue'] = eicontinue
-
+
pywikibot.output(u'Fetching template transclusions...')
response, result = query.GetData(qdata, Site, back_response = True)
-
+
for page_d in result['query']['embeddedin']:
yield pywikibot.Page(Site, page_d['title'])
-
+
if 'query-continue' in result:
eicontinue = result['query-continue']['embeddedin']['eicontinue']
for page in generateTransclusions(Site, template, namespaces,
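The archivebot hunks above sit inside a standard MediaWiki API continuation loop: request list=embeddedin, yield the batch, then re-query with eicontinue until query-continue disappears. A self-contained sketch of the same pagination, iterative rather than recursive (the API parameter names are the real ones; the host and limit are illustrative):

    import json, urllib, urllib2

    def embedded_in(template, host='en.wikipedia.org'):
        """Yield titles of pages transcluding the given template."""
        params = {'action': 'query', 'list': 'embeddedin',
                  'eititle': template, 'eilimit': '500', 'format': 'json'}
        while True:
            url = 'http://%s/w/api.php?%s' % (host, urllib.urlencode(params))
            result = json.load(urllib2.urlopen(url))
            for page in result['query']['embeddedin']:
                yield page['title']
            if 'query-continue' not in result:
                break  # last batch reached
            params['eicontinue'] = \
                result['query-continue']['embeddedin']['eicontinue']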
Modified: trunk/pywikipedia/blockreview.py
===================================================================
--- trunk/pywikipedia/blockreview.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/blockreview.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -31,7 +31,7 @@
note_admin = {
'de': u"\n\n== Sperrprüfungswunsch ==\nHallo %(admin)s, \n\n[[%(user)s]] wünscht diePrüfung seiner/ihrer Sperre vom %(time)s über die Dauer von %(duration)s. Kommentar war ''%(comment)s''. Bitte äußere Dich dazu auf der [[%(usertalk)s#%(section)s|Diskussionsseite]]. -~~~~"
}
-
+
note_project = {
'de': u"\n\n== [[%(user)s]] ==\n* gesperrt am %(time)s durch {{Benutzer|%(admin)s}} für eine Dauer von %(duration)s.\n* Kommentar war ''%(comment)s''.\n* [[Benutzer:%(admin)s]] wurde [[Benutzer Diskussion:%(admin)s#Sperrprüfungswunsch|benachrichtigt]].\n* [[%(usertalk)s#%(section)s|Link zur Diskussion]]\n:<small>-~~~~</small>\n;Antrag entgegengenommen"
}
@@ -133,13 +133,13 @@
gen = pg.PreloadingGenerator(self.SysopGenerator())
for sysop in gen:
print sysop.title()
-
+
talkText = talkText.replace(u'{{%s}}' % unblock_tpl,
u'{{%s|2}}' % unblock_tpl)
talkText = talkText.replace(u'{{%s|1}}' % unblock_tpl,
u'{{%s|2}}' % unblock_tpl)
talkComment = pywikibot.translate(self.site.lang, self.msg_user % self.parts)
-
+
# some test stuff
if pywikibot.debug and self.site().loggedInAs() == u'Xqbot:':
testPage = pywikibot.Page(self.site, 'Benutzer:Xqt/Test')
Modified: trunk/pywikipedia/botlist.py
===================================================================
--- trunk/pywikipedia/botlist.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/botlist.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
"""
Allows access to the site's bot user list.
-
+
The function refresh() downloads the current bot user list and saves
it to disk. It is run automatically when a bot first tries to get this
data.
"""
-
+
# (C) Daniel Herding, 2005
# (C) Dr. Trigon, 2009-2010
#
@@ -16,14 +16,14 @@
#
__version__='$Id$'
#
-
+
import re, sys, pickle
import os.path
import time
import wikipedia as pywikibot
-
+
cache = {}
-
+
def get(site = None):
if site is None:
site = pywikibot.getSite()
@@ -49,19 +49,19 @@
# create cached copy
cache[site] = botlist
return botlist
-
+
def isBot(user, site=None):
botlist = get(site)
return user in botlist
-
+
def refresh(site, sysop=False, witheditsonly=True):
#if not site.has_api() or site.versionnumber() < 10:
# _refreshOld(site)
-
+
# get botlist special page's URL
if not site.loggedInAs(sysop=sysop):
site.forceLogin(sysop=sysop)
-
+
params = {
'action': 'query',
'list': 'allusers',
@@ -69,7 +69,7 @@
}
if witheditsonly:
params['auwitheditsonly'] = ''
-
+
pywikibot.output(u'Retrieving bot user list for %s via API.' % repr(site))
pywikibot.put_throttle() # It actually is a get, but a heavy one.
botlist = []
@@ -78,7 +78,7 @@
if 'error' in data:
raise RuntimeError('ERROR: %s' % data)
botlist.extend([w['name'] for w in data['query']['allusers']])
-
+
if 'query-continue' in data:
params['aufrom'] = data['query-continue']['allusers']['aufrom']
else:
@@ -112,13 +112,13 @@
# The file is stored in the botlists subdir. Create if necessary.
if sysop:
f = open(pywikibot.config.datafilepath('botlists',
- 'botlist-%s-%s-sysop.dat' % (site.family.name, site.lang)), 'w')
+ 'botlist-%s-%s-sysop.dat' % (site.family.name, site.lang)), 'w')
else:
f = open(pywikibot.config.datafilepath('botlists',
'botlist-%s-%s.dat' % (site.family.name, site.lang)), 'w')
pickle.dump(botlist, f)
f.close()
-
+
#def refresh_all(new = False, sysop=False):
# if new:
# import config
@@ -170,5 +170,5 @@
# main()
# finally:
# pywikibot.stopme()
-
+
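As its docstring says, botlist.py layers an in-memory cache over a pickled copy on disk and refreshes from the API only when both miss. The shape of that pattern, reduced to a hedged sketch (the file name and the fetch stub are illustrative, not the module's actual helpers):

    import os, pickle

    cache = {}

    def fetch_from_wiki(site):
        # stand-in for the allusers API query that refresh() performs
        return [u'ExampleBot']

    def get_botlist(site):
        if site in cache:                    # 1. in-memory cache
            return cache[site]
        datafile = 'botlist-%s.dat' % site
        if os.path.exists(datafile):         # 2. pickled copy on disk
            f = open(datafile, 'rb')
            botlist = pickle.load(f)
            f.close()
        else:                                # 3. refresh from the wiki
            botlist = fetch_from_wiki(site)
            f = open(datafile, 'wb')
            pickle.dump(botlist, f)
            f.close()
        cache[site] = botlist
        return botlist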
Modified: trunk/pywikipedia/capitalize_redirects.py
===================================================================
--- trunk/pywikipedia/capitalize_redirects.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/capitalize_redirects.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -12,7 +12,7 @@
-always Don't prompt to make changes, just do them.
-titlecase creates a titlecased redirect version of a given page
- where all words of the title start with an uppercase
+ where all words of the title start with an uppercase
character and the remaining characters are lowercase.
Example: "python capitalize_redirects.py -start:B -always"
Modified: trunk/pywikipedia/category.py
===================================================================
--- trunk/pywikipedia/category.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/category.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -1374,7 +1374,7 @@
gen = genFactory.getCombinedGenerator()
if not gen:
#default for backwords compatibility
- genFactory.handleArg('-links')
+ genFactory.handleArg('-links')
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
gen = pagegenerators.PreloadingGenerator(
Modified: trunk/pywikipedia/catlib.py
===================================================================
--- trunk/pywikipedia/catlib.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/catlib.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -185,7 +185,7 @@
for tag, page in self._oldParseCategory(purge, startFrom):
yield tag, page
return
-
+
currentPageOffset = None
params = {
'action': 'query',
@@ -349,7 +349,7 @@
equivalent to recurse = False, recurse = 1 gives first-level
subcategories of subcategories but no deeper, etcetera).
- cacheResults - cache the category contents: useful if you need to
+ cacheResults - cache the category contents: useful if you need to
do several passes on the category members list. The simple cache
system is *not* meant to be memory or cpu efficient for large
categories
@@ -386,7 +386,7 @@
Recurse can be a number to restrict the depth at which subcategories
are included.
- cacheResults - cache the category contents: useful if you need to
+ cacheResults - cache the category contents: useful if you need to
do several passes on the category members list. The simple cache
system is *not* meant to be memory or cpu efficient for large
categories
@@ -602,7 +602,7 @@
Category to load all the elements in a category using the APIs. Limit: 5000 elements.
"""
wikipedia.output("Loading %s..." % CatName)
-
+
params = {
'action' :'query',
'list' :'categorymembers',
Modified: trunk/pywikipedia/censure.py
===================================================================
--- trunk/pywikipedia/censure.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/censure.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -45,26 +45,26 @@
del ownWordList[0]
del ownWordList[len(ownWordList) - 1]
-def seekbpos(str1, str2):
- i = 0
- while i < len(str1):
- if str1[i] != str2[i]:
- return i
- i += 1
- return i
-
-def seekepos(str1, str2, bpos):
- i1 = len(str1) - 1
- i2 = len(str2) - 1
- while i1 > -1 and i2 > -1:
- if i1 == bpos:
- return i2
- elif i1 < bpos or str1[i1] != str2[i2]:
- return i2 + 1
- i1 -= 1
- i2 -= 1
- return -1
+def seekbpos(str1, str2):
+ i = 0
+ while i < len(str1):
+ if str1[i] != str2[i]:
+ return i
+ i += 1
+ return i
+def seekepos(str1, str2, bpos):
+ i1 = len(str1) - 1
+ i2 = len(str2) - 1
+ while i1 > -1 and i2 > -1:
+ if i1 == bpos:
+ return i2
+ elif i1 < bpos or str1[i1] != str2[i2]:
+ return i2 + 1
+ i1 -= 1
+ i2 -= 1
+ return -1
+
def checkPage(title, onlyLastDiff = False):
if title == logPages[site.language() + '.' + site.family.name]:
return
@@ -79,7 +79,7 @@
pywikibot.output(u'Page %s has no version history, skipping' %title)
return
if len(text) > len(oldver):
- bpos = seekbpos(oldver, text)
+ bpos = seekbpos(oldver, text)
epos = seekepos(oldver, text, bpos)
diff = text[bpos:epos]
text = diff
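For orientation, seekbpos and seekepos above bracket the region where the old and new revision texts diverge, so checkPage only scans the inserted slice instead of the whole page. A small worked example with toy strings:

    old = u'one two three'
    new = u'one two BAD WORD three'
    bpos = seekbpos(old, new)        # 8: first index where the texts differ
    epos = seekepos(old, new, bpos)  # 17: just past the insertion in new
    diff = new[bpos:epos]            # u'BAD WORD ', the only part checked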
Modified: trunk/pywikipedia/checkimages.py
===================================================================
--- trunk/pywikipedia/checkimages.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/checkimages.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -282,7 +282,7 @@
'hu' :u"{{subst:adjforrást|Kép:%s}} \n Ezt az üzenetet ~~~ automatikusan helyezte el a vitalapodon, kérdéseddel fordulj a gazdájához, vagy a [[WP:KF|Kocsmafalhoz]]. --~~~~",
'it' :u"{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Senza licenza|%s|__botnick__}} --~~~~",
'ja' :u"\n{{subst:Image copyright|File:%s}}--~~~~",
- 'ko' :u'\n{{subst:User:Kwjbot IV/untagged|%s}} --~~~~',
+ 'ko' :u'\n{{subst:User:Kwjbot IV/untagged|%s}} --~~~~',
'ta' :u'\n{{subst:Di-no license-notice|படிமம்:%s}} ~~~~ ',
'zh' :u'\n{{subst:Uploadvionotice|File:%s}} ~~~~ ',
}
@@ -419,7 +419,7 @@
'_default':None,
'commons': u"""\n{{subst:User:Filnik/whitetemplate|File:%s}}\n\n''This message was added automatically by __botnick__, if you need some help about it please read the text above again and follow the links in it, if you still need help ask at the [[File:Human-help-browser.svg|18px|link=Commons:Help desk|?]] '''[[Commons:Help desk|→]] [[Commons:Help desk]]''' in any language you like to use.'' --__botnick__""",
'it' : u"{{subst:Progetto:Coordinamento/Immagini/Bot/Messaggi/Template_insufficiente|%s|__botnick__}} --~~~~",
- 'ko' : u"\n{{subst:User:Kwj2772/whitetemplates|%s}} --~~~~",
+ 'ko' : u"\n{{subst:User:Kwj2772/whitetemplates|%s}} --~~~~",
}
# In this part there are the parameters for the dupe images.
@@ -559,58 +559,58 @@
normal = False # Check the new images or use another generator?
urlUsed = False # Use the url-related function instead of the new-pages generator
regexGen = False # Use the regex generator
- untagged = False # Use the untagged generator
+ untagged = False # Use the untagged generator
duplicatesActive = False # Use the duplicate option
duplicatesReport = False # Use the duplicate-report option
sendemailActive = False # Use the send-email
logFullError = True # Raise an error when the log is full
-
-
+
+
class main:
def __init__(self, site, logFulNumber = 25000, sendemailActive = False,
duplicatesReport = False, logFullError = True):
""" Constructor, define some global variable """
- self.site = site
- self.logFullError = logFullError
- self.logFulNumber = logFulNumber
- self.rep_page = pywikibot.translate(self.site, report_page)
- self.rep_text = pywikibot.translate(self.site, report_text)
- self.com = pywikibot.translate(self.site, comm10)
- hiddentemplatesRaw = pywikibot.translate(self.site, HiddenTemplate)
+ self.site = site
+ self.logFullError = logFullError
+ self.logFulNumber = logFulNumber
+ self.rep_page = pywikibot.translate(self.site, report_page)
+ self.rep_text = pywikibot.translate(self.site, report_text)
+ self.com = pywikibot.translate(self.site, comm10)
+ hiddentemplatesRaw = pywikibot.translate(self.site, HiddenTemplate)
self.hiddentemplates = [pywikibot.Page(self.site, tmp)
- for tmp in hiddentemplatesRaw]
+ for tmp in hiddentemplatesRaw]
self.pageHidden = pywikibot.translate(self.site,
- PageWithHiddenTemplates)
+ PageWithHiddenTemplates)
self.pageAllowed = pywikibot.translate(self.site,
- PageWithAllowedTemplates)
+ PageWithAllowedTemplates)
# Commento = Summary in italian
self.commento = pywikibot.translate(self.site, comm)
# Adding the bot's nickname at the notification text if needed.
- botolist = pywikibot.translate(self.site, bot_list)
- project = pywikibot.getSite().family.name
- self.project = project
+ botolist = pywikibot.translate(self.site, bot_list)
+ project = pywikibot.getSite().family.name
+ self.project = project
bot = config.usernames[project]
try:
botnick = bot[self.site.lang]
except KeyError:
raise pywikibot.NoUsername(
u"You have to specify an username for your bot in this project in the user-config.py file.")
-
+
self.botnick = botnick
botolist.append(botnick)
-
+
self.botolist = botolist
-
+
self.sendemailActive = sendemailActive
# Initialize the skip list used below
self.skip_list = list()
-
+
self.duplicatesReport = duplicatesReport
-
+
self.image_namespace = u"File:"
# Load the licenses only once, so do it once
self.list_licenses = self.load_licenses()
-
+
def setParameters(self, imageName, timestamp, uploader):
""" Function to set parameters, now only image but maybe it can be used
for others in "future"
@@ -621,7 +621,7 @@
self.image = pywikibot.ImagePage(self.site, self.imageName)
self.timestamp = timestamp
self.uploader = uploader
-
+
def report(self, newtext, image_to_report, notification=None, head=None,
notification2 = None, unver=True, commTalk=None, commImage=None):
""" Function to make the reports easier. """
@@ -631,21 +631,21 @@
self.head = head
self.notification = notification
self.notification2 = notification2
-
+
if self.notification:
self.notification = re.sub(r'__botnick__', self.botnick,
notification)
-
+
if self.notification2:
self.notification2 = re.sub(r'__botnick__', self.botnick,
notification2)
self.commTalk = commTalk
-
+
if commImage:
self.commImage = commImage
else:
self.commImage = self.commento
-
+
# Ok, done, let's loop.
while 1:
if unver:
@@ -687,18 +687,18 @@
break
else:
break
-
+
def uploadBotChangeFunction(self, reportPageText, upBotArray):
"""Detect the user that has uploaded the file through the upload bot"""
regex = upBotArray[1]
results = re.findall(regex, reportPageText)
-
+
if results:
luser = results[0]
return luser
else:
return upBotArray[0] # we can't find the user, report the problem to the bot
-
+
def tag_image(self, put = True):
""" Function to add the template in the image and to find out
who's the user that has uploaded the file.
@@ -706,7 +706,7 @@
"""
# Get the image's description
reportPageObject = pywikibot.ImagePage(self.site, self.image_to_report)
-
+
try:
reportPageText = reportPageObject.get()
except pywikibot.NoPage:
@@ -717,7 +717,7 @@
if put:
reportPageObject.put(reportPageText + self.newtext,
comment=self.commImage)
- # paginetta it's the image page object.
+ # paginetta it's the image page object.
try:
if reportPageObject == self.image and self.uploader:
nick = self.uploader
@@ -734,7 +734,7 @@
return False
upBots = pywikibot.translate(self.site, uploadBots)
luser = pywikibot.url2link(nick, self.site, self.site)
-
+
if upBots:
for upBot in upBots:
if upBot[0] == luser:
@@ -743,7 +743,7 @@
self.talk_page = talk_page
self.luser = luser
return True
-
+
def put_mex_in_talk(self):
""" Function to put the warning in talk page of the uploader."""
commento2 = pywikibot.translate(self.site, comm2)
@@ -780,26 +780,26 @@
testoattuale = self.talk_page.get()
except pywikibot.NoPage:
second_text = False
- testoattuale = pywikibot.translate(self.site, empty)
+ testoattuale = pywikibot.translate(self.site, empty)
except pywikibot.NoPage:
pywikibot.output(u'The user page is blank')
second_text = False
- testoattuale = pywikibot.translate(self.site, empty)
+ testoattuale = pywikibot.translate(self.site, empty)
if self.commTalk:
commentox = self.commTalk
else:
commentox = commento2
-
+
if second_text:
newText = u"%s\n\n%s" % (testoattuale, self.notification2)
else:
newText = testoattuale + self.head + self.notification
-
+
try:
self.talk_page.put(newText, comment = commentox, minorEdit = False)
except pywikibot.LockedPage:
pywikibot.output(u'Talk page blocked, skip.')
-
+
if emailPageName and emailSubj:
emailPage = pywikibot.Page(self.site, emailPageName)
try:
@@ -815,7 +815,7 @@
except userlib.UserActionRefuse:
pywikibot.output("User is not mailable, aborted")
return # exit
-
+
def untaggedGenerator(self, untaggedProject, limit):
""" Generator that yield the files without license. It's based on a
tool of the toolserver.
@@ -823,14 +823,14 @@
"""
lang = untaggedProject.split('.', 1)[0]
project = '.%s' % untaggedProject.split('.', 1)[1]
-
+
if lang == 'commons':
link = 'http://toolserver.org/~daniel/WikiSense/UntaggedImages.php?wikifam=commons.…'
else:
link = 'http://toolserver.org/~daniel/WikiSense/UntaggedImages.php?wikilang=%s&wiki…' % (lang, project, limit, limit)
text = self.site.getUrl(link, no_hostname = True)
results = re.findall(r"""<td valign='top' title='Name'><a href='http://.*?\.org/w/index\.php\?title=(.*?)'>.*?</a></td>""", text)
-
+
if results:
for result in results:
wikiPage = pywikibot.ImagePage(self.site, result)
@@ -839,7 +839,7 @@
pywikibot.output(link)
raise NothingFound(
u'Nothing found! Try to use the tool by yourself to be sure that it works!')
-
+
def regexGenerator(self, regexp, textrun):
""" Generator used when an user use a regex parsing a page to yield the
results
@@ -849,17 +849,17 @@
results = regex.findall(textrun)
for image in results:
yield pywikibot.ImagePage(self.site, image)
-
+
def loadHiddenTemplates(self):
""" Function to load the white templates """
# A template as {{en is not a license! Adding also them in the whitelist template...
for langK in pywikibot.Family(u'wikipedia').langs.keys():
self.hiddentemplates.append(pywikibot.Page(self.site,
u'Template:%s' % langK))
-
+
# The template #if: and #switch: aren't something to care about
#self.hiddentemplates.extend([u'#if:', u'#switch:']) FIXME
-
+
# Hidden template loading
if self.pageHidden:
try:
@@ -867,11 +867,11 @@
self.pageHidden).get()
except (pywikibot.NoPage, pywikibot.IsRedirectPage):
pageHiddenText = ''
-
+
for element in self.load(pageHiddenText):
self.hiddentemplates.append(pywikibot.Page(self.site, element))
return self.hiddentemplates
-
+
def returnOlderTime(self, listGiven, timeListGiven):
""" Get some time and return the oldest of them """
# print listGiven; print timeListGiven
@@ -890,23 +890,23 @@
max_usage = len(imageUsage)
num_older = num
num += 1
-
+
if num_older:
return listGiven[num_older][1]
-
+
for element in listGiven:
time = element[0]
imageName = element[1]
not_the_oldest = False
-
+
for time_selected in timeListGiven:
if time > time_selected:
not_the_oldest = True
break
-
+
if not not_the_oldest:
return imageName
-
+
def convert_to_url(self, page):
# Function stolen from wikipedia.py
"""The name of the page this Page refers to, in a form suitable for the
@@ -916,7 +916,7 @@
title = page.replace(u" ", u"_")
encodedTitle = title.encode(self.site.encoding())
return urllib.quote(encodedTitle)
-
+
def countEdits(self, pagename, userlist):
"""Function to count the edit of a user or a list of users in a page."""
# self.botolist
@@ -925,15 +925,15 @@
page = pywikibot.Page(self.site, pagename)
history = page.getVersionHistory()
user_list = list()
-
+
for data in history:
user_list.append(data[2])
number_edits = 0
-
+
for username in userlist:
number_edits += user_list.count(username)
return number_edits
-
+
def checkImageOnCommons(self):
""" Checking if the file is on commons """
pywikibot.output(u'Checking if %s is on commons...' % self.imageName)
@@ -972,7 +972,7 @@
return True # Problems? No, return True
else:
return True # Problems? No, return True
-
+
def checkImageDuplicated(self, duplicates_rollback):
""" Function to check the duplicated files. """
# {{Dupe|File:Blanche_Montel.jpg}}
@@ -989,20 +989,20 @@
imagePage = pywikibot.ImagePage(self.site, self.imageName)
hash_found = imagePage.getHash()
duplicates = self.site.getFilesFromAnHash(hash_found)
-
+
if not duplicates:
return False # Error, image deleted, no hash found. Skip the image.
-
+
if len(duplicates) > 1:
if len(duplicates) == 2:
pywikibot.output(u'%s has a duplicate! Reporting it...' % self.imageName)
else:
pywikibot.output(u'%s has %s duplicates! Reporting them...' % (self.imageName, len(duplicates) - 1))
-
+
if dupText and dupRegex:
time_image_list = list()
time_list = list()
-
+
for duplicate in duplicates:
DupePage = pywikibot.ImagePage(self.site, duplicate)
@@ -1017,7 +1017,7 @@
Page_oder_image = pywikibot.ImagePage(self.site, older_image)
string = ''
images_to_tag_list = []
-
+
for duplicate in duplicates:
if pywikibot.ImagePage(self.site, duplicate) == pywikibot.ImagePage(self.site, older_image):
continue # the older image, not report also this as duplicate
@@ -1027,7 +1027,7 @@
older_page_text = Page_oder_image.get()
except pywikibot.NoPage:
continue # The page doesn't exists
-
+
if not re.findall(dupRegex, DupPageText) and not re.findall(dupRegex, older_page_text):
pywikibot.output(u'%s is a duplicate and has to be tagged...' % duplicate)
images_to_tag_list.append(duplicate)
@@ -1040,13 +1040,13 @@
return False # Ok - No problem. Let's continue the checking phase
older_image_ns = u'%s%s' % (self.image_namespace, older_image) # adding the namespace
only_report = False # true if the image are not to be tagged as dupes
-
+
# put only one image or the whole list according to the request
if u'__images__' in dupText:
text_for_the_report = re.sub(r'__images__', r'\n%s*[[:%s]]\n' % (string, older_image_ns), dupText)
else:
text_for_the_report = re.sub(r'__image__', r'%s' % older_image_ns, dupText)
-
+
# Two iteration: report the "problem" to the user only once (the last)
if len(images_to_tag_list) > 1:
for image_to_tag in images_to_tag_list[:-1]:
@@ -1059,7 +1059,7 @@
text_for_the_report = re.sub(r'\n\*\[\[:%s\]\]' % re.escape(self.image_namespace + image_to_tag), '', text_for_the_report)
self.report(text_for_the_report, image_to_tag,
commImage = dupComment_image, unver = True)
-
+
if len(images_to_tag_list) != 0 and not only_report:
already_reported_in_past = self.countEdits(u'File:%s' % images_to_tag_list[-1], self.botolist)
image_to_resub = images_to_tag_list[-1]
@@ -1073,47 +1073,47 @@
self.report(text_for_the_report, images_to_tag_list[-1],
dupTalkText % (older_image_ns, string), dupTalkHead, commTalk = dupComment_talk,
commImage = dupComment_image, unver = True)
-
+
if self.duplicatesReport or only_report:
if only_report:
repme = u"\n*[[:File:%s]] has the following duplicates ('''forced mode'''):" % self.convert_to_url(self.imageName)
else:
repme = u"\n*[[:File:%s]] has the following duplicates:" % self.convert_to_url(self.imageName)
-
+
for duplicate in duplicates:
if self.convert_to_url(duplicate) == self.convert_to_url(self.imageName):
continue # the image itself, not report also this as duplicate
repme += u"\n**[[:File:%s]]" % self.convert_to_url(duplicate)
result = self.report_image(self.imageName, self.rep_page, self.com, repme, addings = False, regex = duplicateRegex)
if not result:
- return True # If Errors, exit (but continue the check)
-
+ return True # If Errors, exit (but continue the check)
+
if older_image != self.imageName:
return False # The image is a duplicate, it will be deleted. So skip the check-part, useless
return True # Ok - No problem. Let's continue the checking phase
-
+
def report_image(self, image_to_report, rep_page = None, com = None, rep_text = None, addings = True, regex = None):
""" Report the files to the report page when needed. """
if not rep_page:
rep_page = self.rep_page
-
+
if not com:
com = self.com
-
+
if not rep_text:
rep_text = self.rep_text
-
+
another_page = pywikibot.Page(self.site, rep_page)
-
+
if not regex:
regex = image_to_report
try:
text_get = another_page.get()
except pywikibot.NoPage:
text_get = ''
- except pywikibot.IsRedirectPage:
+ except pywikibot.IsRedirectPage:
text_get = another_page.getRedirectTarget().get()
-
+
if len(text_get) >= self.logFulNumber:
if self.logFullError:
raise LogIsFull(u"The log page (%s) is full! Please delete the old files reported." % another_page.title())
@@ -1123,7 +1123,7 @@
# The talk page includes "_" between the two names, in this way i replace them to " "
n = re.compile(regex, re.UNICODE|re.DOTALL)
y = n.findall(text_get)
-
+
if y:
pywikibot.output(u"%s is already in the report page." % image_to_report)
reported = False
@@ -1135,7 +1135,7 @@
pywikibot.output(u"...Reported...")
reported = True
return reported
-
+
def takesettings(self):
""" Function to take the settings from the wiki. """
settingsPage = pywikibot.translate(self.site, page_with_settings)
@@ -1156,7 +1156,7 @@
"\*[Tt]ext ?= ?['\"](.*?)['\"]\n"
"\*[Mm]ex ?= ?['\"]?([^\n]*?)['\"]?\n", re.UNICODE|re.DOTALL)
number = 1
-
+
for m in r.finditer(testo):
name = str(m.group(1))
find_tipe = str(m.group(2))
@@ -1169,7 +1169,7 @@
tupla = [number, name, find_tipe, find, imagechanges, summary, head, text, mexcatched]
self.settingsData += [tupla]
number += 1
-
+
if self.settingsData == list():
pywikibot.output(u"You've set wrongly your settings, please take a look to the relative page. (run without them)")
self.settingsData = None
@@ -1181,10 +1181,10 @@
pywikibot.output(u'Problems with loading the settigs, run without them.')
self.settingsData = None
self.some_problem = False
-
+
if not self.settingsData:
self.settingsData = None
-
+
# Real-Time page loaded
if self.settingsData:
pywikibot.output(u'\t >> Loaded the real-time page... <<')
@@ -1192,7 +1192,7 @@
else:
pywikibot.output(u'\t >> No additional settings found! <<')
return self.settingsData # Useless, but it doesn't harm..
-
+
def load_licenses(self):
""" Load the list of the licenses """
## catName = pywikibot.translate(self.site, category_with_licenses)
@@ -1216,7 +1216,7 @@
for license_given in no_licenses_to_skip:
list_licenses.remove(license_given)
pywikibot.output('') # blank line
-
+
# Add the licenses set in the default page as licenses
# to check
if self.pageAllowed:
@@ -1224,13 +1224,13 @@
pageAllowedText = pywikibot.Page(self.site, self.pageAllowed).get()
except (pywikibot.NoPage, pywikibot.IsRedirectPage):
pageAllowedText = ''
-
+
for nameLicense in self.load(pageAllowedText):
pageLicense = pywikibot.Page(self.site, nameLicense)
if pageLicense not in list_licenses:
list_licenses.append(pageLicense) # the list has wiki-pages
return list_licenses
-
+
def miniTemplateCheck(self, template):
"""
Is the template given in the licenses allowed or in the licenses to skip?
@@ -1241,7 +1241,7 @@
self.seems_ok = True
self.license_found = self.license_selected # let the last "fake" license normally detected
return True
-
+
if template in self.hiddentemplates:
# if the whitetemplate is not in the images description, we don't care
try:
@@ -1250,8 +1250,8 @@
return False
else:
self.whiteTemplatesFound = True
- return False
-
+ return False
+
def templateInList(self):
"""
The bottleneck is the calls to the MediaWiki backend, because they can be pretty slow.
@@ -1270,10 +1270,10 @@
template = template.getRedirectTarget()
result = self.miniTemplateCheck(template)
if result:
- break
+ break
except pywikibot.NoPage:
- continue
-
+ continue
+
def smartDetection(self):
""" The bot instead of checking if there's a simple template in the
image's description, checks also if that template is a license or
@@ -1287,20 +1287,20 @@
regex_are_licenses = re.compile(r'(?<!\{)\{\{(?:[Tt]emplate:|)([^{]+?)\}\}', re.DOTALL)
#dummy_edit = False
while 1:
- self.hiddentemplates = self.loadHiddenTemplates()
+ self.hiddentemplates = self.loadHiddenTemplates()
self.licenses_found = self.image.getTemplates()
templatesInTheImageRaw = regex_find_licenses.findall(self.imageCheckText)
-
+
if not self.licenses_found and templatesInTheImageRaw:
# {{nameTemplate|something <- this is not a template, be sure that we haven't catch something like that.
licenses_TEST = regex_are_licenses.findall(self.imageCheckText)
if not self.licenses_found and licenses_TEST:
raise pywikibot.Error("APIs seems down. No templates found with them but actually there are templates used in the image's page!")
self.allLicenses = list()
-
+
if not self.list_licenses:
raise pywikibot.Error(u'No licenses allowed provided, add that option to the code to make the script working correctly')
-
+
# Found the templates ONLY in the image's description
for template_selected in templatesInTheImageRaw:
for templateReal in self.licenses_found:
@@ -1321,15 +1321,15 @@
# dummy_edit = True
#else:
break
-
+
if self.licenses_found:
self.templateInList()
-
+
if not self.license_found and self.allLicenses:
# If iterlist were simply self.allLicenses, removing an item from
# iterlist would remove it from self.allLicenses too
iterlist = list(self.allLicenses)
-
+
for template in iterlist:
try:
template.pageAPInfo()
@@ -1337,7 +1337,7 @@
template = template.getRedirectTarget()
except pywikibot.NoPage:
self.allLicenses.remove(template)
-
+
if self.allLicenses:
self.license_found = self.allLicenses[0].title()
self.some_problem = False # If it has "some_problem" it must check
@@ -1375,7 +1375,7 @@
elif self.license_found:
printWithTimeZone(u"%s seems ok, license found: %s..." % (self.imageName, self.license_found))
return (self.license_found, self.whiteTemplatesFound)
-
+
def load(self, raw):
""" Load a list of object from a string using regex. """
list_loaded = list()
@@ -1414,7 +1414,7 @@
else:
pywikibot.output('') # Print a blank line.
return False
-
+
def wait(self, waitTime, generator, normal, limit):
""" Skip the images uploaded before x seconds to let
the users to fix the image's problem alone in the
@@ -1425,7 +1425,7 @@
if normal:
printWithTimeZone(u'Skipping the files uploaded less than %s seconds ago..' % waitTime)
imagesToSkip = 0
- while 1:
+ while 1:
loadOtherImages = True # ensure that all the images loaded aren't to skip!
for image in generator:
if normal:
@@ -1482,11 +1482,11 @@
num = 0
for imageData in newImages:
newGen.append(imageData)
- return newGen
+ return newGen
else:
pywikibot.output(u"The wait option is available only with the standard generator.")
return generator
-
+
def isTagged(self):
""" Understand if a file is already tagged or not. """
# Is the image already tagged? If yes, no need to double-check, skip
@@ -1500,9 +1500,9 @@
return True
elif i.lower() in self.imageCheckText:
return True
-
+
return False # Nothing Found
-
+
def findAdditionalProblems(self):
# In every tupla there's a setting configuration
for tupla in self.settingsData:
@@ -1546,7 +1546,7 @@
self.summary_used = summary
self.mex_used = mexCatched
continue
-
+
def checkStep(self):
# nothing = Defining an empty image description
nothing = ['', ' ', ' ', ' ', '\n', '\n ', '\n ', '\n\n', '\n \n', ' \n', ' \n ', ' \n \n']
@@ -1555,8 +1555,8 @@
# MIT license is ok on italian wikipedia, let also this here
something = ['{{'] # Don't put "}}" here, please. Useless and can give problems.
# Unused file extensions. Does not contain PDF.
- notallowed = ("xcf", "xls", "sxw", "sxi", "sxc", "sxd")
- brackets = False
+ notallowed = ("xcf", "xls", "sxw", "sxi", "sxc", "sxd")
+ brackets = False
delete = False
extension = self.imageName.split('.')[-1] # get the extension from the image's name
# Load the notification messages
@@ -1573,7 +1573,7 @@
# Some formatting for delete immediately template
di = u'\n%s' % di
dels = dels % di
-
+
# Page => ImagePage
# Get the text in the image (called imageCheckText)
try:
@@ -1590,13 +1590,13 @@
# Delete the fields where the templates cannot be loaded
regex_nowiki = re.compile(r'<nowiki>(.*?)</nowiki>', re.DOTALL)
regex_pre = re.compile(r'<pre>(.*?)</pre>', re.DOTALL)
- self.imageCheckText = regex_nowiki.sub('', self.imageCheckText); self.imageCheckText = regex_pre.sub('', self.imageCheckText)
+ self.imageCheckText = regex_nowiki.sub('', self.imageCheckText); self.imageCheckText = regex_pre.sub('', self.imageCheckText)
# Deleting the useless template from the description (before adding something
# in the image the original text will be reloaded, don't worry).
if self.isTagged():
# Tagged? Yes, skip.
printWithTimeZone(u'%s is already tagged...' % self.imageName)
- return True
+ return True
for a_word in something: # something is the array with {{, MIT License and so on.
if a_word in self.imageCheckText:
# There's a template, probably a license (or I hope so)
@@ -1614,7 +1614,7 @@
if brackets == True and license_found != None:
# It works also without this... but i want only to be sure ^^
brackets = False
- return True
+ return True
elif delete:
pywikibot.output(u"%s is not a file!" % self.imageName)
# Modify summary text
@@ -1658,7 +1658,7 @@
normal = False # Check the new images or use another generator?
urlUsed = False # Use the url-related function instead of the new-pages generator
regexGen = False # Use the regex generator
- untagged = False # Use the untagged generator
+ untagged = False # Use the untagged generator
duplicatesActive = False # Use the duplicate option
duplicatesReport = False # Use the duplicate-report option
sendemailActive = False # Use the send-email
@@ -1858,14 +1858,14 @@
except IndexError:
pywikibot.output(u"%s is not a file, skipping..." % image.title())
continue
- mainClass.setParameters(imageName, timestamp, uploader) # Setting the image for the main class
+ mainClass.setParameters(imageName, timestamp, uploader) # Setting the image for the main class
# Skip block
if skip == True:
skip = mainClass.skipImages(skip_number, limit)
if skip == True:
- continue
+ continue
# Check on commons if there's already an image with the same name
- if commonsActive == True and site.family.name != "commons":
+ if commonsActive == True and site.family.name != "commons":
response = mainClass.checkImageOnCommons()
if response == False:
continue
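The duplicate handling in the hunks above hinges on the file hash: two uploads with the same SHA1 are byte-identical, so listing files by hash finds every duplicate at once. A hedged sketch of the kind of query getFilesFromAnHash performs (list=allimages with aisha1 is the real API parameter; the host is illustrative):

    import json, urllib, urllib2

    def files_with_same_hash(sha1, host='commons.wikimedia.org'):
        """Return names of all files whose content has the given SHA1."""
        params = {'action': 'query', 'list': 'allimages',
                  'aisha1': sha1, 'format': 'json'}
        url = 'http://%s/w/api.php?%s' % (host, urllib.urlencode(params))
        data = json.load(urllib2.urlopen(url))
        return [img['name'] for img in data['query']['allimages']]

    # More than one name back means the upload duplicates an existing file.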
Modified: trunk/pywikipedia/clean_sandbox.py
===================================================================
--- trunk/pywikipedia/clean_sandbox.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/clean_sandbox.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -150,7 +150,7 @@
self.userlist = [page.title().split(u'/')[0] for page in pywikibot.Page(self.site, userlist).linkedPages()]
def run(self):
-
+
def minutesDiff(time1, time2):
if type(time1) in [long, int]:
time1 = str(time1)
Modified: trunk/pywikipedia/commonscat.py
===================================================================
--- trunk/pywikipedia/commonscat.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/commonscat.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -83,7 +83,7 @@
u'انبار رده', u'Commons category',u'انبار-رده']),
'fr' : (u'Commonscat', [u'CommonsCat', u'Commons cat',
u'Commons category']),
- 'frp' : (u'Commonscat', [u'CommonsCat']),
+ 'frp' : (u'Commonscat', [u'CommonsCat']),
'ga' : (u'Catcómhaoin', [u'Commonscat']),
'hi' : (u'Commonscat', [u'Commons2', u'Commons cat', u'Commons category']),
'hu' : (u'Commonskat', [u'Közvagyonkat']),
Modified: trunk/pywikipedia/config.py
===================================================================
--- trunk/pywikipedia/config.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/config.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -74,7 +74,7 @@
# Secure Connection to all Wikimedia Projects
SSL_connection = False
-
+
# password_file = ".passwd"
# A password file with default passwords. For more information, please
# see LoginManager.readPassword in login.py.
Modified: trunk/pywikipedia/copyright.py
===================================================================
--- trunk/pywikipedia/copyright.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/copyright.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -351,7 +351,7 @@
f = codecs.open(path, 'w', 'utf-8')
f.write(data)
f.close()
-
+
def update(self):
self.download(force_update = True)
self.scan()
Modified: trunk/pywikipedia/cosmetic_changes.py
===================================================================
--- trunk/pywikipedia/cosmetic_changes.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/cosmetic_changes.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -140,7 +140,7 @@
'bg': u'; козметични промени',
'br': u'; Kemm dister',
'ca': u'; canvis cosmètics',
- 'ckb':u'; دەستکاریی جوانکاری',
+ 'ckb':u'; دەستکاریی جوانکاری',
'cs': u'; kosmetické úpravy',
'da': u'; kosmetiske ændringer',
'de': u'; kosmetische Änderungen',
@@ -391,7 +391,7 @@
print found
hasCommentLine = True
text = regex.sub('', text)
-
+
# Adding categories
if categories:
text = pywikibot.replaceCategoryLinks(text, categories, site = self.site)
@@ -769,7 +769,7 @@
text,
r"([\r\n])\=\= *(Licensing|License information|{{int:license-header}}) *\=\=",
r"\1== {{int:license}} ==", exceptions, True)
-
+
# frequent field values to {{int:}} versions
text = pywikibot.replaceExcept(
text,
@@ -779,10 +779,10 @@
text,
r'(\| *Permission *\=) *(?:[Ss]ee below|[Ss]iehe unten) *([\r\n])',
r'\1\2', exceptions, True)
-
+
# added to transwikied pages
text = pywikibot.replaceExcept(text, r'__NOTOC__', '', exceptions, True)
-
+
# tracker element for js upload form
text = pywikibot.replaceExcept(
text,
@@ -790,7 +790,7 @@
'', exceptions[1:], True)
text = pywikibot.replaceExcept(text, r'{{ImageUpload\|(?:basic|full)}}',
'', exceptions, True)
-
+
# duplicated section headers
text = pywikibot.replaceExcept(
text,
Modified: trunk/pywikipedia/date.py
===================================================================
--- trunk/pywikipedia/date.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/date.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -996,7 +996,7 @@
(lambda v: dh_centuryAD( v, u'%des kansblydhen' ), lambda p: p == 5),
(lambda v: dh_centuryAD( v, u'%dns kansblydhen' ), lambda p: p >= 20),
(lambda v: dh_centuryAD( v, u'%dves kansblydhen' ), alwaysTrue)]),
- 'ksh': lambda v: dh_centuryAD( v, u'%d. Joohunndot'),
+ 'ksh': lambda v: dh_centuryAD( v, u'%d. Joohunndot'),
'la' : lambda v: dh_centuryAD( v, u'Saeculum %d' ),
'lb' : lambda v: dh_centuryAD( v, u'%d. Joerhonnert' ),
Modified: trunk/pywikipedia/deledpimage.py
===================================================================
--- trunk/pywikipedia/deledpimage.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/deledpimage.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -42,7 +42,7 @@
'en': u'This Non-free image NOT used in non-article namespaces, see[[Wikipedia:Non-free content#Policy]]',
'zh': u'不是使用在条目中的非自由版权图像,根据[[Wikipedia:合理使用]],不能在非条目名字空间展示:\n',
}
-
+
msg = {
'ar': u'روبوت: إصلاح استخدام صورة EDP: [[%s]]',
'en': u'Robot: Fix EDP image use: [[%s]]',
@@ -60,7 +60,7 @@
# from the references of the EDP templates, get all non-free images
for template in templatelist:
images = [page for page in template.getReferences() if page.isImage()]
-
+
for image in images :
imagetitle=image.title()
imagepage=pywikibot.ImagePage(site,imagetitle)
@@ -77,7 +77,7 @@
re.search('<!--(.*?)'+imagetitle+'(.*?)-->',text,re.I).group(0)
except:
try:
- # imagetext=re.search('\[\['+imagetitle+'(.*?)\]\]',text,re.I).group(0)
+ # imagetext=re.search('\[\['+imagetitle+'(.*?)\]\]',text,re.I).group(0)
if imagetitle not in text:
# Not [[Image:]] namespace
@@ -94,12 +94,12 @@
#Image in userpage, imagepage,and all talkpage , [[Image:wiki.png]] --> [[:Image:wiki.png]]
if ns==1 or ns==6 or ns==2 or ns==3 or ns==5 or ns==7 or ns==9 or ns==11 or ns==13 or ns==15 or ns==17 or ns==101:
-
+
text = re.sub('\[\['+imagetitle+'(.*?)\]\]', '<!--'+lcontent+'\n-->'+'[['+':'+imagetitle+']]',text, re.I)
pywikibot.output(c+u'FIX!\nSleep 10 s......')
pimage.put(text, putmsg % imagetitle)
time.sleep(10)
-
+
#Image in template, categorypage, remove
elif ns==10 or ns==14:
text = re.sub('\[\['+imagetitle+'(.*?)(|)\]\]', '<!--'+lcontent+imagetext+'\n-->',text, re.I)
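A caution when reading the re.sub calls above: in Python 2, re.sub(pattern, repl, text, re.I) passes re.I (numerically 2) as the count argument rather than as flags, so the match stays case-sensitive and stops after two replacements. If case-insensitive substitution is intended, the flag has to travel inside the pattern; a minimal sketch:

    import re

    text = u'[[Image:Wiki.png]] and [[image:wiki.png]]'
    # (?i) builds IGNORECASE into the pattern itself, which works on
    # every Python 2 version; re.I as a fourth positional arg does not.
    fixed = re.sub(r'(?i)\[\[Image:(.*?)\]\]', r'[[:Image:\1]]', text)
    # fixed == u'[[:Image:Wiki.png]] and [[:Image:wiki.png]]'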
Modified: trunk/pywikipedia/delinker.py
===================================================================
--- trunk/pywikipedia/delinker.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/delinker.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -14,4 +14,4 @@
module = 'image_replacer'
bot = __import__(module)
-bot.main()
\ No newline at end of file
+bot.main()
Modified: trunk/pywikipedia/djvutext.py
===================================================================
--- trunk/pywikipedia/djvutext.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/djvutext.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -92,7 +92,7 @@
self.prefix = self.prefix[6:]
pywikibot.output(u"Using prefix %s" % self.prefix)
gen = self.PagesGenerator()
-
+
site = pywikibot.getSite()
self.username = config.usernames[site.family.name][site.lang]
@@ -108,7 +108,7 @@
s = f.read()
f.close()
return s.find('TXTz') >= 0
-
+
def get_page(self, pageno):
pywikibot.output(unicode("fetching page %d" % (pageno)))
cmd = u"djvutxt --page=%d \"%s\" \"%s.out\"" \
Modified: trunk/pywikipedia/family.py
===================================================================
--- trunk/pywikipedia/family.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/family.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -84,7 +84,7 @@
'wuu', 'ts', 'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh',
'zh-tw', 'zh-cn',
]
-
+
# Order for fy: alphabetical by code, but y counts as i
def fycomp(x,y):
x = x.replace("y","i")+x.count("y")*"!"
@@ -3544,7 +3544,7 @@
'ab', 'av', 'ba', 'be', 'be-x-old', 'bg', 'bxr', 'ce', 'cu',
'cv', 'koi', 'kv', 'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo', 'myv',
'mhr', 'mrj', 'os', 'ru', 'rue', 'sah', 'tg', 'tk', 'udm', 'uk',
- 'xal',
+ 'xal',
# languages using multiple scripts, including cyrillic
'ha', 'kk', 'sh', 'sr', 'tt'
],
@@ -3607,7 +3607,7 @@
# self.crossnamespace[102] = {
# 'pt': { '_default': [0]}
# }
-
+
@property
def iwkeys(self):
if self.interwiki_forward:
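The fycomp comparator above ("y counts as i") rewrites each code before comparing: every y becomes i, and one trailing '!' per rewritten y breaks ties in favour of the original i spelling. The same ordering expressed as a key function, as a brief sketch:

    def fykey(code):
        # 'y' collates as 'i'; the appended '!' marks make y spellings
        # lose ties against codes spelled with 'i' to begin with.
        return code.replace('y', 'i') + code.count('y') * '!'

    print sorted(['fy', 'fi', 'ga'], key=fykey)   # ['fi', 'fy', 'ga']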
Modified: trunk/pywikipedia/featured.py
===================================================================
--- trunk/pywikipedia/featured.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/featured.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -18,11 +18,11 @@
-side use -side if you want to move all {{Link FA|lang}} next to the
corresponding interwiki links. Default is placing
{{Link FA|lang}} on top of the interwiki links.
-
+
-count Only counts how many featured/good articles exist
on all wikis (given with the "-fromlang" argument) or
- on several language(s) (when using the "-fromall" argument).
- Example: featured.py -fromlang:en,he -count
+ on several language(s) (when using the "-fromall" argument).
+ Example: featured.py -fromlang:en,he -count
counts how many featured articles exist in the en and he
wikipedias.
@@ -422,7 +422,7 @@
if pType == 'good':
name=good_name[site.lang][1]
elif pType == 'former':
- name=former_name[site.lang][1]
+ name=former_name[site.lang][1]
elif pType == 'list':
name=lists_name[site.lang][1]
else:
Modified: trunk/pywikipedia/fixing_redirects.py
===================================================================
--- trunk/pywikipedia/fixing_redirects.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/fixing_redirects.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -181,7 +181,7 @@
else:
pywikibot.output('Nothing left to do.')
return
-
+
for page2 in links:
try:
target = page2.getRedirectTarget()
Modified: trunk/pywikipedia/flickrripper.py
===================================================================
--- trunk/pywikipedia/flickrripper.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/flickrripper.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -57,11 +57,11 @@
Get the photo info and the photo sizes so we can use these later on
TODO: Add exception handling
-
+
'''
gotPhoto = False
while not gotPhoto:
- try:
+ try:
photoInfo = flickr.photos_getInfo(photo_id=photo_id)
#xml.etree.ElementTree.dump(photoInfo)
photoSizes = flickr.photos_getSizes(photo_id=photo_id)
@@ -79,7 +79,7 @@
TODO: Maybe add more licenses
'''
-
+
license = photoInfo.find('photo').attrib['license']
if flickr_allowed_license[int(license)]:
return True
@@ -133,10 +133,10 @@
TODO: Add exception handling, try a couple of times
'''
parameters = urllib.urlencode({'id' : photo_id, 'raw' : 'on'})
-
+
rawDescription = urllib.urlopen(
"http://wikipedia.ramselehof.de/flinfo.php?%s" % parameters).read()
-
+
return rawDescription.decode('utf-8')
def getFilename(photoInfo=None, site=pywikibot.getSite(u'commons', u'commons'),
@@ -161,7 +161,7 @@
i = i + 1
else:
return u'%s - %s - %s (%s).jpg' % (project, username, title,
- str(i))
+ str(i))
else:
return u'%s - %s - %s.jpg' % (project, username, title)
@@ -170,7 +170,7 @@
the page might not be allowed by the software.
'''
- title = title.strip()
+ title = title.strip()
title = re.sub(u"[<{\\[]", u"(", title)
title = re.sub(u"[>}\\]]", u")", title)
title = re.sub(u"[ _]?\\(!\\)", u"", title)
@@ -186,10 +186,10 @@
title = re.sub(u"--+", u"-", title)
title = re.sub(u",,+", u",", title)
title = re.sub(u"[-,^]([.]|$)", u"\\1", title)
- title = title.replace(u" ", u"_")
+ title = title.replace(u" ", u"_")
return title
-
+
def buildDescription(flinfoDescription=u'', flickrreview=False, reviewer=u'',
override=u'', addCategory=u'', removeCategories=False):
''' Build the final description for the image. The description is based on
@@ -207,7 +207,7 @@
description = description.replace(u'{{flickrreview}}\n', u'')
description = description.replace(
u'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not a free license --~~~~}}\n',
- u'')
+ u'')
description = description.replace(u'=={{int:license}}==',
u'=={{int:license}}==\n' + override)
elif flickrreview:
@@ -219,7 +219,7 @@
description = description.replace(u'{{subst:unc}}\n', u'')
description = description + u'\n[[Category:' + addCategory + ']]\n'
description = description.replace(u'\r\n', u'\n')
- return description
+ return description
def processPhoto(flickr=None, photo_id=u'', flickrreview=False, reviewer=u'',
override=u'', addCategory=u'', removeCategories=False,
@@ -269,7 +269,7 @@
verifyDescription=False)
bot.upload_image(debug=False)
return 1
- return 0
+ return 0
class Tkdialog:
@@ -281,7 +281,7 @@
self.root.title(filename)
self.photoDescription = photoDescription
- self.filename = filename
+ self.filename = filename
self.photo = photo
self.skip=False
self.exit=False
@@ -290,14 +290,14 @@
# The image
self.image=self.getImage(self.photo, 800, 600)
self.imagePanel=Label(self.root, image=self.image)
-
+
self.imagePanel.image = self.image
-
+
# The filename
self.filenameLabel=Label(self.root,text=u"Suggested filename")
self.filenameField=Entry(self.root, width=100)
self.filenameField.insert(END, filename)
-
+
# The description
self.descriptionLabel=Label(self.root,text=u"Suggested description")
self.descriptionScrollbar=Scrollbar(self.root, orient=VERTICAL)
@@ -305,20 +305,20 @@
self.descriptionField.insert(END, photoDescription)
self.descriptionField.config(state=NORMAL, height=12, width=100, padx=0, pady=0, wrap=WORD, yscrollcommand=self.descriptionScrollbar.set)
self.descriptionScrollbar.config(command=self.descriptionField.yview)
-
+
# The buttons
self.okButton=Button(self.root, text="OK", command=self.okFile)
self.skipButton=Button(self.root, text="Skip", command=self.skipFile)
-
+
## Start grid
# The image
self.imagePanel.grid(row=0, column=0, rowspan=11, columnspan=4)
-
+
# The buttons
self.okButton.grid(row=11, column=1, rowspan=2)
self.skipButton.grid(row=11, column=2, rowspan=2)
-
+
# The filename
self.filenameLabel.grid(row=13, column=0)
self.filenameField.grid(row=13, column=1, columnspan=3)
@@ -334,7 +334,7 @@
image.thumbnail((width, height))
imageTk = ImageTk.PhotoImage(image)
return imageTk
-
+
def okFile(self):
''' The user pressed the OK button. '''
self.filename=self.filenameField.get()
@@ -364,7 +364,7 @@
found_start_id=True
else:
found_start_id=False
-
+
# http://www.flickr.com/services/api/flickr.groups.pools.getPhotos.html
# Get the photos in a group
if group_id:
@@ -391,12 +391,12 @@
return
else:
yield photo.attrib['id']
-
+
except flickrapi.exceptions.FlickrError:
gotPhotos = False
pywikibot.output(u'Flickr api problem, sleeping')
time.sleep(30)
-
+
# http://www.flickr.com/services/api/flickr.photosets.getPhotos.html
# Get the photos in a photoset
elif photoset_id:
@@ -407,7 +407,7 @@
for i in range(1, int(pages)+1):
gotPhotos = False
while not gotPhotos:
- try:
+ try:
for photo in flickr.photosets_getPhotos(
photoset_id=photoset_id, per_page='100', page=i
).find('photoset').getchildren():
@@ -420,12 +420,12 @@
return
else:
yield photo.attrib['id']
-
+
except flickrapi.exceptions.FlickrError:
gotPhotos = False
pywikibot.output(u'Flickr api problem, sleeping')
time.sleep(30)
-
+
# http://www.flickr.com/services/api/flickr.people.getPublicPhotos.html
# Get the (public) photos uploaded by a user
elif user_id:
@@ -454,7 +454,7 @@
gotPhotos = False
pywikibot.output(u'Flickr api problem, sleeping')
time.sleep(30)
-
+
return
def usage():
@@ -500,8 +500,8 @@
# Do we mark the images as reviewed right away?
if config.flickr['review']:
flickrreview = config.flickr['review']
- else:
- flickrreview = False
+ else:
+ flickrreview = False
# Set the Flickr reviewer
if config.flickr['reviewer']:
@@ -515,7 +515,7 @@
reviewer = u''
# Should be renamed to overrideLicense or something like that
- override = u''
+ override = u''
for arg in pywikibot.handleArgs():
if arg.startswith('-group_id'):
if len(arg) == 9:
@@ -544,7 +544,7 @@
end_id = pywikibot.input(
u'What is the id of the photo you want to end at?')
else:
- end_id = arg[8:]
+ end_id = arg[8:]
elif arg.startswith('-tags'):
if len(arg) == 5:
tags = pywikibot.input(
@@ -557,7 +557,7 @@
if len(arg) == 9:
reviewer = pywikibot.input(u'Who is the reviewer?')
else:
- reviewer = arg[10:]
+ reviewer = arg[10:]
elif arg.startswith('-override'):
if len(arg) == 9:
override = pywikibot.input(u'What is the override text?')
@@ -572,7 +572,7 @@
elif arg == '-removecategories':
removeCategories = True
elif arg == '-autonomous':
- autonomous = True
+ autonomous = True
if user_id or group_id or photoset_id:
for photo_id in getPhotos(flickr, user_id, group_id, photoset_id,
@@ -586,7 +586,7 @@
pywikibot.output(u'Finished running')
pywikibot.output(u'Total photos: ' + str(totalPhotos))
pywikibot.output(u'Uploaded photos: ' + str(uploadedPhotos))
-
+
if __name__ == "__main__":
try:
main()
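
All three Flickr lookups above (group pool, photoset, user photos) sit in the same retry-and-sleep loop: try the call, and on flickrapi.exceptions.FlickrError print a message, sleep 30 seconds, and try again. A minimal sketch of that pattern (Python 2, matching the codebase; retry_call and its arguments are illustrative, not part of the script):

    import time

    def retry_call(func, exceptions=(IOError,), delay=30):
        # Call func() until it stops raising one of `exceptions`,
        # sleeping between attempts, as the script does with
        # flickrapi.exceptions.FlickrError.
        while True:
            try:
                return func()
            except exceptions:
                print u'Flickr api problem, sleeping'
                time.sleep(delay)

    # e.g. retry_call(lambda: flickr.photosets_getPhotos(photoset_id=psid))
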
Modified: trunk/pywikipedia/generate_family_file.py
===================================================================
--- trunk/pywikipedia/generate_family_file.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/generate_family_file.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -48,29 +48,29 @@
name = raw_input("Please insert a short name (eg: freeciv): ")
self.base_url = url
self.name = name
-
+
self.wikis = {} # {'http://wiki/$1': Wiki('http://wiki/$1'), ...}
self.langs = [] # [Wiki('http://wiki/$1'), ...]
-
+
self.namespaces = NamespaceStorage()
-
+
def run(self):
print "Generating family file from %s" % self.base_url
-
+
w = Wiki(self.base_url)
self.wikis[w.iwpath] = w
print
print "=================================="
- print "api url: %s" % w.api
+ print "api url: %s" % w.api
print "MediaWiki version: %s" % w.version
print "=================================="
print
-
+
self.getlangs(w)
self.getapis()
self.getnamespaces()
self.writefile()
-
+
def getlangs(self, w):
print "Determining other languages...",
try:
@@ -102,14 +102,14 @@
print "downloaded"
else:
print "in cache"
-
+
def getnamespaces(self):
print "Retrieving namespaces... ",
for w in self.wikis.itervalues():
print "%s " % w.lang,
self.namespaces.addfromwiki(w)
print
-
+
def writefile(self):
fn = "families/%s_family.py" % self.name
print "Writing %s... " % fn
@@ -121,7 +121,7 @@
except IOError: # file not found
pass
f = codecs.open(fn, 'w', 'utf-8')
-
+
f.write("""
# -*- coding: utf-8 -*-
\"\"\"
@@ -141,23 +141,23 @@
self.name = '%(name)s'
self.langs = {
""".lstrip() % {'url': self.base_url, 'name': self.name})
-
+
for w in self.wikis.itervalues():
f.write(" '%(lang)s': u'%(hostname)s',\n" % {'lang': w.lang, 'hostname': urlparse(w.server).netloc})
-
+
f.write(" }\n\n")
-
+
f.write(self.namespaces.output(8))
f.write("\n\n")
-
+
f.write(" def scriptpath(self, code):\n")
f.write(" return {\n")
-
+
for w in self.wikis.itervalues():
f.write(" '%(lang)s': u'%(path)s',\n" % {'lang': w.lang, 'path': w.scriptpath})
f.write(" }[code]\n")
f.write("\n")
-
+
f.write(" def version(self, code):\n")
f.write(" return {\n")
for w in self.wikis.itervalues():
@@ -171,14 +171,14 @@
def __init__(self):
self.nsinfo = {}
self.f = family.Family()
-
+
def addfromwiki(self, w):
data = json.load(urlopen(w.api + "?action=query&format=json&meta=siteinfo&siprop=namespaces|namespacealiases"))['query']
for ns in data['namespaces'].itervalues():
self.add(ns['id'], w.lang, ns['*'])
for ns in data['namespacealiases']:
self.add(ns['id'], w.lang, ns['*'])
-
+
def add(self, ns, lang, translation):
""" Contains logic for determining whether to define a namespace or not """
ns = int(ns)
@@ -189,11 +189,11 @@
raise KeyError
except KeyError:
self._store(ns, lang, translation)
-
+
def _store(self, ns, lang, translation):
""" Contains logic on how to store a translation """
self.nsinfo.setdefault(ns, {}).setdefault(lang, []).append(translation)
-
+
def output(self, indent):
data = ""
for nsid, langs in self.nsinfo.iteritems():
@@ -205,8 +205,8 @@
data += "self.namespaces[%(nsid)i][%(lang)r] = %(translations)r" % locals()
data += "\n"
return data
-
+
class Wiki(object):
REwgEnableApi = re.compile(ur'wgEnableAPI ?= ?true')
REwgServer = re.compile(ur'wgServer ?= ?"([^"]*)"')
@@ -214,7 +214,7 @@
REwgArticlePath = re.compile(ur'wgArticlePath ?= ?"([^"]*)"')
REwgContentLanguage = re.compile(ur'wgContentLanguage ?= ?"([^"]*)"')
REwgVersion = re.compile(ur'wgVersion ?= ?"([^"]*)"')
-
+
def __init__(self, fromurl):
if fromurl.endswith("$1"):
fromurl = fromurl[:-2]
@@ -257,5 +257,5 @@
print "Usage: %s <url> <short name>"
print "Example: %s http://www.mywiki.bogus/wiki/Main_Page mywiki"
print "This will create the file families/mywiki_family.py"
-
+
FamilyFileGenerator(*sys.argv[1:]).run()
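
The namespace data written into the family file comes from the siteinfo request shown in the NamespaceStorage.addfromwiki hunk above; it can be reproduced standalone. A minimal sketch (Python 2; api_url is any MediaWiki api.php endpoint):

    import json
    from urllib import urlopen

    def fetch_namespaces(api_url):
        # Ask the MediaWiki API for namespaces and their aliases.
        url = (api_url + "?action=query&format=json&meta=siteinfo"
                         "&siprop=namespaces|namespacealiases")
        data = json.load(urlopen(url))['query']
        # Map namespace id -> local ('*') name.
        return dict((int(ns['id']), ns['*'])
                    for ns in data['namespaces'].itervalues())

    # e.g. fetch_namespaces("http://en.wikipedia.org/w/api.php")[6] -> u'File'
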
Modified: trunk/pywikipedia/imagecopy.py
===================================================================
--- trunk/pywikipedia/imagecopy.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/imagecopy.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -238,7 +238,7 @@
}
def pageTextPost(url,parameters):
- gotInfo = False;
+ gotInfo = False;
while(not gotInfo):
try:
commonsHelperPage = urllib.urlopen("http://toolserver.org/~magnus/commonshelper.php", parameters)
@@ -249,7 +249,7 @@
except socket.timeout:
pywikibot.output(u'Got a timeout, let\'s try again')
return data
-
+
class imageTransfer (threading.Thread):
def __init__ ( self, imagePage, newname, category):
@@ -269,17 +269,17 @@
'ignorewarnings':'1',
'doit':'Uitvoeren'
}
-
+
tosend=urllib.urlencode(tosend)
print tosend
CH=pageTextPost('http://www.toolserver.org/~magnus/commonshelper.php', tosend)
print 'Got CH desc.'
-
+
tablock=CH.split('<textarea ')[1].split('>')[0]
CH=CH.split('<textarea '+tablock+'>')[1].split('</textarea>')[0]
CH=CH.replace(u'×', u'×')
CH = self.fixAuthor(CH)
- pywikibot.output(CH);
+ pywikibot.output(CH);
        # I want every picture to be tagged with the bot template so I can check my contributions later.
CH=u'\n\n{{BotMoveToCommons|'+ self.imagePage.site().language() + '.' + self.imagePage.site().family.name +'|year={{subst:CURRENTYEAR}}|month={{subst:CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}}}' + CH
@@ -287,7 +287,7 @@
if self.category:
CH = CH.replace(u'{{subst:Unc}} <!-- Remove this line once you have added categories -->', u'')
CH = CH + u'[[Category:' + self.category + u']]'
-
+
bot = UploadRobot(url=self.imagePage.fileUrl(), description=CH, useFilename=self.newname, keepFilename=True, verifyDescription=False, ignoreWarning = True, targetSite = pywikibot.getSite('commons', 'commons'))
bot.run()
@@ -327,7 +327,7 @@
imagebot = ImageRobot(generator = self.preloadingGen, oldImage = self.imagePage.titleWithoutNamespace(), newImage = self.newname, summary = moveSummary, always = True, loose = True)
imagebot.run()
return
-
+
def fixAuthor(self, pageText):
'''
Fix the author field in the information template.
@@ -340,16 +340,16 @@
#Find the {{self|author=
selfMatch = selfRegex.search(pageText)
-
+
#Check if both are found and are equal
if (informationMatch and selfMatch):
if(informationMatch.group('author')==selfMatch.group('author')):
#Replace |Author=Original uploader was ... with |Author= ...
pageText = informationRegex.sub(r'|Author=\g<author>', pageText)
-
+
return pageText
-
+
#-label ok skip view
#textarea
archivo=pywikibot.config.datafilepath("Uploadbot.localskips.txt")
@@ -486,7 +486,7 @@
category = arg [len('-cc:'):]
else:
genFactory.handleArg(arg)
-
+
generator = genFactory.getCombinedGenerator()
if not generator:
raise add_text.NoEnoughData('You have to specify the generator you want to use for the script!')
@@ -516,7 +516,7 @@
skip = True
else:
while True:
-
+
            # Do the Tkdialog to accept/reject and change the name
(newname, skip)=Tkdialog(imagepage.titleWithoutNamespace(), imagepage.get(), username, imagepage.permalink(), imagepage.templates()).getnewname()
@@ -530,7 +530,7 @@
newname=imagepage.titleWithoutNamespace()
else:
newname = newname.decode('utf-8')
-
+
# Check if the image already exists
CommonsPage=pywikibot.Page(
pywikibot.getSite('commons', 'commons'),
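
The fixAuthor hunk above hinges on named regex groups: the author name is captured from both the {{Information}} template and the {{self}} template, and only when the two agree is the "Original uploader was" boilerplate dropped. A simplified sketch (Python 2; these patterns are illustrative guesses, not the script's exact regexes):

    import re

    informationRegex = re.compile(
        ur'\|Author=Original uploader was (?P<author>[^\r\n|]+)')
    selfRegex = re.compile(ur'\{\{self\|[^}]*author=(?P<author>[^}|]+)')

    def fix_author(page_text):
        info = informationRegex.search(page_text)
        self_match = selfRegex.search(page_text)
        if info and self_match and \
           info.group('author') == self_match.group('author'):
            # \g<author> re-inserts the captured name without the boilerplate
            page_text = informationRegex.sub(r'|Author=\g<author>', page_text)
        return page_text
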
Modified: trunk/pywikipedia/imagecopy_enwp.py
===================================================================
--- trunk/pywikipedia/imagecopy_enwp.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/imagecopy_enwp.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -114,8 +114,8 @@
u'Ffd',
u'PD-user', # Only the self templates are supported for now.
]
-
+
licenseTemplates = [(u'\{\{(self|self2)\|([^\}]+)\}\}', u'{{Self|\\2|author=[[:%(lang)s:User:%(author)s|%(author)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]}}'),
(u'\{\{(GFDL-self|GFDL-self-no-disclaimers)\|([^\}]+)\}\}', u'{{Self|GFDL|\\2|author=[[:%(lang)s:User:%(author)s|%(author)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]}}'),
(u'\{\{GFDL-self-with-disclaimers\|([^\}]+)\}\}', u'{{Self|GFDL-with-disclaimers|\\1|author=[[:%(lang)s:User:%(author)s|%(author)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]}}'),
@@ -155,7 +155,7 @@
self.description = description
self.date = date
self.source = source
- self.author = author
+ self.author = author
self.licensetemplate = licensetemplate
self.categories = categories
self.skip = False
@@ -212,7 +212,7 @@
self.old_description.grid(row=1, column=0, columnspan=3)
self.scrollbar.grid(row=1, column=3)
self.new_description_label.grid(row=2, column=0, columnspan=3)
-
+
self.filename_label.grid(row=3, column=0)
self.information_description_label.grid(row=4, column=0)
self.information_date_label.grid(row=5, column=0)
@@ -244,7 +244,7 @@
self.author=self.information_author.get()
self.licensetemplate=self.information_licensetemplate.get()
self.categories=self.information_categories.get()
-
+
self.root.destroy()
def skipFile(self):
@@ -307,7 +307,7 @@
if self.doiskip(imagepage):
pywikibot.output(u'Skipping %s : Got a template on the skip list.' % page.title())
return False
-
+
text = imagepage.get()
foundMatch = False
for (regex, replacement) in licenseTemplates:
@@ -335,7 +335,7 @@
Build a new description based on the imagepage
'''
if u'{{Information' in imagepage.get() or u'{{information' in imagepage.get():
- (description, date, source, author) = self.getNewFieldsFromInformation(imagepage)
+ (description, date, source, author) = self.getNewFieldsFromInformation(imagepage)
else:
(description, date, source, author) = self.getNewFieldsFromFreetext(imagepage)
@@ -346,7 +346,7 @@
def getNewFieldsFromInformation(self, imagepage):
''' Try to extract fields from the current information template for the
newinformation template.
-
+
'''
description = u''
date = u''
@@ -358,15 +358,15 @@
# Need to add the permission field
# Need to use pywikipedia template parser code
regexes =[u'\{\{Information[\s\r\n]*\|[\s\r\n]*description[\s\r\n]*=(?P<description>.*)\|[\s\r\n]*source[\s\r\n]*=(?P<source>.*)\|[\s\r\n]*date[\s\r\n]*=(?P<date>.*)\|[\s\r\n]*author[\s\r\n]*=(?P<author>.*)\|[\s\r\n]*permission.*=(?P<permission>[^\}]*)\|[\s\r\n]*other_versions.*=(?P<other_versions>[^\}]*)\}\}',
- u'\{\{Information[\s\r\n]*\|[\s\r\n]*description[\s\r\n]*=(?P<description>.*)\|[\s\r\n]*source[\s\r\n]*=(?P<source>.*)\|[\s\r\n]*date[\s\r\n]*=(?P<date>.*)\|[\s\r\n]*author[\s\r\n]*=(?P<author>.*)\|[\s\r\n]*other_versions.*=(?P<other_versions>[^\}]*)\}\}',
+ u'\{\{Information[\s\r\n]*\|[\s\r\n]*description[\s\r\n]*=(?P<description>.*)\|[\s\r\n]*source[\s\r\n]*=(?P<source>.*)\|[\s\r\n]*date[\s\r\n]*=(?P<date>.*)\|[\s\r\n]*author[\s\r\n]*=(?P<author>.*)\|[\s\r\n]*other_versions.*=(?P<other_versions>[^\}]*)\}\}',
]
-
+
for regex in regexes:
match =re.search(regex, text, re.IGNORECASE|re.DOTALL)
if match:
description = self.convertLinks(
match.group(u'description').strip(), imagepage.site())
-
+
date = match.group(u'date').strip()
if date == u'':
date = self.getUploadDate(imagepage)
@@ -380,7 +380,7 @@
imagepage.site())
if author == u'':
author = self.getAuthorText(imagepage)
-
+
if u'permission' in match.groupdict():
permission = self.convertLinks(
match.group(u'permission').strip(), imagepage.site())
@@ -389,7 +389,7 @@
match.group(u'other_versions').strip(), imagepage.site())
# Return the stuff we found
return (description, date, source, author)
-
+
#We didn't find anything, return the empty strings
return (description, date, source, author)
@@ -404,12 +404,12 @@
for toRemove in sourceGarbage:
text = re.sub(toRemove, u'', text, re.IGNORECASE)
-
+
for (regex, repl) in licenseTemplates:
text = re.sub(regex, u'', text, re.IGNORECASE)
text = pywikibot.removeCategoryLinks(text, imagepage.site()).strip()
-
+
description = self.convertLinks(text.strip(), imagepage.site())
date = self.getUploadDate(imagepage)
source = self.getSource(imagepage)
@@ -419,7 +419,7 @@
def getUploadDate(self, imagepage):
''' Get the original upload date to put in the date field of the new
information template. If we really have nothing better.
-
+
'''
uploadtime = imagepage.getFileVersionHistory()[-1][0]
uploadDatetime = datetime.strptime(uploadtime, u'%Y-%m-%dT%H:%M:%SZ')
@@ -441,12 +441,12 @@
def getAuthorText(self, imagepage):
''' Get the original uploader to put in the author field of the new
information template.
-
+
'''
site = imagepage.site()
lang = site.language()
family = site.family.name
-
+
firstuploader = self.getAuthor(imagepage)
return u'[[:%(lang)s:User:%(firstuploader)s|%(firstuploader)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]' \
% {u'lang' : lang, u'family' : family ,
@@ -468,10 +468,10 @@
u'[[:%(lang)s:\\1|\\2]]'),
(u'\[\[([^\[\]\|]+)\]\]', u'[[:%(lang)s:\\1|\\1]]'),
]
-
+
for (regex, replacement) in conversions:
text = re.sub(regex, replacement
- % {u'lang' : lang, u'family' : family}, text)
+ % {u'lang' : lang, u'family' : family}, text)
return text
def getNewLicensetemplate(self, imagepage):
@@ -482,7 +482,7 @@
site = imagepage.site()
lang = site.language()
family = site.family.name
- result = u''
+ result = u''
for (regex, replacement) in licenseTemplates:
match = re.search(regex, text, re.IGNORECASE)
if match:
@@ -492,7 +492,7 @@
u'lang' : lang,
u'family' : family}
return result
-
+
def getNewCategories(self, imagepage):
'''
            Get categories for the image
@@ -525,7 +525,7 @@
self.uploadQueue.put(None)
pywikibot.output(u'User worked on all images.')
return True
-
+
def processImage(self, fields):
'''
Work on a single image
@@ -542,7 +542,7 @@
pywikibot.output(u'Skipping %s : User pressed skip.'
% imagepage.title())
return False
-
+
# Check if the image already exists
CommonsPage = pywikibot.Page(pywikibot.getSite('commons', 'commons'), u'File:' + filename)
if not CommonsPage.exists():
@@ -578,7 +578,7 @@
'''
self.checktemplate = False
return
-
+
def processImage(self, fields):
'''
Work on a single image
@@ -588,27 +588,27 @@
pywikibot.output(cid)
bot = UploadRobot(url=imagepage.fileUrl(), description=cid, useFilename=filename, keepFilename=True, verifyDescription=False, ignoreWarning = True, targetSite = pywikibot.getSite('commons', 'commons'))
bot.run()
-
+
self.tagNowcommons(imagepage, filename)
self.replaceUsage(imagepage, filename)
-
+
def buildNewImageDescription(self, imagepage, description, date, source, author, licensetemplate, categories):
'''
- Build a new information template
+ Build a new information template
'''
-
+
site = imagepage.site()
lang = site.language()
family = site.family.name
-
+
cid = u''
if self.checktemplate:
cid = cid + u'\n{{BotMoveToCommons|%(lang)s.%(family)s|year={{subst:CURRENTYEAR}}|month={{subst:CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}}}\n' % {u'lang' : lang, u'family' : family}
cid = cid + u'== {{int:filedesc}} ==\n'
cid = cid + u'{{Information\n'
cid = cid + u'|description={{%(lang)s|1=' % {u'lang' : lang, u'family' : family}
- cid = cid + description + u'}}\n'
+ cid = cid + description + u'}}\n'
cid = cid + u'|date=' + date + u'\n'
cid = cid + u'|source=' + source + u'\n'
cid = cid + u'|author=' + author + u'\n'
@@ -638,7 +638,7 @@
family = site.family.name
sourceimage = imagepage.site().get_address(imagepage.title()).replace(u'&redirect=no&useskin=monobook', u'')
-
+
result = u'== {{Original upload log}} ==\n'
result = result + u'The original description page is/was [http://%(lang)s.%(family)s.org%(sourceimage)s here]. All following user names refer to %(lang)s.%(family)s.\n' % {u'lang' : lang, u'family' : family , u'sourceimage' : sourceimage}
for (timestamp, username, resolution, size, comment) in filehistory:
@@ -650,8 +650,8 @@
u'username' : username,
u'resolution': resolution,
u'size': size,
- u'comment' : comment}
-
+ u'comment' : comment}
+
return result
def tagNowcommons(self, imagepage, filename):
@@ -688,15 +688,15 @@
if imagepage.titleWithoutNamespace() != filename:
gen = pagegenerators.FileLinksGenerator(imagepage)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
-
+
if imagepage.site().language() in imageMoveMessage:
moveSummary = imageMoveMessage[imagepage.site().language()] % (imagepage.titleWithoutNamespace(), filename)
else:
moveSummary = imageMoveMessage['_default'] % (imagepage.titleWithoutNamespace(), filename)
imagebot = ImageRobot(generator = preloadingGen, oldImage = imagepage.titleWithoutNamespace(), newImage = filename, summary = moveSummary, always = True, loose = True)
- imagebot.run()
-
+ imagebot.run()
+
def main(args):
pywikibot.output(u'WARNING: This is an experimental bot')
pywikibot.output(u'WARNING: It will only work on self published work images')
@@ -706,7 +706,7 @@
generator = None;
always = False
checkTemplate = True
-
+
# Load a lot of default generators
genFactory = pagegenerators.GeneratorFactory()
@@ -715,7 +715,7 @@
checkTemplate = False
else:
genFactory.handleArg(arg)
-
+
generator = genFactory.getCombinedGenerator()
if not generator:
raise add_text.NoEnoughData('You have to specify the generator you want to use for the script!')
@@ -732,10 +732,10 @@
imageFetcherThread.daemon=False
userInteractionThread.daemon=False
uploaderThread.daemon=False
-
+
if not checkTemplate:
uploaderThread.nochecktemplate()
-
+
fetchDone = imageFetcherThread.start()
userDone = userInteractionThread.start()
uploadDone = uploaderThread.start()
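
Beyond the whitespace changes, the closing hunks show the script's shape: an image fetcher, a user-interaction dialog and an uploader run as three threads connected by queues, with None as the end-of-work sentinel (self.uploadQueue.put(None) above). A minimal two-stage sketch of that pattern (Python 2; the worker bodies are placeholders):

    import threading
    from Queue import Queue

    def fetcher(out_q):
        for item in ['File:A.jpg', 'File:B.jpg']:  # stand-in for a generator
            out_q.put(item)
        out_q.put(None)                            # sentinel: no more work

    def uploader(in_q):
        while True:
            item = in_q.get()
            if item is None:                       # sentinel received, stop
                break
            print 'uploading %s' % item

    q = Queue()
    threads = [threading.Thread(target=fetcher, args=(q,)),
               threading.Thread(target=uploader, args=(q,))]
    for t in threads:
        t.daemon = False   # as in the script: let the threads finish
        t.start()
    for t in threads:
        t.join()
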
Modified: trunk/pywikipedia/imagecopy_self.py
===================================================================
--- trunk/pywikipedia/imagecopy_self.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/imagecopy_self.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -121,8 +121,8 @@
u'Ticket Scan',
],
}
-
+
licenseTemplates = {
'en': [(u'\{\{(self|self2)\|([^\}]+)\}\}', u'{{Self|\\2|author=[[:%(lang)s:User:%(author)s|%(author)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]}}'),
(u'\{\{(GFDL-self|GFDL-self-no-disclaimers)\|([^\}]+)\}\}', u'{{Self|GFDL|\\2|author=[[:%(lang)s:User:%(author)s|%(author)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]}}'),
@@ -188,7 +188,7 @@
self.description = description
self.date = date
self.source = source
- self.author = author
+ self.author = author
self.licensetemplate = licensetemplate
self.categories = categories
self.skip = False
@@ -246,7 +246,7 @@
self.old_description.grid(row=1, column=0, columnspan=3)
self.scrollbar.grid(row=1, column=3)
self.new_description_label.grid(row=2, column=0, columnspan=3)
-
+
self.filename_label.grid(row=3, column=0)
self.information_description_label.grid(row=4, column=0)
self.information_date_label.grid(row=5, column=0)
@@ -278,7 +278,7 @@
self.author=self.information_author.get()
self.licensetemplate=self.information_licensetemplate.get()
self.categories=self.information_categories.get()
-
+
self.root.destroy()
def skipFile(self):
@@ -345,7 +345,7 @@
u'Skipping %s : Got a template on the skip list.'
% page.title())
return False
-
+
text = imagepage.get()
foundMatch = False
for (regex, replacement) in licenseTemplates[page.site().language()]:
@@ -376,7 +376,7 @@
Build a new description based on the imagepage
'''
if u'{{Information' in imagepage.get() or u'{{information' in imagepage.get():
- (description, date, source, author, permission, other_versions) = self.getNewFieldsFromInformation(imagepage)
+ (description, date, source, author, permission, other_versions) = self.getNewFieldsFromInformation(imagepage)
else:
(description, date, source, author) = self.getNewFieldsFromFreetext(imagepage)
@@ -388,7 +388,7 @@
'''
Try to extract fields from the current information template for the new information template.
'''
-
+
fields = [u'location', u'description', u'source', u'date', u'author', u'permission', u'other versions']
description = u''
@@ -415,7 +415,7 @@
if field in fields:
#Ok, field is good, store it.
contents[field] = value.strip()
-
+
# We now got the contents from the old information template. Let's get the info for the new one
# Description
@@ -446,7 +446,7 @@
# Other_versions
if not contents[u'other versions']==u'':
- other_versions = self.convertLinks(contents[u'other versions'], imagepage.site())
+ other_versions = self.convertLinks(contents[u'other versions'], imagepage.site())
return (description, date, source, author, permission, other_versions)
@@ -461,12 +461,12 @@
for toRemove in sourceGarbage[imagepage.site().language()]:
text = re.sub(toRemove, u'', text, flags=re.IGNORECASE)
-
+
for (regex, repl) in licenseTemplates[imagepage.site().language()]:
text = re.sub(regex, u'', text, flags=re.IGNORECASE)
text = pywikibot.removeCategoryLinks(text, imagepage.site()).strip()
-
+
description = self.convertLinks(text.strip(), imagepage.site())
date = self.getUploadDate(imagepage)
source = self.getSource(imagepage)
@@ -490,7 +490,7 @@
family = site.family.name
if source==u'':
source=u'{{Own}}'
-
+
return source.strip() + u'<BR />Transferred from [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]' % {u'lang' : lang, u'family' : family}
def getAuthorText(self, imagepage):
@@ -500,7 +500,7 @@
site = imagepage.site()
lang = site.language()
family = site.family.name
-
+
firstuploader = self.getAuthor(imagepage)
return u'[[:%(lang)s:User:%(firstuploader)s|%(firstuploader)s]] at [http://%(lang)s.%(family)s.org %(lang)s.%(family)s]' % {u'lang' : lang, u'family' : family , u'firstuploader' : firstuploader}
@@ -519,9 +519,9 @@
conversions =[(u'\[\[([^\[\]\|]+)\|([^\[\]\|]+)\]\]', u'[[:%(lang)s:\\1|\\2]]'),
(u'\[\[([^\[\]\|]+)\]\]', u'[[:%(lang)s:\\1|\\1]]'),
]
-
+
for (regex, replacement) in conversions:
- text = re.sub(regex, replacement % {u'lang' : lang, u'family' : family}, text)
+ text = re.sub(regex, replacement % {u'lang' : lang, u'family' : family}, text)
return text
@@ -530,12 +530,12 @@
Get a license template to put on the image to be uploaded
'''
text = imagepage.get()
-
+
site = imagepage.site()
lang = site.language()
family = site.family.name
- result = u''
+ result = u''
for (regex, replacement) in licenseTemplates[imagepage.site().language()]:
match = re.search(regex, text, flags=re.IGNORECASE)
@@ -544,9 +544,9 @@
return result % {u'author' : self.getAuthor(imagepage),
u'lang' : lang,
u'family' : family}
-
+
return result
-
+
def getNewCategories(self, imagepage):
'''
        Get categories for the image
@@ -578,7 +578,7 @@
self.uploadQueue.put(None)
pywikibot.output(u'User worked on all images.')
return True
-
+
def processImage(self, fields):
'''
Work on a single image
@@ -591,7 +591,7 @@
if skip:
pywikibot.output(u'Skipping %s : User pressed skip.' % imagepage.title())
return False
-
+
# Check if the image already exists
CommonsPage=pywikibot.Page(pywikibot.getSite('commons', 'commons'), u'File:' + filename)
if not CommonsPage.exists():
@@ -627,7 +627,7 @@
'''
self.checktemplate = False
return
-
+
def processImage(self, fields):
'''
Work on a single image
@@ -637,27 +637,27 @@
pywikibot.output(cid)
bot = UploadRobot(url=imagepage.fileUrl(), description=cid, useFilename=filename, keepFilename=True, verifyDescription=False, ignoreWarning = True, targetSite = pywikibot.getSite('commons', 'commons'))
bot.run()
-
+
self.tagNowcommons(imagepage, filename)
self.replaceUsage(imagepage, filename)
-
+
def buildNewImageDescription(self, imagepage, description, date, source, author, licensetemplate, categories):
'''
- Build a new information template
+ Build a new information template
'''
-
+
site = imagepage.site()
lang = site.language()
family = site.family.name
-
+
cid = u''
if self.checktemplate:
cid = cid + u'\n{{BotMoveToCommons|%(lang)s.%(family)s|year={{subst:CURRENTYEAR}}|month={{subst:CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}}}\n' % {u'lang' : lang, u'family' : family}
cid = cid + u'== {{int:filedesc}} ==\n'
cid = cid + u'{{Information\n'
cid = cid + u'|description={{%(lang)s|1=' % {u'lang' : lang, u'family' : family}
- cid = cid + description + u'}}\n'
+ cid = cid + description + u'}}\n'
cid = cid + u'|date=' + date + u'\n'
cid = cid + u'|source=' + source + u'\n'
cid = cid + u'|author=' + author + u'\n'
@@ -687,7 +687,7 @@
family = site.family.name
sourceimage = imagepage.site().get_address(imagepage.title()).replace(u'&redirect=no&useskin=monobook', u'')
-
+
result = u'== {{Original upload log}} ==\n'
result = result + u'The original description page is/was [http://%(lang)s.%(family)s.org%(sourceimage)s here]. All following user names refer to %(lang)s.%(family)s.\n' % {u'lang' : lang, u'family' : family , u'sourceimage' : sourceimage}
for (timestamp, username, resolution, size, comment) in filehistory:
@@ -699,8 +699,8 @@
u'username' : username,
u'resolution': resolution,
u'size': size,
- u'comment' : comment}
-
+ u'comment' : comment}
+
return result
def tagNowcommons(self, imagepage, filename):
@@ -737,15 +737,15 @@
if imagepage.titleWithoutNamespace() != filename:
gen = pagegenerators.FileLinksGenerator(imagepage)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
-
+
if imagepage.site().language() in imageMoveMessage:
moveSummary = imageMoveMessage[imagepage.site().language()] % (imagepage.titleWithoutNamespace(), filename)
else:
moveSummary = imageMoveMessage['_default'] % (imagepage.titleWithoutNamespace(), filename)
imagebot = ImageRobot(generator = preloadingGen, oldImage = imagepage.titleWithoutNamespace(), newImage = filename, summary = moveSummary, always = True, loose = True)
- imagebot.run()
-
+ imagebot.run()
+
def main(args):
pywikibot.output(u'WARNING: This is an experimental bot')
pywikibot.output(u'WARNING: It will only work on self published work images')
@@ -755,7 +755,7 @@
generator = None;
always = False
checkTemplate = True
-
+
# Load a lot of default generators
genFactory = pagegenerators.GeneratorFactory()
@@ -768,7 +768,7 @@
if not supportedSite():
pywikibot.output(u'Sorry, this site is not supported (yet).')
return False
-
+
generator = genFactory.getCombinedGenerator()
if not generator:
raise add_text.NoEnoughData('You have to specify the generator you want to use for the script!')
@@ -785,10 +785,10 @@
imageFetcherThread.daemon=False
userInteractionThread.daemon=False
uploaderThread.daemon=False
-
+
if not checkTemplate:
uploaderThread.nochecktemplate()
-
+
fetchDone = imageFetcherThread.start()
userDone = userInteractionThread.start()
uploadDone = uploaderThread.start()
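
imagecopy_self.py shares the convertLinks helper touched above: local wikilinks are rewritten as interwiki links so they still resolve once the description text lands on Commons. A standalone sketch of the two substitutions (Python 2):

    import re

    def convert_links(text, lang):
        conversions = [
            # [[Page|label]] -> [[:lang:Page|label]]
            (ur'\[\[([^\[\]\|]+)\|([^\[\]\|]+)\]\]', u'[[:%(lang)s:\\1|\\2]]'),
            # [[Page]] -> [[:lang:Page|Page]]
            (ur'\[\[([^\[\]\|]+)\]\]', u'[[:%(lang)s:\\1|\\1]]'),
        ]
        for regex, replacement in conversions:
            text = re.sub(regex, replacement % {u'lang': lang}, text)
        return text

    # convert_links(u'By [[User:Foo|Foo]] in [[Berlin]]', u'de')
    # -> u'By [[:de:User:Foo|Foo]] in [[:de:Berlin|Berlin]]'
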
Modified: trunk/pywikipedia/imagerecat.py
===================================================================
--- trunk/pywikipedia/imagerecat.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/imagerecat.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -27,7 +27,7 @@
__version__ = '$Id$'
#
# (C) Multichill 2008
-#
+#
# Distributed under the terms of the MIT license.
#
#
@@ -79,7 +79,7 @@
if (onlyUncat and not(u'Uncategorized' in imagepage.templates())):
pywikibot.output(u'No Uncategorized template found')
- else:
+ else:
currentCats = getCurrentCats(imagepage)
if onlyFilter:
commonshelperCats = []
@@ -94,7 +94,7 @@
pywikibot.output(u' Found new cat: ' + cat);
saveImagePage(imagepage, newcats, usage, galleries,
onlyFilter)
-
+
def getCurrentCats(imagepage):
''' Get the categories currently on the image '''
result = []
@@ -367,7 +367,7 @@
def saveImagePage(imagepage, newcats, usage, galleries, onlyFilter):
''' Remove the old categories and add the new categories to the image. '''
- newtext = pywikibot.removeCategoryLinks(imagepage.get(), imagepage.site())
+ newtext = pywikibot.removeCategoryLinks(imagepage.get(), imagepage.site())
if not(onlyFilter):
newtext = removeTemplates(newtext)
newtext = newtext + getCheckCategoriesTemplate(usage, galleries,
@@ -413,7 +413,7 @@
galleryCounter = galleryCounter + 1
result += u'|ncats=%d\n' % ncats
result += u'}}\n'
- return result
+ return result
def main(args):
'''
@@ -426,7 +426,7 @@
global search_wikis
global hint_wiki
-
+
site = pywikibot.getSite(u'commons', u'commons')
pywikibot.setSite(site)
for arg in pywikibot.handleArgs():
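
saveImagePage above follows a remove-then-append strategy: strip whatever category links the page has, then write the filtered set back. A sketch of the wikitext side of that (Python 2; the regex is a simplified stand-in for pywikibot.removeCategoryLinks):

    import re

    def recategorize(text, newcats):
        # Drop existing category links (simplified; ignores sort keys etc.)
        text = re.sub(ur'\[\[Category:[^\]]+\]\]\n?', u'', text)
        for cat in newcats:
            text += u'[[Category:%s]]\n' % cat
        return text
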
Modified: trunk/pywikipedia/imageuncat.py
===================================================================
--- trunk/pywikipedia/imageuncat.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/imageuncat.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -1252,7 +1252,7 @@
    The delay is the number of minutes to wait and the block is the timespan to return images in.
    Should probably be copied somewhere else.
'''
-
+
result = []
dateformat ="%Y-%m-%dT%H:%M:%SZ"
rcstart = datetime.utcnow() + timedelta(minutes=-delay-block)
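
The hunk above computes the start of the recent-changes window from two offsets: wait delay minutes so uploaders get a chance to categorize, then scan a slice block minutes wide. A sketch with example values (Python 2; the rcend line is an assumed companion to the rcstart shown):

    from datetime import datetime, timedelta

    dateformat = "%Y-%m-%dT%H:%M:%SZ"
    delay, block = 120, 60  # example values; the script takes these as args

    rcstart = datetime.utcnow() + timedelta(minutes=-delay - block)
    rcend = datetime.utcnow() + timedelta(minutes=-delay)
    print rcstart.strftime(dateformat), '->', rcend.strftime(dateformat)
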
Modified: trunk/pywikipedia/logindata.py
===================================================================
--- trunk/pywikipedia/logindata.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/logindata.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -1,8 +1,8 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# Usable example module: Use of pywikipedia as a
# library.
-#
+#
# Looks up the path to pywikipedia (pywikipedia_path)
# in a settings.py file. You'll need to provide that,
# and/or refactor.
@@ -29,10 +29,10 @@
class LoginData:
"""An example class that uses pywikipedia as a library.
- usage example:
-
+ usage example:
+
from logindata import LoginData, pywikibot
- target_wiki=LoginData( ... ) # for example, fill in from a settings file, or use code to generate, or ...
+ target_wiki=LoginData( ... ) # for example, fill in from a settings file, or use code to generate, or ...
site=target_wiki.login()
page=pywikibot.Page(site,"Main Page")
"""
@@ -49,7 +49,7 @@
user='MY_BOT_USER',
password='MY_SECRET_PASSWORD',
RversionTab=None,
- api_supported=False
+ api_supported=False
):
"""
        parameters:
@@ -71,7 +71,7 @@
self.user=user
self.password=password
self.family=base_family.Family(
- name=name,
+ name=name,
protocol=protocol,
server=server,
scriptpath=scriptpath,
@@ -81,7 +81,7 @@
RversionTab=RversionTab,
api_supported=api_supported)
self.site=None
-
+
def login(self):
"""Attempt to log in on the site described
by this class. Returns a pywikipedia site object"""
Modified: trunk/pywikipedia/match_images.py
===================================================================
--- trunk/pywikipedia/match_images.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/match_images.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -26,13 +26,13 @@
    This function expects two image page objects.
    It will return True if the images are the same and False if they are not.
'''
-
+
imageA = getImageFromImagePage(imagePageA)
imageB = getImageFromImagePage(imagePageB)
(imA_width, imA_height) = imageA.size
(imB_width, imB_height) = imageB.size
-
+
imageB = imageB.resize((imA_width, imA_height))
imageA_topleft = imageA.crop((0,0, int(imA_width/2), int(imA_height/2)))
@@ -94,23 +94,23 @@
histogramA = imageA.histogram()
histogramB = imageB.histogram()
- totalMatch = 0
+ totalMatch = 0
totalPixels = 0
-
+
if not (len(histogramA)==len(histogramB)):
return 0
-
+
for i in range(0, len(histogramA)):
totalMatch = totalMatch + min(histogramA[i], histogramB[i])
totalPixels = totalPixels + max(histogramA[i], histogramB[i])
if (totalPixels==0):
return 0;
-
+
return float(totalMatch)/float(totalPixels)*100
-
+
def main():
site = wikipedia.getSite(u'commons', u'commons')
@@ -121,7 +121,7 @@
familyA = u''
familyB = u''
langA = u''
- langB = u''
+ langB = u''
imagePageA = None
imagePageB = None
@@ -155,7 +155,7 @@
imageTitleA=images[0]
imageTitleB=images[1]
- if not (imageTitleA == u''):
+ if not (imageTitleA == u''):
if not (langA == u''):
if not (familyA == u''):
imagePageA = wikipedia.ImagePage(wikipedia.getSite(langA, familyA), imageTitleA)
@@ -176,7 +176,7 @@
if (imagePageA and imagePageB):
matchImagePages(imagePageA, imagePageB)
-
+
if __name__ == "__main__":
try:
main()
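
The histogram comparison being tidied above scores two images by overlap: the sum of per-bucket minima over the sum of per-bucket maxima, as a percentage. It operates on the flat lists PIL's Image.histogram() returns, so plain lists can stand in for a self-contained sketch (Python 2):

    def match_percentage(histogramA, histogramB):
        if len(histogramA) != len(histogramB):
            return 0
        totalMatch = sum(min(a, b) for a, b in zip(histogramA, histogramB))
        totalPixels = sum(max(a, b) for a, b in zip(histogramA, histogramB))
        if totalPixels == 0:
            return 0
        return float(totalMatch) / float(totalPixels) * 100

    print match_percentage([10, 20, 30], [10, 20, 30])  # 100.0 (identical)
    print match_percentage([10, 0, 0], [0, 10, 0])      # 0.0 (disjoint)
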
Modified: trunk/pywikipedia/pagegenerators.py
===================================================================
--- trunk/pywikipedia/pagegenerators.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/pagegenerators.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -138,7 +138,7 @@
delimited with ";"
Example: -usercontribs:DumZiBoT;500
returns 500 distinct pages to work on.
-
+
-<mode>log Work on articles that were on a specified special:log.
You have options for every type of logs given by the
<mode> parameter which could be one of the following:
@@ -168,7 +168,7 @@
"-randomredirect:n" where n is the number of pages to be
returned, else 10 pages are returned.
--gorandom Specifies that the robot should start at the random pages
+-gorandom Specifies that the robot should start at the random pages
returned by [[Special:Random]].
-redirectonly Work on redirect pages only, not their target pages.
@@ -528,7 +528,7 @@
namespace = self.namespaces[0]
else:
namespace = pywikibot.Page(site, firstPageTitle).namespace()
-
+
firstPageTitle = pywikibot.Page(site,
firstPageTitle).titleWithoutNamespace()
gen = AllpagesPageGenerator(firstPageTitle, namespace,
@@ -961,7 +961,7 @@
'q': query,
}
url += urllib.urlencode(params)
-
+
while True:
try:
pywikibot.output(u'Querying Google AJAX Search API...') #, offset %i' % offset)
Modified: trunk/pywikipedia/panoramiopicker.py
===================================================================
--- trunk/pywikipedia/panoramiopicker.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/panoramiopicker.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -16,7 +16,7 @@
    #For Python 2.6 or newer
import json
if not hasattr(json, 'loads'):
-    # 'json' can also be the name used for
+    # 'json' can also be the name used for
# http://pypi.python.org/pypi/python-json
raise ImportError
except ImportError:
@@ -89,9 +89,9 @@
# Does Panoramio have more license options?
return photoInfo
-
-
+
+
def getFilename(photoInfo=None, site=pywikibot.getSite(u'commons', u'commons'),
project=u'Panoramio'):
''' Build a good filename for the upload based on the username and the
@@ -114,7 +114,7 @@
i = i + 1
else:
return u'%s - %s - %s (%s).jpg' % (project, username, title,
- str(i))
+ str(i))
else:
return u'%s - %s - %s.jpg' % (project, username, title)
@@ -123,7 +123,7 @@
the page might not be allowed by the software.
'''
- title = title.strip()
+ title = title.strip()
title = re.sub(u"[<{\\[]", u"(", title)
title = re.sub(u"[>}\\]]", u")", title)
title = re.sub(u"[ _]?\\(!\\)", u"", title)
@@ -139,14 +139,14 @@
title = re.sub(u"--+", u"-", title)
title = re.sub(u",,+", u",", title)
title = re.sub(u"[-,^]([.]|$)", u"\\1", title)
- title = title.replace(u" ", u"_")
+ title = title.replace(u" ", u"_")
return title
-
+
def getDescription(photoInfo=None, panoramioreview=False, reviewer=u'',
override=u'', addCategory=u''):
'''
- Build description for the image.
+ Build description for the image.
'''
desc = u''
@@ -166,14 +166,14 @@
if override:
desc = desc + override
- else:
+ else:
if photoInfo.get(u'license')==u'by-sa':
desc = desc + u'{{Cc-by-sa-3.0}}\n'
if panoramioreview:
desc = desc + u'{{Panoramioreview|%s|{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-{{subst:CURRENTDAY2}}}}\n' % (reviewer,)
else:
desc = desc + u'{{Panoramioreview}}\n'
-
+
desc = desc + u'\n'
cats = u''
if addCategory:
@@ -188,14 +188,14 @@
desc = desc + u'[[Category:%s]]\n' % (cat,)
if not cats:
desc = desc + u'{{subst:Unc}}\n'
-
+
return desc % photoInfo
def processPhoto(photoInfo=None, panoramioreview=False, reviewer=u'',
override=u'', addCategory=u'', autonomous=False):
''' Process a single Panoramio photo '''
-
-
+
+
if isAllowedLicense(photoInfo) or override:
#Should download the photo only once
photo = downloadPhoto(photoInfo.get(u'photo_file_url'))
@@ -209,7 +209,7 @@
pywikibot.output(filename)
description = getDescription(photoInfo, panoramioreview,
reviewer, override, addCategory)
-
+
pywikibot.output(description)
if not autonomous:
(newDescription, newFilename, skip) = Tkdialog(
@@ -234,7 +234,7 @@
verifyDescription=False)
bot.upload_image(debug=False)
return 1
- return 0
+ return 0
class Tkdialog:
@@ -246,7 +246,7 @@
self.root.title(filename)
self.photoDescription = photoDescription
- self.filename = filename
+ self.filename = filename
self.photo = photo
self.skip=False
self.exit=False
@@ -255,14 +255,14 @@
# The image
self.image=self.getImage(self.photo, 800, 600)
self.imagePanel=Label(self.root, image=self.image)
-
+
self.imagePanel.image = self.image
-
+
# The filename
self.filenameLabel=Label(self.root,text=u"Suggested filename")
self.filenameField=Entry(self.root, width=100)
self.filenameField.insert(END, filename)
-
+
# The description
self.descriptionLabel=Label(self.root,text=u"Suggested description")
self.descriptionScrollbar=Scrollbar(self.root, orient=VERTICAL)
@@ -270,20 +270,20 @@
self.descriptionField.insert(END, photoDescription)
self.descriptionField.config(state=NORMAL, height=12, width=100, padx=0, pady=0, wrap=WORD, yscrollcommand=self.descriptionScrollbar.set)
self.descriptionScrollbar.config(command=self.descriptionField.yview)
-
+
# The buttons
self.okButton=Button(self.root, text="OK", command=self.okFile)
self.skipButton=Button(self.root, text="Skip", command=self.skipFile)
-
+
## Start grid
# The image
self.imagePanel.grid(row=0, column=0, rowspan=11, columnspan=4)
-
+
# The buttons
self.okButton.grid(row=11, column=1, rowspan=2)
self.skipButton.grid(row=11, column=2, rowspan=2)
-
+
# The filename
self.filenameLabel.grid(row=13, column=0)
self.filenameField.grid(row=13, column=1, columnspan=3)
@@ -299,7 +299,7 @@
image.thumbnail((width, height))
imageTk = ImageTk.PhotoImage(image)
return imageTk
-
+
def okFile(self):
''' The user pressed the OK button. '''
self.filename=self.filenameField.get()
@@ -343,7 +343,7 @@
pywikibot.output(u'Got an IOError, let\'s try again')
except socket.timeout:
pywikibot.output(u'Got a timeout, let\'s try again')
-
+
metadata = json.loads(contents)
count = metadata.get(u'count') # Useless?
photos = metadata.get(u'photos')
@@ -381,12 +381,12 @@
autonomous = False
totalPhotos = 0
uploadedPhotos = 0
-
+
# Do we mark the images as reviewed right away?
if config.panoramio ['review']:
panoramioreview = config.panoramio['review']
- else:
- panoramioreview = False
+ else:
+ panoramioreview = False
# Set the Panoramio reviewer
if config.panoramio['reviewer']:
@@ -398,9 +398,9 @@
reviewer = config.usernames['commons']['commons']
else:
reviewer = u''
-
+
# Should be renamed to overrideLicense or something like that
- override = u''
+ override = u''
for arg in pywikibot.handleArgs():
if arg.startswith('-set'):
if len(arg) == 4:
@@ -418,7 +418,7 @@
end_id = pywikibot.input(
u'What is the id of the photo you want to end at?')
else:
- end_id = arg[8:]
+ end_id = arg[8:]
elif arg.startswith('-tags'):
if len(arg) == 5:
tags = pywikibot.input(
@@ -431,7 +431,7 @@
if len(arg) == 9:
reviewer = pywikibot.input(u'Who is the reviewer?')
else:
- reviewer = arg[10:]
+ reviewer = arg[10:]
elif arg.startswith('-override'):
if len(arg) == 9:
override = pywikibot.input(u'What is the override text?')
@@ -444,7 +444,7 @@
else:
addCategory = arg[13:]
elif arg == '-autonomous':
- autonomous = True
+ autonomous = True
if photoset:
for photoInfo in getPhotos(photoset, start_id, end_id):
@@ -459,7 +459,7 @@
pywikibot.output(u'Finished running')
pywikibot.output(u'Total photos: ' + str(totalPhotos))
pywikibot.output(u'Uploaded photos: ' + str(uploadedPhotos))
-
+
if __name__ == "__main__":
try:
main()
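
cleanUpTitle, touched above, sanitizes a suggested title with a chain of substitutions before it becomes a Commons file name. A shortened sketch reproducing only a few of its rules (Python 2):

    import re

    def clean_up_title(title):
        title = title.strip()
        title = re.sub(u'[<{\\[]', u'(', title)   # opening brackets -> (
        title = re.sub(u'[>}\\]]', u')', title)   # closing brackets -> )
        title = re.sub(u'--+', u'-', title)       # collapse runs of dashes
        title = title.replace(u' ', u'_')         # spaces -> underscores
        return title

    # clean_up_title(u'View <from> the hill') -> u'View_(from)_the_hill'
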
Modified: trunk/pywikipedia/query.py
===================================================================
--- trunk/pywikipedia/query.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/query.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -30,13 +30,13 @@
        #For Python 2.6 or newer
import json
if not hasattr(json, 'loads'):
-        # 'json' can also be the name used for
+        # 'json' can also be the name used for
# http://pypi.python.org/pypi/python-json
raise ImportError
except ImportError:
import simplejson as json
-
+
def GetData(params, site = None, useAPI = True, retryCount = 5, encodeTitle = True, sysop = False, back_response = False):
"""Get data from the query api, and convert it into a data object
"""
@@ -44,7 +44,7 @@
site = wikipedia.getSite()
data = {}
titlecount = 0
-
+
for k,v in params.iteritems():
if k == u'file':
data[k] = v
@@ -55,7 +55,7 @@
data[k] = unicode(ListToParam(v))
else:
params[k] = unicode(ListToParam(v))
-
+
elif not IsString(v):
params[k] = unicode(v)
elif type(v) == unicode:
@@ -66,16 +66,16 @@
if not useAPI:
params['noprofile'] = ''
-
+
if data:
for k in data:
- del params[k]
-
+ del params[k]
+
if wikipedia.verbose: #dump params info.
wikipedia.output(u"==== API action:%s ====" % params[u'action'])
if data and 'file' not in data:
wikipedia.output(u"%s: (%d items)" % (data.keys()[0], titlecount ) )
-
+
for k, v in params.iteritems():
if k not in ['action', 'format', 'file', 'xml', 'text']:
if k == 'lgpassword' and wikipedia.verbose == 1:
@@ -84,8 +84,8 @@
v = v.decode('utf-8')
wikipedia.output(u"%s: %s" % (k, v) )
wikipedia.output(u'-' * 16 )
-
+
postAC = [
'edit', 'login', 'purge', 'rollback', 'delete', 'undelete', 'protect', 'parse',
'block', 'unblock', 'move', 'emailuser','import', 'userrights', 'upload',
@@ -128,7 +128,7 @@
# This will also work, but all unicode strings will need to be converted from \u notation
# decodedObj = eval( jsontext )
-
+
jsontext = json.loads( jsontext )
if "error" in jsontext:
@@ -137,7 +137,7 @@
wikipedia.output('Received a bad login token error from the server. Attempting to refresh.')
params['token'] = site.getToken(sysop = sysop, getagain = True)
continue
-
+
if back_response:
return res, jsontext
else:
@@ -149,7 +149,7 @@
if 'Wikimedia Error' in jsontext: #wikimedia server error
raise wikipedia.ServerError
-
+
retryCount -= 1
wikipedia.output(u"Error downloading data: %s" % error)
wikipedia.output(u"Request %s:%s" % (site.lang, path))
Modified: trunk/pywikipedia/redirect.py
===================================================================
--- trunk/pywikipedia/redirect.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/redirect.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -33,7 +33,7 @@
-namespace:n Namespace to process. Can be given multiple times, for several
      namespaces. If omitted, only the main (article) namespace is
      treated with -api; with -xml, all namespaces are treated.
- Works only with an XML dump, or the API interface.
+ Works only with an XML dump, or the API interface.
-offset:n With -moves, the number of hours ago to start scanning moved
pages. With -xml, the number of the redirect to restart with
Modified: trunk/pywikipedia/replace.py
===================================================================
--- trunk/pywikipedia/replace.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/replace.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -34,7 +34,7 @@
-save Saves the titles of the articles to a file instead of
modifying the articles. This way you may collect titles to
- work on in automatic mode, and process them later with
+ work on in automatic mode, and process them later with
       -file. The file is opened in append mode if it already exists.
If you insert the contents of the file into a wikipage, it
will appear as a numbered list, and may be used with -links.
@@ -131,7 +131,7 @@
talk about HTTP, where the typo has become part of the standard:
python replace.py referer referrer -file:typos.txt -excepttext:HTTP
-
+
Please type "replace.py -help | more" if you can't read the top of the help.
"""
from __future__ import generators
@@ -353,8 +353,8 @@
# Some function to set default editSummary should probably be added
self.editSummary = editSummary
self.articles = articles
-
- #An edit counter to split the file by 100 titles if -save or -savenew
+
+ #An edit counter to split the file by 100 titles if -save or -savenew
#is on, and to display the number of edited articles otherwise.
self.editcounter = 0
@@ -508,7 +508,7 @@
if not self.articles:
#Primary behaviour: working on wiki
page.put_async(new_text, self.editSummary)
- self.editcounter += 1
+ self.editcounter += 1
#Bug: this increments even if put_async fails
                #This is kept in two separate if clauses so that a
                #future version can get feedback from put_async
@@ -605,7 +605,7 @@
allowoverlap = False
# Do not recurse replacement
recursive = False
- # This is the maximum number of pages to load per query
+ # This is the maximum number of pages to load per query
maxquerysize = 60
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
@@ -830,14 +830,14 @@
preloadingGen = pagegenerators.PreloadingGenerator(gen,
pageNumber=20, lookahead=100)
else:
- preloadingGen = pagegenerators.PreloadingGenerator(gen,
+ preloadingGen = pagegenerators.PreloadingGenerator(gen,
pageNumber=maxquerysize)
#Finally we open the file for page titles or set article to None
if filename:
try:
#This opens in strict error mode, that means bot will stop
- #on encoding errors with ValueError.
+ #on encoding errors with ValueError.
#See http://docs.python.org/library/codecs.html#codecs.open
titlefile = codecs.open(filename, encoding='utf-8',
mode=(lambda x: x and 'a' or 'w')(append))
@@ -853,7 +853,7 @@
finally:
if titlefile:
#Just for the spirit of programming (it was flushed)
- titlefile.close()
+ titlefile.close()
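
Two details in the title-file handling above are worth noting: codecs.open runs in strict error mode, so the bot stops with a ValueError on encoding problems, and the file mode depends on whether -append was given (spelled as (lambda x: x and 'a' or 'w')(append) in the script). A plainer sketch (Python 2):

    import codecs

    def open_titlefile(filename, append):
        mode = append and 'a' or 'w'  # extend an existing list, else start fresh
        return codecs.open(filename, encoding='utf-8', mode=mode)

    f = open_titlefile('titles.txt', append=False)
    f.write(u'# [[Example title]]\n')
    f.close()
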
if __name__ == "__main__":
Modified: trunk/pywikipedia/revertbot.py
===================================================================
--- trunk/pywikipedia/revertbot.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/revertbot.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -116,7 +116,7 @@
class myRevertBot(BaseRevertBot):
-
+
def callback(self, item):
if 'top' in item:
page = pywikibot.Page(self.site, item['title'])
Modified: trunk/pywikipedia/selflink.py
===================================================================
--- trunk/pywikipedia/selflink.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/selflink.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -132,7 +132,7 @@
except pywikibot.InvalidTitle, err:
pywikibot.output(u'Warning: %s' % err)
return text, False
-
+
# Check whether the link found is to the current page itself.
if linkedPage != page:
# not a self-link
Modified: trunk/pywikipedia/simple_family.py
===================================================================
--- trunk/pywikipedia/simple_family.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/simple_family.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -1,8 +1,8 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
# ============================================
-# NOTE FOR USERS: Unlike the Family files in
-# the family # directory, you do not need to
+# NOTE FOR USERS: Unlike the Family files in
+# the family # directory, you do not need to
# edit this file to configure anything.
# ============================================
@@ -15,17 +15,17 @@
#if settings.pywikipedia_path not in sys.path:
# sys.path.append(settings.pywikipedia_path)
-import config, family, urllib
-class Family(family.Family):
+import config, family, urllib
+class Family(family.Family):
"""Friendlier version of the pywikipedia family class.
    We can use this in conjunction with non-pywikipedia
config files.
-
+
Note that this just handles most common cases.
If you run into a special case, you'll have to fall back
to your regular pywikipedia.
"""
-
+
def __init__(self,
name='MY_NAME_FOR_THIS_SERVER',
protocol='http',
@@ -34,9 +34,9 @@
version='1.13.2',
lang='en',
encoding='utf-8',
- api_supported=False,
+ api_supported=False,
RversionTab=None # very rare beast, you probably won't need it.
- ):
+ ):
"""name: arbitrary name. Pick something easy to remember
protocol: http|https
server: dns address or ip address
@@ -49,7 +49,7 @@
RversionTab: Magic. See superclass for information.
"""
- family.Family.__init__(self)
+ family.Family.__init__(self)
self.name = name # REQUIRED; replace with actual name
self.langs = { # REQUIRED
@@ -77,7 +77,7 @@
return self._scriptpath
def apipath(self, code):
- """returns whether or not this wiki
+ """returns whether or not this wiki
"""
if self._api_supported:
return '%s/api.php' % self.scriptpath(code)
Modified: trunk/pywikipedia/standardize_notes.py
===================================================================
--- trunk/pywikipedia/standardize_notes.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/standardize_notes.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -242,7 +242,7 @@
won't be changed.
* regex - if the entries of replacements and exceptions
should be interpreted as regular expressions
-
+
"""
mysite = pywikibot.getSite()
import sqldump
Modified: trunk/pywikipedia/statistics_in_wikitable.py
===================================================================
--- trunk/pywikipedia/statistics_in_wikitable.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/statistics_in_wikitable.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -119,13 +119,13 @@
def date(self):
return time.strftime('%Y/%m/%d', time.localtime(time.time()))
-
+
def outputall(self):
list = self.dict.keys()
list.sort()
for name in self.dict:
pywikibot.output("There are "+str(self.dict[name])+" "+name)
-
+
def idle(self, retry_idle_time):
time.sleep(retry_idle_time)
pywikibot.output(u"Starting in %i second..." % retry_idle_time)
Modified: trunk/pywikipedia/titletranslate.py
===================================================================
--- trunk/pywikipedia/titletranslate.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/titletranslate.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -32,7 +32,7 @@
brackets and the text between them is removed from the page title.
If 'auto' is true, known year and date page titles are autotranslated
to all known target languages and inserted into the list.
-
+
"""
result = []
if site is None and page:
Modified: trunk/pywikipedia/upload.py
===================================================================
--- trunk/pywikipedia/upload.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/upload.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -163,7 +163,7 @@
file = open(self.url,"rb")
self._contents = file.read()
file.close()
-
+
def process_filename(self):
"""Return base filename portion of self.url"""
# Isolate the pure name
@@ -171,13 +171,13 @@
# Filename may be either a local file path or a URL
if '/' in filename:
filename = filename.split('/')[-1]
-
+
if '\\' in filename:
filename = filename.split('\\')[-1]
-
+
if self.urlEncoding:
filename = urllib.unquote(filename.decode(self.urlEncoding))
-
+
if self.useFilename:
filename = self.useFilename
if not self.keepFilename:
@@ -237,12 +237,12 @@
"""
if not self.targetSite.has_api() or self.targetSite.versionnumber() < 16:
return self._uploadImageOld(debug)
-
+
if not hasattr(self,'_contents'):
self.read_file_content()
-
+
filename = self.process_filename()
-
+
params = {
'action': 'upload',
'token': self.targetSite.getToken(),
@@ -256,17 +256,17 @@
params['url'] = self.url
elif not self.uploadByUrl and not sessionKey:
params['file'] = self._contents
-
+
if self.ignoreWarning:
params['ignorewarnings'] = 1
-
+
pywikibot.output(u'Uploading file to %s via API....' % self.targetSite)
-
+
data = query.GetData(params, self.targetSite)
-
+
if pywikibot.verbose:
pywikibot.output("%s" % data)
-
+
        if 'error' in data: # error occurred
errCode = data['error']['code']
pywikibot.output("%s" % data)
@@ -297,16 +297,16 @@
else:
pywikibot.output("Upload aborted.")
return
-
+
        elif data['result'] == u'Success': #No warnings; the upload completed successfully.
pywikibot.output(u"Upload successful.")
return filename #data['filename']
-
+
def _uploadImageOld(self, debug=False):
if not hasattr(self,'_contents'):
self.read_file_content()
-
+
filename = self.process_filename()
# Convert the filename (currently Unicode) to the encoding used on the
# target wiki
@@ -327,7 +327,7 @@
if self.uploadByUrl:
formdata["wpUploadFileURL"] = self.url
formdata["wpSourceType"] = 'Url'
-
+
# try to encode the strings to the encoding used by the target site.
# if that's not possible (e.g. because there are non-Latin-1 characters and
# the home Wikipedia uses Latin-1), convert all non-ASCII characters to
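
The API upload path above reduces to assembling one params dict and choosing between upload-by-URL and a direct file payload. A sketch of that assembly (Python 2; the token value is a placeholder for site.getToken()):

    def build_upload_params(filename, description, url=None, contents=None,
                            ignore_warnings=False):
        params = {
            'action': 'upload',
            'token': 'placeholder-edit-token',
            'filename': filename,
            'text': description,
        }
        if url:                      # let the wiki fetch the file itself
            params['url'] = url
        elif contents is not None:   # send the raw file contents
            params['file'] = contents
        if ignore_warnings:
            params['ignorewarnings'] = 1
        return params
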
Modified: trunk/pywikipedia/userlib.py
===================================================================
--- trunk/pywikipedia/userlib.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/userlib.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -91,7 +91,7 @@
def __repr__(self):
return self.__str__()
-
+
def _load(self):
getall(self.site(), [self], force=True)
return
@@ -229,7 +229,7 @@
if ccMe:
predata['wpCCMe'] = '1'
predata['wpEditToken'] = self.site().getToken()
-
+
response, data = self.site().postForm(address, predata, sysop = False)
if data:
if 'var wgAction = "success";' in data:
@@ -242,7 +242,7 @@
pywikibot.output(u'No data found.')
return False
-
+
@pywikibot.deprecated('contributions()')
def editedPages(self, limit=500):
""" Deprecated function that wraps 'contributions' for backwards
@@ -348,7 +348,7 @@
date = m.group('date')
comment = m.group('comment') or ''
yield pywikibot.ImagePage(self.site(), image), date, comment, deleted
-
+
def block(self, expiry=None, reason=None, anon=True, noCreate=False,
onAutoblock=False, banMail=False, watchUser=False, allowUsertalk=True,
reBlock=False, hidename=False):
@@ -360,7 +360,7 @@
or the block's expiry time
If set to 'infinite', 'indefinite' or 'never',
the block will never expire.
- reason - Reason for block
+ reason - Reason for block
anon - Block anonymous users only
noCreate - Prevent account creation
onAutoblock - Automatically block the last used IP address, and any
@@ -370,7 +370,7 @@
allowUsertalk - Allow the user to edit their own talk page
reBlock - If user is already blocked, overwrite the existing block
watchUser - watch the user's user and talk pages (not used with API)
-
+
        The default values for the block options are the most unrestrictive ones.
"""
@@ -467,7 +467,7 @@
if data:
if self.site().mediawiki_message('ipb_already_blocked').replace('$1', self.name()) in data:
raise AlreadyBlockedError
-
+
raise BlockError
return True
@@ -514,7 +514,7 @@
def getall(site, users, throttle=True, force=False):
"""Bulk-retrieve users data from site
-
+
Arguments: site = Site object
users = iterable that yields User objects
@@ -523,7 +523,7 @@
if len(users) > 1:
pywikibot.output(u'Getting %d users data from %s...'
% (len(users), site))
-
+
if len(users) > 250: # max load prevents HTTPError 400
for urg in range(0, len(users), 250):
if urg == range(0, len(users), 250)[-1]: #latest
@@ -561,7 +561,7 @@
raise
else:
break
- for uj in self.users:
+ for uj in self.users:
try:
x = data[uj.name()]
except KeyError:
@@ -601,7 +601,7 @@
pywikibot.output("""
This module is not for direct usage from the command prompt.
In code, the usage is as follows:
-
+
>>> exampleUser = User("en", 'Example')
>>> pywikibot.output(exampleUser.getUserPage().get())
>>> pywikibot.output(exampleUser.getUserPage('Lipsum').get())
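
getall above caps each request at 250 users to avoid an HTTPError 400 from an oversized query; the range(0, len(users), 250) loop is plain chunked iteration. A self-contained sketch (Python 2):

    def batches(items, size=250):
        # Yield successive slices of at most `size` items.
        for start in range(0, len(items), size):
            yield items[start:start + size]

    users = ['User%d' % i for i in range(600)]
    for chunk in batches(users):
        print 'requesting data for %d users' % len(chunk)
    # -> 250, 250, 100
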
Modified: trunk/pywikipedia/version.py
===================================================================
--- trunk/pywikipedia/version.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/version.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -18,7 +18,7 @@
def getversion():
return '%(tag)s (r%(rev)s, %(date)s)' % getversiondict()
-
+
def getversiondict():
global cache
if cache:
@@ -62,7 +62,7 @@
rev = entries.readline()[:-1]
if not date or not tag or not rev:
raise ParseError
- return (tag, rev, date)
+ return (tag, rev, date)
def getversion_nightly():
data = open(os.path.join(wikipediatools.get_base_dir(), 'version'))
@@ -72,7 +72,7 @@
if not date or not tag or not rev:
raise ParseError
return (tag, rev, date)
-
+
if __name__ == '__main__':
print 'Pywikipedia %s' % getversion()
print 'Python %s' % sys.version
Modified: trunk/pywikipedia/watchlist.py
===================================================================
--- trunk/pywikipedia/watchlist.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/watchlist.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -64,18 +64,18 @@
def refresh(site, sysop=False):
if not site.has_api() or site.versionnumber() < 10:
_refreshOld(site)
-
+
# get watchlist special page's URL
if not site.loggedInAs(sysop=sysop):
site.forceLogin(sysop=sysop)
-
+
params = {
'action': 'query',
'list': 'watchlist',
'wllimit': pywikibot.config.special_page_limit,
'wlprop': 'title',
}
-
+
pywikibot.output(u'Retrieving watchlist for %s via API.' % repr(site))
#pywikibot.put_throttle() # It actually is a get, but a heavy one.
watchlist = []
@@ -84,7 +84,7 @@
if 'error' in data:
raise RuntimeError('ERROR: %s' % data)
watchlist.extend([w['title'] for w in data['query']['watchlist']])
-
+
if 'query-continue' in data:
params['wlstart'] = data['query-continue']['watchlist']['wlstart']
else:
@@ -96,7 +96,7 @@
f = open(pywikibot.config.datafilepath('watchlists',
'watchlist-%s-%s-sysop.dat'
% (site.family.name, site.lang)),
- 'w')
+ 'w')
else:
f = open(pywikibot.config.datafilepath('watchlists',
'watchlist-%s-%s.dat'
@@ -126,7 +126,7 @@
f = open(pywikibot.config.datafilepath('watchlists',
'watchlist-%s-%s-sysop.dat'
% (site.family.name, site.lang)),
- 'w')
+ 'w')
else:
f = open(pywikibot.config.datafilepath('watchlists',
'watchlist-%s-%s.dat'
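
refresh pages through the watchlist with the API's query-continue mechanism, visible in the hunks above: extend the result, then re-issue the request with the returned wlstart until no continuation marker comes back. A sketch (Python 2; api is a hypothetical stand-in for query.GetData):

    def fetch_watchlist(api):
        params = {'action': 'query', 'list': 'watchlist', 'wlprop': 'title'}
        watchlist = []
        while True:
            data = api(params)
            if 'error' in data:
                raise RuntimeError('ERROR: %s' % data)
            watchlist.extend(w['title'] for w in data['query']['watchlist'])
            if 'query-continue' in data:
                params['wlstart'] = \
                    data['query-continue']['watchlist']['wlstart']
            else:
                break
        return watchlist
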
Modified: trunk/pywikipedia/weblinkchecker.py
===================================================================
--- trunk/pywikipedia/weblinkchecker.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/weblinkchecker.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -48,7 +48,7 @@
the feature.
-day         a dead link first found more than x days ago should
             probably be fixed or removed. If not set, the default is 7 days.
-
+
All other parameters will be regarded as part of the title of a single page,
and the bot will only work on that single page.
Modified: trunk/pywikipedia/welcome.py
===================================================================
--- trunk/pywikipedia/welcome.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/welcome.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -80,7 +80,7 @@
-file[:#] Use a file instead of a wikipage to take the random sign.
If you use this parameter, you don't need to use -random.
-
+
-sign Use one signature from command line instead of the default
-savedata This feature saves the random signature index to allow to
@@ -206,13 +206,13 @@
# language (e.g. 'de') and modify/translate the text.
# The page where the bot will save the log (e.g. Wikipedia:Welcome log).
-#
+#
# ATTENTION: The "Log disabled" comment below lists the projects that do not log welcomed users; no details need to be set for them.
logbook = {
'commons': {'_default': u'Project:Welcome log', },
'wikipedia': {
'_default': None,
- # Log disabled: da, de, en, fa, he, id, ka, pdc, pt, ru, vo.
+ # Log disabled: da, de, en, fa, he, id, ka, pdc, pt, ru, vo.
'ar': u'Project:سجل الترحيب',
'fr': u'Wikipedia:Prise de décision/Accueil automatique des nouveaux par un robot/log',
'ga': u'Project:Log fáilte',
@@ -386,7 +386,7 @@
# The text for reporting a possibly bad username (e.g. *[[Talk_page:Username|Username]]).
report_text = {
- 'commons': {'_default': u"\n*{{user3|%s}}" + timeselected,},
+ 'commons': {'_default': u"\n*{{user3|%s}}" + timeselected,},
'wikipedia':{
'ar': u"\n*{{user13|%s}}" + timeselected,
'da': u'\n*[[Bruger Diskussion:%s]] ' + timeselected,
@@ -455,7 +455,7 @@
class Global(object):
"""Container class for global settings.
Use of globals outside of this is to be avoided."""
-
+
attachEditCount = 1 # number of edits a user needs before being welcomed
dumpToLog = 15 # number of users required before the log is updated :)
offset = 0 # skip users newer than that timestamp
@@ -477,15 +477,15 @@
#fileOption = False # check if the user wants to use a file or the wikipage
class WelcomeBot(object):
-
+
def __init__(self):
#Initial
self.site = pywikibot.getSite()
self.bname = dict()
-
+
self._totallyCount = 0
self.welcomed_users = list()
-
+
if globalvar.randomSign:
self.defineSign(True)
if __name__ != '__main__': #use only in module call
@@ -570,14 +570,14 @@
self.bname[name] = bname
return bname.lower() in name.lower()
except UnicodeEncodeError:
- pass
+ pass
try:
for bname in self._blacklist:
if bname.lower() in str(name).lower(): #bad name positive
self.bname[name] = bname
return True
except UnicodeEncodeError:
- pass
+ pass
return False
def reportBadAccount(self, name = None, final = False):
@@ -711,7 +711,7 @@
if globalvar.quick and count_auto > 0:
showStatus()
pywikibot.output(u'Ignored %d user(s) by auto-create' % count_auto)
-
+
showStatus(5)
pywikibot.output(u'There is nobody left to be welcomed...')
else:
@@ -727,7 +727,7 @@
URL += "&offset=%d" % globalvar.offset
pywikibot.output("Getting new user log from Special:Log/newusers....")
raw = self.site.getUrl(URL)
-
+
# Search with a regex for users who do not have a talk page yet
# and put them in a list (this is easier and safer).
# XXX: This is the regex; if there are problems, look here first.
@@ -821,7 +821,7 @@
pywikibot.output(u'%s might be a global bot!' % users.name() )
continue
#if globalvar.offset != 0 and time.strptime(users.registrationTime(), "%Y-%m-%dT%H:%M:%SZ") >= globalvar.offset:
- #
+ #
if users.editCount() >= globalvar.attachEditCount:
showStatus(2)
pywikibot.output(u'%s has enough edits to be welcomed.' % users.name() )
@@ -917,9 +917,9 @@
break
#if __name__ != '__main__':
# globalvar.offset = int(time.strftime("%Y%m%d%H%M%S", time.gmtime()))
- #
+ #
# def putName(nm):
- #
+ #
# self._checkQueue.append(name)
# if len(self._checkQueue) >= globalvar.dumpToLog:
# self.run()
@@ -954,7 +954,7 @@
globalvar = Global()
-if __name__ == "__main__":
+if __name__ == "__main__":
try:
number_user = 0
for arg in pywikibot.handleArgs():
@@ -1021,10 +1021,10 @@
globalvar.quiet = True
elif arg == '-quick':
globalvar.quick = True
-
+
# Filename and pywikipedia path
# file where is stored the random signature index
- filename = pywikibot.config.datafilepath('welcome-%s-%s.data' % (pywikibot.default_family, pywikibot.default_code))
+ filename = pywikibot.config.datafilepath('welcome-%s-%s.data' % (pywikibot.default_family, pywikibot.default_code))
if globalvar.offset and globalvar.timeoffset:
pywikibot.output('WARNING: both -offset and -timeoffset were provided, ignoring -offset')
globalvar.offset = 0
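
The last hunk above guards against -offset and -timeoffset being used together: the bot warns and discards -offset, so -timeoffset wins. A minimal sketch of that precedence rule; the function wrapper is illustrative:

def resolve_offsets(offset, timeoffset, warn=print):
    # -timeoffset takes precedence when both are given, matching the
    # argument handling in welcome.py.
    if offset and timeoffset:
        warn('WARNING: both -offset and -timeoffset were provided, '
             'ignoring -offset')
        offset = 0
    return offset, timeoffset
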
Modified: trunk/pywikipedia/wikipedia.py
===================================================================
--- trunk/pywikipedia/wikipedia.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/wikipedia.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -768,7 +768,7 @@
if restr['type'] == 'edit':
self.editRestriction = restr['level']
elif restr['type'] == 'move':
- self.moveRestriction = restr['level']
+ self.moveRestriction = restr['level']
self._revisionId = lastRev['revid']
@@ -1079,13 +1079,13 @@
data = query.GetData(params, self.site(), encodeTitle = False)['query']['pages'].values()[0]
if "templates" not in data:
return []
-
+
for tmp in data['templates']:
count += 1
tmpsFound.append(Page(self.site(), tmp['title'], defaultNamespace=tmp['ns']) )
if count >= tllimit:
break
-
+
if 'query-continue' in data and count < tllimit:
params["tlcontinue"] = data["query-continue"]["templates"]["tlcontinue"]
else:
@@ -1377,7 +1377,7 @@
params['blfilterredir'] = 'redirects'
if not self.site().isAllowed('apihighlimits') and config.special_page_limit > 500:
params['bllimit'] = 500
-
+
if withTemplateInclusion or onlyTemplateInclusion:
params['list'].append('embeddedin')
params['eititle'] = self.title()
@@ -1402,7 +1402,7 @@
data = data[0] + data[1]
else:
data = data[0]
-
+
refPages = set()
for blp in data:
pg = Page(self.site(), blp['title'], defaultNamespace = blp['ns'])
@@ -1420,7 +1420,7 @@
yield plk
refPages.add(plk)
if follow_redirects and 'redirect' in p and plk != self:
- for zms in plk.getReferences(follow_redirects, withTemplateInclusion,
+ for zms in plk.getReferences(follow_redirects, withTemplateInclusion,
onlyTemplateInclusion, redirectsOnly, internal=True):
yield zms
else:
@@ -1431,7 +1431,7 @@
if 'query-continue' in datas:
if 'backlinks' in datas['query-continue']:
params['blcontinue'] = datas['query-continue']['backlinks']['blcontinue']
-
+
if 'embeddedin' in datas['query-continue']:
params['eicontinue'] = datas['query-continue']['embeddedin']['eicontinue']
else:
@@ -2325,7 +2325,7 @@
}
if not self.site().isAllowed('apihighlimits') and config.special_page_limit > 500:
params['cllimit'] = 500
-
+
output(u'Getting categories in %s via API...' % self.aslink())
allDone = False
cats=[]
@@ -2604,7 +2604,7 @@
Return value is a list of tuples, where each tuple represents one
edit and is built of revision id, edit date/time, user name,
- edit summary, size and tags. Starts with the most current revision,
+ edit summary, size and tags. Starts with the most current revision,
unless reverseOrder is True.
Defaults to getting the first revCount edits, unless getAll is True.
@@ -2656,7 +2656,7 @@
if len(self._versionhistoryearliest) > revCount and not getAll:
return self._versionhistoryearliest[:revCount]
return self._versionhistoryearliest
-
+
if dataQuery != []:
self._versionhistory = dataQuery
del dataQuery
@@ -2700,7 +2700,7 @@
params['rvstartid'] = result['query-continue']['revisions']['rvstartid']
else:
thisHistoryDone = True
-
+
if skipFirst:
skipFirst = False
else:
@@ -2726,8 +2726,8 @@
if len(result['query']['pages'].values()[0]['revisions']) < revCount:
thisHistoryDone = True
return dataQ
-
- def _getVersionHistoryOld(self, getAll = False, skipFirst = False,
+
+ def _getVersionHistoryOld(self, getAll = False, skipFirst = False,
reverseOrder = False, revCount=500):
"""Load the version history page and return history information.
Internal use for self.getVersionHistory(), don't use this function directly.
@@ -2781,7 +2781,7 @@
if not skipFirst:
edits = editR.findall(self_txt)
-
+
if skipFirst:
# Skip the first page only,
skipFirst = False
@@ -2838,7 +2838,7 @@
unescape(match.group('user')),
unescape(match.group('content')))
for match in r.finditer(data) ]
-
+
# Load history informations by API query.
dataQ = []
@@ -2869,7 +2869,7 @@
params['rvstartid'] = result['query-continue']['revisions']['rvstartid']
else:
thisHistoryDone = True
-
+
if skipFirst:
skipFirst = False
else:
@@ -2947,7 +2947,7 @@
reason = input(u'Please enter a reason for the move:')
if self.isTalkPage():
movetalkpage = False
-
+
params = {
'action': 'move',
'from': self.title(),
@@ -2978,7 +2978,7 @@
# We don't have the user rights to delete
output(u'Page move failed: Target page [[%s]] already exists.' % newtitle)
#elif err == 'protectedpage':
- #
+ #
else:
output("Unknown Error: %s" % result)
return False
@@ -2987,7 +2987,7 @@
output(u'Page %s moved to %s, deleting the existing page' % (self.title(), newtitle))
else:
output(u'Page %s moved to %s' % (self.title(), newtitle))
-
+
if hasattr(self, '_contents'):
#self.__init__(self.site(), newtitle, defaultNamespace = self._namespace)
try:
@@ -2999,7 +2999,7 @@
def _moveOld(self, newtitle, reason=None, movetalkpage=True, movesubpages=False, sysop=False,
throttle=True, deleteAndMove=False, safe=True, fixredirects=True, leaveRedirect=True):
-
+
# Login
try:
self.get()
@@ -3329,15 +3329,15 @@
"""
# Login
self._getActionUser(action = 'undelete', sysop = True)
-
+
# Check blocks
self.site().checkBlocks(sysop = True)
-
+
token = self.site().getToken(self, sysop=True)
-
+
if throttle:
put_throttle()
-
+
if self.site().has_api() and self.site().versionnumber() >= 12:
params = {
'action': 'undelete',
@@ -3347,7 +3347,7 @@
}
if self._deletedRevs and self._deletedRevsModified:
selected = []
-
+
for ts in self._deletedRevs:
if self._deletedRevs[ts][4]:
selected.append(ts)
@@ -3481,9 +3481,9 @@
err = result['error']['code']
output('%s' % result)
#if err == '':
- #
+ #
#elif err == '':
- #
+ #
else:
if result['protect']:
output(u'Changed protection level of page %s.' % self.aslink())
@@ -3545,7 +3545,7 @@
if token:
predata['wpEditToken'] = token
-
+
response, data = self.site().postForm(address, predata, sysop=True)
if response.code == 302 and not data:
@@ -3726,7 +3726,7 @@
output("API not work, loading page HTML.")
self.getImagePageHtml()
return
-
+
if 'error' in data:
raise RuntimeError("%s" %data['error'])
count = 0
@@ -3739,12 +3739,12 @@
elif 'invalid' in pageInfo:
raise BadTitle('BadTitle: %s' % self)
infos = []
-
+
try:
while True:
for info in pageInfo['imageinfo']:
count += 1
- if count == 1 and 'iistart' not in params:
+ if count == 1 and 'iistart' not in params:
# count == 1 with no iistart means the first image revision is the latest.
self._latestInfo = info
infos.append(info)
@@ -3775,10 +3775,10 @@
#change to API query: action=query&titles=File:wiki.jpg&prop=imageinfo&iiprop=url
if not self._infoLoaded:
self._loadInfo()
-
+
if self._infoLoaded:
return self._latestInfo['url']
-
+
urlR = re.compile(r'<div class="fullImageLink" id="file">.*?<a href="(?P<url>[^ ]+?)"(?! class="image")|<span class="dangerousLink"><a href="(?P<url2>.+?)"', re.DOTALL)
m = urlR.search(self.getImagePageHtml())
@@ -3789,10 +3789,10 @@
"""Return True if the image is stored on Wikimedia Commons"""
if not self._infoLoaded:
self._loadInfo()
-
+
if self._infoLoaded:
return not self._local
-
+
return self.fileUrl().startswith(u'http://upload.wikimedia.org/wikipedia/commons/')
def fileIsShared(self):
@@ -3821,9 +3821,9 @@
if infos:
for i in infos:
result.append((i['timestamp'], i['user'], u"%s×%s" % (i['width'], i['height']), i['size'], i['comment']))
-
+
return result
-
+
#from ImagePage HTML
history = re.search('(?s)<table class="wikitable filehistory">.+?</table>', self.getImagePageHtml())
if history:
@@ -3853,7 +3853,7 @@
self._loadInfo()
if self._infoLoaded:
return [self._latestInfo['user'], self._latestInfo['timestamp']]
-
+
inf = self.getFileVersionHistory()[0]
return [inf[1], inf[0]]
@@ -3906,15 +3906,15 @@
data = query.GetData(params, self.site())
if 'error' in data:
raise RuntimeError("%s" % data['error'])
-
+
for iu in data['query']["imageusage"]:
yield Page(self.site(), iu['title'], defaultNamespace=iu['ns'])
-
+
if 'query-continue' in data:
params['iucontinue'] = data['query-continue']['imageusage']['iucontinue']
else:
break
-
+
def _usingPagesOld(self):
"""Yield Pages on which the image is displayed."""
titleList = re.search('(?s)<h2 id="filelinks">.+?<!-- end content -->',
@@ -4193,7 +4193,7 @@
editRestriction = revs['level']
elif revs['type'] == 'move':
moveRestriction = revs['level']
-
+
page = Page(self.site, title)
successful = False
for page2 in self.pages:
@@ -4210,7 +4210,7 @@
page2._getexception = BadTitle
successful = True
break
-
+
if not (hasattr(page2,'_contents') or hasattr(page2,'_getexception')) or self.force:
page2.editRestriction = editRestriction
page2.moveRestriction = moveRestriction
@@ -4309,7 +4309,7 @@
'prop': ['info', 'revisions'],
'titles': pagenames,
'siprop': ['general', 'namespaces'],
- 'rvprop': ['content', 'timestamp', 'user', 'comment', 'size'],#'ids',
+ 'rvprop': ['content', 'timestamp', 'user', 'comment', 'size'],#'ids',
'inprop': ['protection', 'talkid', 'subjectid'], #, 'url', 'readable'
}
@@ -4335,7 +4335,7 @@
limit = config.special_page_limit / 4 # default is 500/4, which may be easier on the server.
if len(pages) > limit:
# separate export pages for bulk-retrieve
-
+
for pagg in range(0, len(pages), limit):
if pagg == range(0, len(pages), limit)[-1]: #latest retrieve
k = pages[pagg:]
@@ -4984,7 +4984,7 @@
def _loadCookies(self, sysop = False):
"""
Retrieve session cookies for login
- if family datas define the cross projects, this function will search
+ if family datas define the cross projects, this function will search
the central login file made by this site or by an available cross-project site;
the function will read the cookie data if either of them exists
"""
@@ -5018,7 +5018,7 @@
if os.path.exists(centralPa):
self._cookies[index] = self._readCookies(centralFn)
break
-
+
if os.path.exists(localPa):
#read and dump local logindata into self._cookies[index]
# if self._cookies[index] is not available, read the local data and set the dictionary.
@@ -5045,7 +5045,7 @@
return data
except IOError:
return None
-
+
def _setupCookies(self, datas, sysop = False):
"""save the cookie dictionary to files
if cross_project enable, savefiles will separate two, centraldata and localdata.
@@ -5056,7 +5056,7 @@
cache = {0:"",1:""} #0 is central auth, 1 is local.
if not self.username(sysop):
if not self._cookies[index]:
- return
+ return
elif self.family.cross_projects_cookie_username in self._cookies[index]:
# Using centralauth to cross login data, it's not necessary to forceLogin, but Site() didn't know it.
# So we need add centralauth username data into siteattribute
@@ -5069,7 +5069,7 @@
cache[0] += "%s=%s\n" % (k,v)
else:
cache[1] += "%s=%s\n" % (k,v)
-
+
# write the data.
if self.family.cross_projects and cache[0]:
filename = '%s-%s-central-login.data' % (self.family.name, self.username(sysop))
@@ -5099,14 +5099,14 @@
index = self._userIndex(sysop)
if not self._cookies[index]:
self._setupCookies(datas, sysop)
-
+
for k, v in datas.iteritems():
if k in self._cookies[index]:
if v != self._cookies[index][k]:
self._cookies[index][k] = v
else:
self._cookies[index][k] = v
-
+
self._setupCookies(self._cookies[index], sysop)
def urlEncode(self, query):
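
updateCookies() above folds freshly returned Set-Cookie values into the cached jar, then rewrites the cookie files via _setupCookies(). A minimal dict-merge sketch of the same logic, using the modern dict API instead of iteritems and leaving persistence to the caller:

def merge_cookies(jar, fresh):
    # Overwrite changed values and add new keys, as updateCookies() does.
    for k, v in fresh.items():
        if jar.get(k) != v:
            jar[k] = v
    return jar
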
@@ -5222,11 +5222,11 @@
}
if cookies:
headers['Cookie'] = cookies
-
+
if compress:
headers['Accept-encoding'] = 'gzip'
#print '%s' % headers
-
+
url = '%s://%s%s' % (self.protocol(), self.hostname(), address)
# Try to retrieve the page until it was successfully loaded (just in
# case the server is down or overloaded).
@@ -5265,7 +5265,7 @@
raise
else:
output(u"Result: %s %s" % (e.code, e.msg))
- raise
+ raise
except Exception, e:
output(u'%s' %e)
if config.retry_on_fail:
@@ -5280,7 +5280,7 @@
retry_idle_time = 30
continue
raise
-
+
# check cookies return or not, if return, send its to update.
if hasattr(f, 'sheaders'):
ck = f.sheaders
@@ -5366,16 +5366,16 @@
headers['Cookie'] = self.cookies(sysop = sysop)
if compress:
headers['Accept-encoding'] = 'gzip'
-
+
if refer:
headers['Refer'] = refer
-
+
if no_hostname: # This allow users to parse also toolserver's script
url = path # and other useful pages without using some other functions.
else:
url = '%s://%s%s' % (self.protocol(), self.hostname(), path)
data = self.urlEncode(data)
-
+
# Try to retrieve the page until it was successfully loaded (just in
# case the server is down or overloaded).
# Wait for retry_idle_time minutes (growing!) between retries.
@@ -5420,7 +5420,7 @@
raise
else:
output(u"Result: %s %s" % (e.code, e.msg))
- raise
+ raise
except Exception, e:
output(u'%s' %e)
if retry:
@@ -5435,7 +5435,7 @@
if retry_idle_time > 30:
retry_idle_time = 30
continue
-
+
raise
# check cookies return or not, if return, send its to update.
if hasattr(f, 'sheaders'):
@@ -5449,7 +5449,7 @@
m = Reat.search(d)
if m: tmpc[m.group(1)] = m.group(2)
self.updateCookies(tmpc, sysop)
-
+
if cookie_only:
return headers.get('set-cookie', '')
contentType = headers.get('content-type', '')
@@ -5492,7 +5492,7 @@
if back_response:
return f, text
-
+
return text
def _getUserData(self, text, sysop = False, force = True):
@@ -5503,10 +5503,10 @@
* text - the page text
* sysop - is the user a sysop?
"""
-
+
index = self._userIndex(sysop)
# Check for blocks
-
+
if 'blockedby' in text and not self._isBlocked[index]:
# Write a warning if not shown earlier
if sysop:
@@ -5598,7 +5598,7 @@
else:
if not self._isBlocked[index]:
output(u'WARNING: Token not found on %s. You will not be able to edit any page.' % self)
-
+
def _getUserDataOld(self, text, sysop = False, force = True):
"""
Get the user data from a wiki page data.
@@ -5729,14 +5729,14 @@
if u'<textarea' in text and u'<li id="ca-viewsource"' not in text and not self._isBlocked[index]:
# Token not found
output(u'WARNING: Token not found on %s. You will not be able to edit any page.' % self)
-
+
def siteinfo(self, key = 'general', force = False, dump = False):
"""Get Mediawiki Site informations by API
dump - return all siteinfo datas
-
+
some siprop params return huge amounts of data that MediaWiki takes a long time to serve (found by testing);
these params can still be fetched, but only one at a time.
-
+
"""
# protection for key in other datatype
if type(key) not in [str, unicode]:
@@ -5759,7 +5759,7 @@
if key in ['specialpagealiases', 'interwikimap', 'namespacealiases', 'usergroups', ]:
if verbose: print 'getting huge siprop %s...' % key
params['siprop'] = [key]
-
+
#ver 1.13 handle
if self.versionnumber() > 13:
if key not in ['specialpagealiases', 'interwikimap', 'namespacealiases', 'usergroups', ]:
@@ -5781,7 +5781,7 @@
self._info[key][entry['name']] = entry['aliases']
else:
for k, v in data.iteritems():
- self._info[k] = v
+ self._info[k] = v
#data pre-process
if dump:
return self._info
@@ -5904,7 +5904,7 @@
return True
except KeyError:
return False
-
+
def has_api(self):
"""Return True if this sites family has api interface."""
try:
@@ -5947,7 +5947,7 @@
params['uiprop'].append('preferencestoken')
data = query.GetData(params, self, sysop=sysop)
-
+
# Show the API error code instead of raising an IndexError
if 'error' in data:
raise RuntimeError('%s' % data['error'])
@@ -5961,7 +5961,7 @@
else:
url = self.edit_address('Non-existing_page')
text = self.getUrl(url, sysop = sysop)
-
+
self._getUserDataOld(text, sysop = sysop, force = force)
def search(self, key, number=10, namespaces=None):
@@ -6011,7 +6011,7 @@
def logpages(self, number = 50, mode = '', title = None, user = None, repeat = False,
namespace = [], start = None, end = None, tag = None, newer = False, dump = False):
-
+
if not self.has_api() or self.versionnumber() < 11 or \
mode not in ('block', 'protect', 'rights', 'delete', 'upload',
'move', 'import', 'patrol', 'merge', 'suppress',
@@ -6043,7 +6043,7 @@
params['leend'] = end
if tag and self.versionnumber() >= 16: # tag support from mw:r58399
params['letag'] = tag
-
+
nbresults = 0
while True:
result = query.GetData(params, self)
@@ -6061,7 +6061,7 @@
p_ret = ImagePage(self, c['title'])
else:
p_ret = Page(self, c['title'], defaultNamespace=c['ns'])
-
+
yield (p_ret, c['user'],
parsetime2stamp(c['timestamp']),
c['comment'], )
@@ -6325,7 +6325,7 @@
No more than 500 (5000 for bots) allowed.
Default: 10
"""
-
+
for o, u, t, c in self.logpages(number = number, mode = 'upload', title = letitle, user = leuser,
repeat = repeat, start = lestart, end = leend):
yield o, t, u, c
@@ -6826,7 +6826,7 @@
yield Page(self, pages['title'], defaultNamespace=pages['ns'])
if count >= limit:
break
-
+
if 'query-continue' in data and count < limit:
params['euoffset'] = data[u'query-continue'][u'exturlusage'][u'euoffset']
else:
@@ -7584,15 +7584,15 @@
# the argument is not global. Let the specific bot script care
# about it.
nonGlobalArgs.append(arg)
-
+
# TEST for bug #3081100
if unicode_error and (default_code == 'hi' or moduleName=='interwiki'):
output("""
================================================================================
-\03{lightyellow}WARNING:\03{lightred} your python version might trigger issue #3081100\03{default}
+\03{lightyellow}WARNING:\03{lightred} your python version might trigger issue #3081100\03{default}
See http://goo.gl/W8lJB for more information.
-\03{lightyellow}Use an older python version (<2.6.5) if you are running on wikimedia sites!\03{default}
+\03{lightyellow}Use an older python version (<2.6.5) if you are running on wikimedia sites!\03{default}
================================================================================
""")
@@ -8016,13 +8016,13 @@
if config.proxy['host']:
proxyHandler = urllib2.ProxyHandler({'http':'http://%s/' % config.proxy['host'] })
-
+
MyURLopener.add_handler(proxyHandler)
if config.proxy['auth']:
proxyAuth = urllib2.HTTPPasswordMgrWithDefaultRealm()
proxyAuth.add_password(None, config.proxy['host'], config.proxy['auth'][0], config.proxy['auth'][1])
proxyAuthHandler = urllib2.ProxyBasicAuthHandler(proxyAuth)
-
+
MyURLopener.add_handler(proxyAuthHandler)
if config.authenticate:
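
Several of the wikipedia.py hunks touch the HTTP retry loops, which wait retry_idle_time minutes between attempts and cap the wait at 30 minutes. A minimal sketch of that backoff; the doubling step is an assumption (only the cap is visible in the hunks), and the fetch callable is illustrative:

import time

def get_with_retries(fetch, retry=True):
    # Retry until the page loads (server down or overloaded), sleeping a
    # growing number of minutes between attempts, capped at 30.
    idle = 1
    while True:
        try:
            return fetch()
        except IOError as e:
            if not retry:
                raise
            print('%s' % e)
            print('WARNING: Could not load page. Retrying in %i minutes.' % idle)
            time.sleep(idle * 60)
            idle = min(idle * 2, 30)
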
Modified: trunk/pywikipedia/xmlreader.py
===================================================================
--- trunk/pywikipedia/xmlreader.py 2011-03-12 23:21:45 UTC (rev 9041)
+++ trunk/pywikipedia/xmlreader.py 2011-03-13 10:14:47 UTC (rev 9042)
@@ -193,10 +193,10 @@
self.timestamp[17:19])
self.title = self.title.strip()
# Report back to the caller
- entry = XmlEntry(self.title, self.id,
- text, self.username,
- self.ipedit, timestamp,
- self.editRestriction, self.moveRestriction,
+ entry = XmlEntry(self.title, self.id,
+ text, self.username,
+ self.ipedit, timestamp,
+ self.editRestriction, self.moveRestriction,
self.revisionid, self.comment, self.isredirect)
self.inRevisionTag = False
self.callback(entry)
@@ -337,7 +337,7 @@
yield self._create_revision(elem)
elem.clear()
self.root.clear()
-
+
def _headers(self, elem):
self.title = elem.findtext("{%s}title" % self.uri)
self.pageid = elem.findtext("{%s}id" % self.uri)
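
The xmlreader.py hunks show the two halves of dump parsing: collecting per-revision fields into an XmlEntry, and clearing consumed elements (elem.clear(), self.root.clear()) so a full dump never accumulates in memory. A minimal iterparse sketch of the same streaming pattern; the namespace URI is an assumption (the export schema version varies), and only the title field is extracted here:

from xml.etree.ElementTree import iterparse

NS = '{http://www.mediawiki.org/xml/export-0.4/}'  # assumed schema version

def iter_titles(dump_path):
    # Stream <page> elements and free each one as soon as it is consumed,
    # mirroring the elem.clear()/root.clear() pattern in xmlreader.py.
    for event, elem in iterparse(dump_path, events=('end',)):
        if elem.tag == NS + 'page':
            title = (elem.findtext(NS + 'title') or '').strip()
            yield title
            elem.clear()
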