Revision: 8520 Author: xqt Date: 2010-09-11 10:40:05 +0000 (Sat, 11 Sep 2010)
Log Message: ----------- import wikipedia as pywikibot for merging to rewrite branch
Modified Paths: -------------- trunk/pywikipedia/djvutext.py trunk/pywikipedia/extract_wikilinks.py trunk/pywikipedia/featured.py trunk/pywikipedia/fixing_redirects.py trunk/pywikipedia/flickrripper.py
Modified: trunk/pywikipedia/djvutext.py =================================================================== --- trunk/pywikipedia/djvutext.py 2010-09-11 10:03:42 UTC (rev 8519) +++ trunk/pywikipedia/djvutext.py 2010-09-11 10:40:05 UTC (rev 8520) @@ -24,7 +24,7 @@ # Distributed under the terms of the MIT license. # __version__ = '$Id$' -import wikipedia +import wikipedia as pywikibot import os, sys import config, codecs
@@ -33,6 +33,7 @@ docuReplacements = { }
+ class DjVuTextBot: # Edit summary message that should be used. # NOTE: Put a good description here, and add translations, if possible! @@ -70,7 +71,7 @@ cmd = u"djvused -e 'n' "%s"" % (self.djvu) count = os.popen( cmd.encode(sys.stdout.encoding) ).readline().rstrip() count = int(count) - wikipedia.output("page count = %d" % count) + pywikibot.output("page count = %d" % count) return count
def PagesGenerator(self): @@ -86,40 +87,43 @@ else: start = int(self.pages) end = start - wikipedia.output(u"Processing pages %d-%d" % (start, end)) + pywikibot.output(u"Processing pages %d-%d" % (start, end)) return range(start, end+1)
def run(self): # Set the edit summary message - wikipedia.setAction(wikipedia.translate(wikipedia.getSite(), self.msg)) + pywikibot.setAction(pywikibot.translate(pywikibot.getSite(), self.msg))
- linkingPage = wikipedia.Page(wikipedia.getSite(), self.index) + linkingPage = pywikibot.Page(pywikibot.getSite(), self.index) self.prefix = linkingPage.titleWithoutNamespace() if self.prefix[0:6] == 'Liber:': self.prefix = self.prefix[6:] - wikipedia.output(u"Using prefix %s" % self.prefix) + pywikibot.output(u"Using prefix %s" % self.prefix) gen = self.PagesGenerator()
- site = wikipedia.getSite() + site = pywikibot.getSite() self.username = config.usernames[site.family.name][site.lang]
for pageno in gen: - wikipedia.output("Processing page %d" % pageno) + pywikibot.output("Processing page %d" % pageno) self.treat(pageno)
def has_text(self): cmd = u"djvudump "%s" > "%s".out" % (self.djvu, self.djvu) os.system ( cmd.encode(sys.stdout.encoding) ) - f = codecs.open(u"%s.out" % self.djvu, 'r', config.textfile_encoding, 'replace') + f = codecs.open(u"%s.out" % self.djvu, 'r', + config.textfile_encoding, 'replace') s = f.read() f.close() return s.find('TXTz') >= 0
def get_page(self, pageno): - wikipedia.output(unicode("fetching page %d" % (pageno))) - cmd = u"djvutxt --page=%d "%s" "%s.out"" % (pageno, self.djvu, self.djvu) + pywikibot.output(unicode("fetching page %d" % (pageno))) + cmd = u"djvutxt --page=%d "%s" "%s.out"" \ + % (pageno, self.djvu, self.djvu) os.system ( cmd.encode(sys.stdout.encoding) ) - f = codecs.open(u"%s.out" % self.djvu, 'r', config.textfile_encoding, 'replace') + f = codecs.open(u"%s.out" % self.djvu, 'r', + config.textfile_encoding, 'replace') djvu_text = f.read() f.close() return djvu_text @@ -128,15 +132,14 @@ """ Loads the given page, does some changes, and saves it. """ - site = wikipedia.getSite() + site = pywikibot.getSite() page_namespace = site.family.namespaces[104][site.lang] - page = wikipedia.Page(site, u'%s:%s/%d' % (page_namespace, self.prefix, pageno) ) + page = pywikibot.Page(site, u'%s:%s/%d' + % (page_namespace, self.prefix, pageno)) exists = page.exists() - djvutxt = self.get_page(pageno) - if not djvutxt: - djvutxt = wikipedia.translate(wikipedia.getSite(), self.blank) + djvutxt = pywikibot.translate(pywikibot.getSite(), self.blank) text = u'<noinclude>{{PageQuality|1|%s}}<div class="pagetext">\n\n\n</noinclude>%s<noinclude><references/></div></noinclude>' % (self.username,djvutxt)
# convert to wikisyntax @@ -145,38 +148,37 @@
# only save if something was changed # automatically ask if overwriting an existing page - ask = self.ask + if exists: ask = True old_text = page.get() if old_text == text: - wikipedia.output(u"No changes were needed on %s" % page.aslink()) + pywikibot.output(u"No changes were needed on %s" % page.aslink()) return else: old_text = '' - - wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) - wikipedia.showDiff(old_text, text) - + pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" + % page.title()) + pywikibot.showDiff(old_text, text) if self.dry: - wikipedia.inputChoice(u'Dry mode... Press enter to continue', [], [], 'dummy') + pywikibot.inputChoice(u'Dry mode... Press enter to continue', [], + [], 'dummy') return - if ask: - choice = wikipedia.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') + choice = pywikibot.inputChoice(u'Do you want to accept these changes?', ['Yes', 'No'], ['y', 'N'], 'N') else: choice = 'y' if choice == 'y': try: # Save the page page.put_async(text) - except wikipedia.LockedPage: - wikipedia.output(u"Page %s is locked; skipping." % page.aslink()) - except wikipedia.EditConflict: - wikipedia.output(u'Skipping %s because of edit conflict' % (page.title())) - except wikipedia.SpamfilterError, error: - wikipedia.output(u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url)) + except pywikibot.LockedPage: + pywikibot.output(u"Page %s is locked; skipping." % page.aslink()) + except pywikibot.EditConflict: + pywikibot.output(u'Skipping %s because of edit conflict' % (page.title())) + except pywikibot.SpamfilterError, error: + pywikibot.output(u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url))
def main(): @@ -189,7 +191,7 @@ ask = False
# Parse command line arguments - for arg in wikipedia.handleArgs(): + for arg in pywikibot.handleArgs(): if arg.startswith("-dry"): dry = True elif arg.startswith("-ask"): @@ -201,7 +203,7 @@ elif arg.startswith("-pages:"): pages = arg[7:] else: - wikipedia.output(u"Unknown argument %s" % arg) + pywikibot.output(u"Unknown argument %s" % arg)
# Check the djvu file exists if djvu: @@ -212,33 +214,29 @@ index = os.path.basename(djvu)
if djvu and index: - site = wikipedia.getSite() - index_page = wikipedia.Page(site, index) + site = pywikibot.getSite() + index_page = pywikibot.Page(site, index)
if site.family.name != 'wikisource': - raise wikipedia.PageNotFound(u"Found family '%s'; Wikisource required." % site.family.name) + raise pywikibot.PageNotFound(u"Found family '%s'; Wikisource required." % site.family.name)
if not index_page.exists() and index_page.namespace() == 0: - index_namespace = wikipedia.Page(site, 'MediaWiki:Proofreadpage index namespace').get() + index_namespace = pywikibot.Page(site, 'MediaWiki:Proofreadpage index namespace').get()
- index_page = wikipedia.Page(wikipedia.getSite(), + index_page = pywikibot.Page(pywikibot.getSite(), u"%s:%s" % (index_namespace, index)) - if not index_page.exists(): - raise wikipedia.NoPage(u"Page '%s' does not exist" % index) - - wikipedia.output(u"uploading text from %s to %s" % (djvu, index_page.aslink()) ) - + raise pywikibot.NoPage(u"Page '%s' does not exist" % index) + pywikibot.output(u"uploading text from %s to %s" % (djvu, index_page.aslink()) ) bot = DjVuTextBot(djvu, index, pages, ask, dry) if not bot.has_text(): raise ValueError("No text layer in djvu file") - bot.run() else: - wikipedia.showHelp() + pywikibot.showHelp()
if __name__ == "__main__": try: main() finally: - wikipedia.stopme() + pywikibot.stopme()
Modified: trunk/pywikipedia/extract_wikilinks.py =================================================================== --- trunk/pywikipedia/extract_wikilinks.py 2010-09-11 10:03:42 UTC (rev 8519) +++ trunk/pywikipedia/extract_wikilinks.py 2010-09-11 10:40:05 UTC (rev 8520) @@ -21,15 +21,18 @@ # __version__='$Id$' # -import sys,re,wikipedia,codecs -wikipedia.stopme() # This bot does not contact the Wiki, so no need to get it on the list +import sys,re +import codecs +import wikipedia as pywikibot +# This bot does not contact the Wiki, so no need to get it on the list +pywikibot.stopme() R = re.compile('/wiki/(.*?)" *') fn = [] sorted = False list = [] complete = True
-for arg in wikipedia.handleArgs(): +for arg in pywikibot.handleArgs(): if arg.startswith("-sorted"): sorted = True elif arg.startswith("-bare"): @@ -43,8 +46,7 @@ print "No file specified to get the links from" sys.exit(1)
-mysite = wikipedia.getSite() - +mysite = pywikibot.getSite() f=open(fn,'r') text=f.read() f.close()
Modified: trunk/pywikipedia/featured.py =================================================================== --- trunk/pywikipedia/featured.py 2010-09-11 10:03:42 UTC (rev 8519) +++ trunk/pywikipedia/featured.py 2010-09-11 10:40:05 UTC (rev 8520) @@ -52,7 +52,8 @@ #
import sys, re, pickle, os.path -import wikipedia, catlib, config +import wikipedia as pywikibot +import catlib, config
def CAT(site,name): name = site.namespace(14) + ':' + name @@ -61,8 +62,9 @@
def BACK(site,name): name = site.namespace(10) + ':' + name - p=wikipedia.Page(site, name) - return [page for page in p.getReferences(follow_redirects = False, onlyTemplateInclusion=True)] + p=pywikibot.Page(site, name) + return [page for page in p.getReferences(follow_redirects=False, + onlyTemplateInclusion=True)]
msg = { 'als':u'Bötli: [[%s:%s]] isch en bsunders glungener Artikel', @@ -90,7 +92,7 @@ 'it': u'Bot: collegamento articolo in vetrina [[%s:%s]]', 'ja': u'ロボットによる: 秀逸な記事へのリンク [[%s:%s]]', 'ka': u'ბოტი: რჩეული სტატიის ბმული გვერდისათვის [[%s:%s]]', - 'ko': u'로봇: 알찬 글 [[%s:%s]] 를 가리키는 링크',#로봇이:? + 'ko': u'로봇: 알찬 글 [[%s:%s]] 를 가리키는 링크', #로봇이:? 'ksh':u'bot: [[%s:%s]] ess_enen ußjezëijshneten Atikkel', 'lb': u'Bot: Exzellenten Arikel Link op [[%s:%s]]', 'lt': u'Bot: Pavyzdinis straipsnis [[%s:%s]]', @@ -147,6 +149,7 @@ 'pt': u'Bot: [[%s:%s]] é uma lista destacada', 'sv': u'Bot: [[%s:%s]] är en utmärkt list', } + msg_former = { 'ar': u'بوت: [[%s:%s]] مقالة مختارة سابقة', 'de': u'Bot: [[%s:%s]] ist ein ehemaliger ausgezeichneter Artikel', @@ -406,7 +409,9 @@ else: method=featured_name[site.lang][0] except KeyError: - wikipedia.output(u'Error: language %s doesn't has %s category source.' % (site.lang, feature)) + pywikibot.output( + u'Error: language %s doesn't has %s category source.' + % (site.lang, feature)) return arts if pType == 'good': name=good_name[site.lang][1] @@ -420,20 +425,23 @@ for p in raw: if p.namespace()==0: # Article arts.append(p) - elif p.namespace()==1 and site.lang <> 'el': # Article talk (like in English) - arts.append(wikipedia.Page(p.site(), p.titleWithoutNamespace())) - wikipedia.output('\03{lightred}** wikipedia:%s has %i %s articles\03{default}' % (site.lang, len(arts), pType)) + # Article talk (like in English) + elif p.namespace()==1 and site.lang <> 'el': + arts.append(pywikibot.Page(p.site(), p.titleWithoutNamespace())) + pywikibot.output( + '\03{lightred}** wikipedia:%s has %i %s articles\03{default}' + % (site.lang, len(arts), pType)) return arts
def findTranslated(page, oursite=None, quiet=False): if not oursite: - oursite=wikipedia.getSite() + oursite=pywikibot.getSite() if page.isRedirectPage(): page = page.getRedirectTarget() try: iw=page.interwiki() except: - wikipedia.output(u"%s -> no interwiki, giving up" % page.title()) + pywikibot.output(u"%s -> no interwiki, giving up" % page.title()) return None ourpage=None for p in iw: @@ -442,22 +450,26 @@ break if not ourpage: if not quiet: - wikipedia.output(u"%s -> no corresponding page in %s" % (page.title(), oursite)) + pywikibot.output(u"%s -> no corresponding page in %s" + % (page.title(), oursite)) return None if not ourpage.exists(): - wikipedia.output(u"%s -> our page doesn't exist: %s" % (page.title(), ourpage.title())) + pywikibot.output(u"%s -> our page doesn't exist: %s" + % (page.title(), ourpage.title())) return None if ourpage.isRedirectPage(): ourpage = ourpage.getRedirectTarget() - wikipedia.output(u"%s -> corresponding page is %s" % (page.title(), ourpage.title())) + pywikibot.output(u"%s -> corresponding page is %s" + % (page.title(), ourpage.title())) if ourpage.namespace() != 0: - wikipedia.output(u"%s -> not in the main namespace, skipping" % page.title()) + pywikibot.output(u"%s -> not in the main namespace, skipping" + % page.title()) return None if ourpage.isRedirectPage(): - wikipedia.output(u"%s -> double redirect, skipping" % page.title()) + pywikibot.output(u"%s -> double redirect, skipping" % page.title()) return None if not ourpage.exists(): - wikipedia.output(u"%s -> page doesn't exist, skipping" % ourpage.title()) + pywikibot.output(u"%s -> page doesn't exist, skipping" % ourpage.title()) return None try: iw=ourpage.interwiki() @@ -469,7 +481,7 @@ backpage=p break if not backpage: - wikipedia.output(u"%s -> no back interwiki ref" % page.title()) + pywikibot.output(u"%s -> no back interwiki ref" % page.title()) return None if backpage==page: # everything is ok @@ -479,7 +491,8 @@ if backpage==page: # everything is ok return 
ourpage - wikipedia.output(u"%s -> back interwiki ref target is %s" % (page.title(), backpage.title())) + pywikibot.output(u"%s -> back interwiki ref target is %s" + % (page.title(), backpage.title())) return None
def getTemplateList (lang, pType): @@ -503,7 +516,8 @@ templates = template['_default'] return templates
-def featuredWithInterwiki(fromsite, tosite, template_on_top, pType, quiet, dry = False): +def featuredWithInterwiki(fromsite, tosite, template_on_top, pType, quiet, + dry=False): if not fromsite.lang in cache: cache[fromsite.lang]={} if not tosite.lang in cache[fromsite.lang]: @@ -514,7 +528,9 @@
templatelist = getTemplateList(tosite.lang, pType) findtemplate = '(' + '|'.join(templatelist) + ')' - re_Link_FA=re.compile(ur"{{%s|%s}}" % (findtemplate.replace(u' ', u'[ _]'), fromsite.lang), re.IGNORECASE) + re_Link_FA=re.compile(ur"{{%s|%s}}" + % (findtemplate.replace(u' ', u'[ _]'), + fromsite.lang), re.IGNORECASE) re_this_iw=re.compile(ur"[[%s:[^]]+]]" % fromsite.lang)
arts=featuredArticles(fromsite, pType) @@ -524,16 +540,16 @@ if a.title()<afterpage: continue if u"/" in a.title() and a.namespace() != 0: - wikipedia.output(u"%s is a subpage" % a.title()) + pywikibot.output(u"%s is a subpage" % a.title()) continue if a.title() in cc: - wikipedia.output(u"(cached) %s -> %s"%(a.title(), cc[a.title()])) + pywikibot.output(u"(cached) %s -> %s"%(a.title(), cc[a.title()])) continue if a.isRedirectPage(): a=a.getRedirectTarget() try: if not a.exists(): - wikipedia.output(u"source page doesn't exist: %s" % a.title()) + pywikibot.output(u"source page doesn't exist: %s" % a.title()) continue atrans = findTranslated(a, tosite, quiet) if pType!='former': @@ -541,43 +557,56 @@ text=atrans.get() m=re_Link_FA.search(text) if m: - wikipedia.output(u"(already done)") + pywikibot.output(u"(already done)") else: # insert just before interwiki if (not interactive or - wikipedia.input(u'Connecting %s -> %s. Proceed? [Y/N]'%(a.title(), atrans.title())) in ['Y','y'] + pywikibot.input( + u'Connecting %s -> %s. Proceed? 
[Y/N]' + % (a.title(), atrans.title())) in ['Y','y'] ): m=re_this_iw.search(text) if not m: - wikipedia.output(u"no interwiki record, very strange") + pywikibot.output( + u"no interwiki record, very strange") continue - site = wikipedia.getSite() + site = pywikibot.getSite() if pType == 'good': - comment = wikipedia.setAction(wikipedia.translate(site, msg_good) % (fromsite.lang, a.title())) + comment = pywikibot.setAction( + pywikibot.translate(site, msg_good) + % (fromsite.lang, a.title())) elif pType == 'list': - comment = wikipedia.setAction(wikipedia.translate(site, msg_lists) % (fromsite.lang, a.title())) + comment = pywikibot.setAction( + pywikibot.translate(site, msg_lists) + % (fromsite.lang, a.title())) else: - comment = wikipedia.setAction(wikipedia.translate(site, msg) % (fromsite.lang, a.title())) + comment = pywikibot.setAction( + pywikibot.translate(site, msg) + % (fromsite.lang, a.title())) ### Moving {{Link FA|xx}} to top of interwikis ### if template_on_top == True: # Getting the interwiki - iw = wikipedia.getLanguageLinks(text, site) + iw = pywikibot.getLanguageLinks(text, site) # Removing the interwiki - text = wikipedia.removeLanguageLinks(text, site) - text += u"\r\n{{%s|%s}}\r\n"%(templatelist[0], fromsite.lang) + text = pywikibot.removeLanguageLinks(text, site) + text += u"\r\n{{%s|%s}}\r\n" % (templatelist[0], + fromsite.lang) # Adding the interwiki - text = wikipedia.replaceLanguageLinks(text, iw, site) + text = pywikibot.replaceLanguageLinks(text, + iw, site)
### Placing {{Link FA|xx}} right next to corresponding interwiki ### else: text=(text[:m.end()] - + (u" {{%s|%s}}" % (templatelist[0], fromsite.lang)) + + (u" {{%s|%s}}" % (templatelist[0], + fromsite.lang)) + text[m.end():]) if not dry: try: atrans.put(text, comment) - except wikipedia.LockedPage: - wikipedia.output(u'Page %s is locked!' % atrans.title()) + except pywikibot.LockedPage: + pywikibot.output(u'Page %s is locked!' + % atrans.title()) cc[a.title()]=atrans.title() else: if atrans: @@ -586,25 +615,31 @@ if m: # insert just before interwiki if (not interactive or - wikipedia.input(u'Connecting %s -> %s. Proceed? [Y/N]'%(a.title(), atrans.title())) in ['Y','y'] + pywikibot.input( + u'Connecting %s -> %s. Proceed? [Y/N]' + % (a.title(), atrans.title())) in ['Y','y'] ): m=re_this_iw.search(text) if not m: - wikipedia.output(u"no interwiki record, very strange") + pywikibot.output( + u"no interwiki record, very strange") continue - site = wikipedia.getSite() - comment = wikipedia.setAction(wikipedia.translate(site, msg_former) % (fromsite.lang, a.title())) + site = pywikibot.getSite() + comment = pywikibot.setAction( + pywikibot.translate(site, msg_former) + % (fromsite.lang, a.title())) text=re.sub(re_Link_FA,'',text) if not dry: try: atrans.put(text, comment) - except wikipedia.LockedPage: - wikipedia.output(u'Page %s is locked!' % atrans.title()) + except pywikibot.LockedPage: + pywikibot.output(u'Page %s is locked!' + % atrans.title()) else: - wikipedia.output(u"(already done)") + pywikibot.output(u"(already done)") cc[a.title()]=atrans.title() - except wikipedia.PageNotSaved, e: - wikipedia.output(u"Page not saved") + except pywikibot.PageNotSaved, e: + pywikibot.output(u"Page not saved")
if __name__=="__main__": template_on_top = True @@ -615,7 +650,7 @@ part = False quiet = False dry = False - for arg in wikipedia.handleArgs(): + for arg in pywikibot.handleArgs(): if arg == '-interactive': interactive=1 elif arg == '-nocache': @@ -650,13 +685,17 @@ if not ll1: ll1="" if not ll2: ll2="zzzzzzz" if processType == 'good': - fromlang=[ll for ll in good_name.keys() if ll>=ll1 and ll<=ll2] + fromlang=[ll for ll in good_name.keys() + if ll>=ll1 and ll<=ll2] elif processType == 'list': - fromlang=[ll for ll in good_lists.keys() if ll>=ll1 and ll<=ll2] + fromlang=[ll for ll in good_lists.keys() + if ll>=ll1 and ll<=ll2] elif processType == 'former': - fromlang=[ll for ll in former_lists.keys() if ll>=ll1 and ll<=ll2] + fromlang=[ll for ll in former_lists.keys() + if ll>=ll1 and ll<=ll2] else: - fromlang=[ll for ll in featured_name.keys() if ll>=ll1 and ll<=ll2] + fromlang=[ll for ll in featured_name.keys() + if ll>=ll1 and ll<=ll2] except: pass
@@ -678,7 +717,7 @@
if not fromlang: - wikipedia.showHelp('featured') + pywikibot.showHelp('featured') sys.exit(1)
fromlang.sort() @@ -686,27 +725,29 @@ #test whether this site has template enabled hasTemplate = False if not featuredcount: - for tl in getTemplateList(wikipedia.getSite().lang, processType): - t = wikipedia.Page(wikipedia.getSite(), u'Template:'+tl) + for tl in getTemplateList(pywikibot.getSite().lang, processType): + t = pywikibot.Page(pywikibot.getSite(), u'Template:'+tl) if t.exists(): hasTemplate = True break try: for ll in fromlang: - fromsite = wikipedia.getSite(ll) + fromsite = pywikibot.getSite(ll) if featuredcount: featuredArticles(fromsite, processType) elif not hasTemplate: - wikipedia.output(u'\nNOTE: %s arcticles are not implemented at %s-wiki.' % (processType, wikipedia.getSite().lang)) - wikipedia.output('Quitting program...') + pywikibot.output( + u'\nNOTE: %s arcticles are not implemented at %s-wiki.' + % (processType, pywikibot.getSite().lang)) + pywikibot.output('Quitting program...') break - elif fromsite != wikipedia.getSite(): - featuredWithInterwiki(fromsite, wikipedia.getSite(), + elif fromsite != pywikibot.getSite(): + featuredWithInterwiki(fromsite, pywikibot.getSite(), template_on_top, processType, quiet, dry) except KeyboardInterrupt: - wikipedia.output('\nQuitting program...') + pywikibot.output('\nQuitting program...') finally: - wikipedia.stopme() + pywikibot.stopme() if not nocache: pickle.dump(cache,file(filename,"wb"))
Modified: trunk/pywikipedia/fixing_redirects.py =================================================================== --- trunk/pywikipedia/fixing_redirects.py 2010-09-11 10:03:42 UTC (rev 8519) +++ trunk/pywikipedia/fixing_redirects.py 2010-09-11 10:40:05 UTC (rev 8520) @@ -22,7 +22,7 @@ # __version__='$Id$' # -import wikipedia +import wikipedia as pywikibot import pagegenerators import re, sys
@@ -76,7 +76,7 @@ """ Based on the method of the same name in solve_disambiguation.py """ - mysite = wikipedia.getSite() + mysite = pywikibot.getSite() linktrail = mysite.linktrail()
# make a backup of the original text so we can show the changes later @@ -90,10 +90,11 @@ # Make sure that next time around we will not find this same hit. curpos = m.start() + 1 # ignore interwiki links and links to sections of the same page - if m.group('title').strip() == '' or mysite.isInterwikiLink(m.group('title')): + if m.group('title').strip() == '' or \ + mysite.isInterwikiLink(m.group('title')): continue else: - actualLinkPage = wikipedia.Page(targetPage.site(), m.group('title')) + actualLinkPage = pywikibot.Page(targetPage.site(), m.group('title')) # Check whether the link found is to page. if actualLinkPage != linkedPage: continue @@ -102,7 +103,7 @@ context = 15 # at the beginning of the link, start red color. # at the end of the link, reset the color to default - #wikipedia.output(text[max(0, m.start() - context) : m.start()] + '\03{lightred}' + text[m.start() : m.end()] + '\03{default}' + text[m.end() : m.end() + context]) + #pywikibot.output(text[max(0, m.start() - context) : m.start()] + '\03{lightred}' + text[m.start() : m.end()] + '\03{default}' + text[m.end() : m.end() + context]) choice = 'y'
# The link looks like this: @@ -133,7 +134,8 @@ if link_text[0].isupper(): new_page_title = targetPage.title() else: - new_page_title = targetPage.title()[0].lower() + targetPage.title()[1:] + new_page_title = targetPage.title()[0].lower() + \ + targetPage.title()[1:]
# remove preleading ":" if new_page_title[0]==':': @@ -143,9 +145,14 @@ newlink = "[[%s%s]]%s" % (new_page_title, section, trailing_chars) elif replaceit or (new_page_title == link_text and not section): newlink = "[[%s]]" % new_page_title - # check if we can create a link with trailing characters instead of a pipelink - elif len(new_page_title) <= len(link_text) and firstcap(link_text[:len(new_page_title)]) == firstcap(new_page_title) and re.sub(re.compile(linktrail), '', link_text[len(new_page_title):]) == '' and not section: - newlink = "[[%s]]%s" % (link_text[:len(new_page_title)], link_text[len(new_page_title):]) + # check if we can create a link with trailing characters instead of a + # pipelink + elif len(new_page_title) <= len(link_text) and \ + firstcap(link_text[:len(new_page_title)]) == \ + firstcap(new_page_title) and \ + re.sub(re.compile(linktrail), '', link_text[len(new_page_title):]) == '' and not section: + newlink = "[[%s]]%s" % (link_text[:len(new_page_title)], + link_text[len(new_page_title):]) else: newlink = "[[%s%s|%s]]" % (new_page_title, section, link_text) text = text[:m.start()] + newlink + text[m.end():] @@ -155,36 +162,37 @@ pageCache = []
def workon(page): - mysite = wikipedia.getSite() + mysite = pywikibot.getSite() try: text = page.get() - except wikipedia.IsRedirectPage: - wikipedia.output(u'%s is a redirect page. Skipping' % page.aslink()) + except pywikibot.IsRedirectPage: + pywikibot.output(u'%s is a redirect page. Skipping' % page.aslink()) return - except wikipedia.NoPage: - wikipedia.output(u'%s does not exist. Skipping' % page.aslink()) + except pywikibot.NoPage: + pywikibot.output(u'%s does not exist. Skipping' % page.aslink()) return - wikipedia.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" % page.title()) + pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<" + % page.title()) links = page.linkedPages() if len(links) > 0: - wikipedia.getall(mysite,links) + pywikibot.getall(mysite,links) else: - wikipedia.output('Nothing left to do.') + pywikibot.output('Nothing left to do.') return
for page2 in links: try: target = page2.getRedirectTarget() - except (wikipedia.Error,wikipedia.SectionError): + except (pywikibot.Error,pywikibot.SectionError): continue text = treat(text, page2, target) if text != page.get(): - comment = wikipedia.translate(mysite, msg) - wikipedia.showDiff(page.get() ,text) + comment = pywikibot.translate(mysite, msg) + pywikibot.showDiff(page.get() ,text) try: page.put(text, comment) - except (wikipedia.Error): - wikipedia.output('Error: unable to put %s' % page.aslink()) + except (pywikibot.Error): + pywikibot.output('Error: unable to put %s' % page.aslink())
def main(): featured = False @@ -195,20 +203,21 @@ # to work on. genFactory = pagegenerators.GeneratorFactory()
- for arg in wikipedia.handleArgs(): + for arg in pywikibot.handleArgs(): if arg == '-featured': featured = True else: genFactory.handleArg(arg)
- mysite = wikipedia.getSite() + mysite = pywikibot.getSite() if mysite.sitename() == 'wikipedia:nl': - wikipedia.output(u'\03{lightred}There is consensus on the Dutch Wikipedia that bots should not be used to fix redirects.\03{default}') + pywikibot.output( + u'\03{lightred}There is consensus on the Dutch Wikipedia that bots should not be used to fix redirects.\03{default}') sys.exit()
if featured: - featuredList = wikipedia.translate(mysite, featured_articles) - ref = wikipedia.Page(wikipedia.getSite(), featuredList) + featuredList = pywikibot.translate(mysite, featured_articles) + ref = pywikibot.Page(pywikibot.getSite(), featuredList) gen = pagegenerators.ReferringPageGenerator(ref) gen = pagegenerators.NamespaceFilterPageGenerator(gen, [0]) if not gen: @@ -217,10 +226,10 @@ for page in pagegenerators.PreloadingGenerator(gen): workon(page) else: - wikipedia.showHelp('fixing_redirects') + pywikibot.showHelp('fixing_redirects')
if __name__ == "__main__": try: main() finally: - wikipedia.stopme() + pywikibot.stopme()
Modified: trunk/pywikipedia/flickrripper.py =================================================================== --- trunk/pywikipedia/flickrripper.py 2010-09-11 10:03:42 UTC (rev 8519) +++ trunk/pywikipedia/flickrripper.py 2010-09-11 10:40:05 UTC (rev 8520) @@ -32,23 +32,25 @@ __version__ = '$Id$'
import sys, urllib, re, StringIO, hashlib, base64, time -import wikipedia, config, query, imagerecat, upload +import wikipedia as pywikibot +import config, query, imagerecat, upload
import flickrapi # see: http://stuvel.eu/projects/flickrapi import xml.etree.ElementTree from Tkinter import * from PIL import Image, ImageTk # see: http://www.pythonware.com/products/pil/
-flickr_allowed_license = { 0 : False, # All Rights Reserved - 1 : False, # Creative Commons Attribution-NonCommercial-ShareAlike License - 2 : False, # Creative Commons Attribution-NonCommercial License - 3 : False, # Creative Commons Attribution-NonCommercial-NoDerivs License - 4 : True, # Creative Commons Attribution License - 5 : True, # Creative Commons Attribution-ShareAlike License - 6 : False, # Creative Commons Attribution-NoDerivs License - 7 : True, # No known copyright restrictions - 8 : True, # United States Government Work - } +flickr_allowed_license = { + 0 : False, # All Rights Reserved + 1 : False, # Creative Commons Attribution-NonCommercial-ShareAlike License + 2 : False, # Creative Commons Attribution-NonCommercial License + 3 : False, # Creative Commons Attribution-NonCommercial-NoDerivs License + 4 : True, # Creative Commons Attribution License + 5 : True, # Creative Commons Attribution-ShareAlike License + 6 : False, # Creative Commons Attribution-NoDerivs License + 7 : True, # No known copyright restrictions + 8 : True, # United States Government Work +}
def getPhoto(flickr = None, photo_id = ''): ''' @@ -67,7 +69,7 @@ gotPhoto = True except flickrapi.exceptions.FlickrError: gotPhotos = False - wikipedia.output(u'Flickr api problem, sleeping') + pywikibot.output(u'Flickr api problem, sleeping') time.sleep(30) return (photoInfo, photoSizes)
@@ -99,24 +101,25 @@ Download the photo and store it in a StringIO.StringIO object.
TODO: Add exception handling + ''' imageFile=urllib.urlopen(photoUrl).read() return StringIO.StringIO(imageFile)
-def findDuplicateImages(photo = None, site = wikipedia.getSite(u'commons', u'commons')): - ''' - Takes the photo, calculates the SHA1 hash and asks the mediawiki api for a list of duplicates. +def findDuplicateImages(photo=None, + site=pywikibot.getSite(u'commons', u'commons')): + ''' Takes the photo, calculates the SHA1 hash and asks the mediawiki api + for a list of duplicates.
TODO: Add exception handling, fix site thing + ''' hashObject = hashlib.sha1() hashObject.update(photo.getvalue()) return site.getFilesFromAnHash(base64.b16encode(hashObject.digest()))
def getTags(photoInfo = None): - ''' - Get all the tags on a photo - ''' + ''' Get all the tags on a photo ''' result = [] for tag in photoInfo.find('photo').find('tags').findall('tag'): result.append(tag.text.lower()) @@ -125,20 +128,21 @@
def getFlinfoDescription(photo_id = 0): ''' - Get the description from http://wikipedia.ramselehof.de/flinfo.php + Get the description from http://wikipedia.ramselehof.de/flinfo.php
TODO: Add exception handling, try a couple of times ''' parameters = urllib.urlencode({'id' : photo_id, 'raw' : 'on'})
- rawDescription = urllib.urlopen("http://wikipedia.ramselehof.de/flinfo.php?%s" % parameters).read() + rawDescription = urllib.urlopen( + "http://wikipedia.ramselehof.de/flinfo.php?%s" % parameters).read()
return rawDescription.decode('utf-8')
-def getFilename(photoInfo=None, site=wikipedia.getSite(u'commons', u'commons'), project=u'Flickr'): - ''' - Build a good filename for the upload based on the username and the title. - Prevents naming collisions. +def getFilename(photoInfo=None, site=pywikibot.getSite(u'commons', u'commons'), + project=u'Flickr'): + ''' Build a good filename for the upload based on the username and the + title. Prevents naming collisions.
''' username = photoInfo.find('photo').find('owner').attrib['username'] @@ -148,22 +152,25 @@ else: title = u''
- if wikipedia.Page(site, u'File:%s - %s - %s.jpg' % (project, username, title) ).exists(): + if pywikibot.Page(site, u'File:%s - %s - %s.jpg' + % (project, username, title) ).exists(): i = 1 while True: - if (wikipedia.Page(site, u'File:%s - %s - %s (%s).jpg' % (project, username, title, str(i))).exists()): + if (pywikibot.Page(site, u'File:%s - %s - %s (%s).jpg' + % (project, username, title, str(i))).exists()): i = i + 1 else: - return u'%s - %s - %s (%s).jpg' % (project, username, title, str(i)) + return u'%s - %s - %s (%s).jpg' % (project, username, title, + str(i)) else: return u'%s - %s - %s.jpg' % (project, username, title)
def cleanUpTitle(title): + ''' Clean up the title of a potential mediawiki page. Otherwise the title of + the page might not be allowed by the software. + ''' - Clean up the title of a potential mediawiki page. Otherwise the title of the page might not be allowed by the software. - ''' title = title.strip() - title = re.sub(u"[<{\[]", u"(", title) title = re.sub(u"[>}\]]", u")", title) title = re.sub(u"[ _]?\(!\)", u"", title) @@ -180,39 +187,44 @@ title = re.sub(u",,+", u",", title) title = re.sub(u"[-,^]([.]|$)", u"\1", title) title = title.replace(u" ", u"_") - return title
-def buildDescription(flinfoDescription=u'', flickrreview=False, reviewer=u'', override=u'', addCategory=u'', removeCategories=False): +def buildDescription(flinfoDescription=u'', flickrreview=False, reviewer=u'', + override=u'', addCategory=u'', removeCategories=False): + ''' Build the final description for the image. The description is based on + the info from flickrinfo and improved. + ''' - Build the final description for the image. The description is based on the info from flickrinfo and improved. - ''' description = flinfoDescription - if removeCategories: - description = wikipedia.removeCategoryLinks(description, wikipedia.getSite('commons', 'commons')) - + description = pywikibot.removeCategoryLinks(description, + pywikibot.getSite( + 'commons', 'commons')) if override: description = description.replace(u'{{cc-by-sa-2.0}}\n', u'') description = description.replace(u'{{cc-by-2.0}}\n', u'') description = description.replace(u'{{flickrreview}}\n', u'') - description = description.replace(u'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not a free license --~~~~}}\n', u'') - description = description.replace(u'=={{int:license}}==', u'=={{int:license}}==\n' + override) + description = description.replace( + u'{{copyvio|Flickr, licensed as "All Rights Reserved" which is not a free license --~~~~}}\n', + u'') + description = description.replace(u'=={{int:license}}==', + u'=={{int:license}}==\n' + override) elif flickrreview: if reviewer: - description = description.replace(u'{{flickrreview}}', u'{{flickrreview|' + reviewer + '|{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-{{subst:CURRENTDAY2}}}}') - + description = description.replace(u'{{flickrreview}}', + u'{{flickrreview|' + reviewer + + '|{{subst:CURRENTYEAR}}-{{subst:CURRENTMONTH}}-{{subst:CURRENTDAY2}}}}') if addCategory: description = description.replace(u'{{subst:unc}}\n', u'') description = description + u'\n[[Category:' + addCategory + ']]\n' description = description.replace(u'\r\n', u'\n') 
return description
-def processPhoto(flickr=None, photo_id=u'', flickrreview=False, reviewer=u'', override=u'', addCategory=u'', removeCategories=False, autonomous=False): - ''' - Process a single Flickr photo - ''' +def processPhoto(flickr=None, photo_id=u'', flickrreview=False, reviewer=u'', + override=u'', addCategory=u'', removeCategories=False, + autonomous=False): + ''' Process a single Flickr photo ''' if photo_id: print photo_id (photoInfo, photoSizes) = getPhoto(flickr, photo_id) @@ -225,36 +237,43 @@ #Don't upload duplicate images, should add override option duplicates = findDuplicateImages(photo) if duplicates: - wikipedia.output(u'Found duplicate image at %s' % duplicates.pop()) + pywikibot.output(u'Found duplicate image at %s' % duplicates.pop()) else: filename = getFilename(photoInfo) flinfoDescription = getFlinfoDescription(photo_id) - photoDescription = buildDescription(flinfoDescription, flickrreview, reviewer, override, addCategory, removeCategories) - #wikipedia.output(photoDescription) + photoDescription = buildDescription(flinfoDescription, + flickrreview, reviewer, + override, addCategory, + removeCategories) + #pywikibot.output(photoDescription) if not autonomous: - (newPhotoDescription, newFilename, skip)=Tkdialog(photoDescription, photo, filename).run() + (newPhotoDescription, newFilename, skip) = Tkdialog( + photoDescription, photo, filename).run() else: - newPhotoDescription=photoDescription - newFilename=filename - skip=False - #wikipedia.output(newPhotoDescription) - #if (wikipedia.Page(title=u'File:'+ filename, site=wikipedia.getSite()).exists()): + newPhotoDescription = photoDescription + newFilename = filename + skip = False + #pywikibot.output(newPhotoDescription) + #if (pywikibot.Page(title=u'File:'+ filename, site=pywikibot.getSite()).exists()): # I should probably check if the hash is the same and if not upload it under a different name - #wikipedia.output(u'File:' + filename + u' already exists!') + #pywikibot.output(u'File:' + filename + u' 
already exists!') #else: #Do the actual upload #Would be nice to check before I upload if the file is already at Commons #Not that important for this program, but maybe for derived programs if not skip: - bot = upload.UploadRobot(photoUrl, description=newPhotoDescription, useFilename=newFilename, keepFilename=True, verifyDescription=False) + bot = upload.UploadRobot(photoUrl, + description=newPhotoDescription, + useFilename=newFilename, + keepFilename=True, + verifyDescription=False) bot.upload_image(debug=False) return 1 return 0
+ class Tkdialog: - ''' - The user dialog. - ''' + ''' The user dialog. ''' def __init__(self, photoDescription, photo, filename): self.root=Tk() #"%dx%d%+d%+d" % (width, height, xoffset, yoffset) @@ -310,41 +329,35 @@ self.descriptionScrollbar.grid(row=14, column=5)
def getImage(self, photo, width, height): - ''' - Take the StringIO object and build an imageTK thumbnail - ''' + ''' Take the StringIO object and build an imageTK thumbnail ''' image = Image.open(photo) image.thumbnail((width, height)) imageTk = ImageTk.PhotoImage(image) return imageTk
def okFile(self): - ''' - The user pressed the OK button. - ''' + ''' The user pressed the OK button. ''' self.filename=self.filenameField.get() self.photoDescription=self.descriptionField.get(0.0, END) self.root.destroy()
def skipFile(self): - ''' - The user pressed the Skip button. - ''' + ''' The user pressed the Skip button. ''' self.skip=True self.root.destroy()
def run(self): + ''' Activate the dialog and return the new name and if the image is + skipped. + ''' - Activate the dialog and return the new name and if the image is skipped. - ''' self.root.mainloop() return (self.photoDescription, self.filename, self.skip)
-def getPhotos(flickr=None, user_id=u'', group_id=u'', photoset_id=u'', start_id='', end_id='', tags=u''): - ''' - Loop over a set of Flickr photos.
- ''' +def getPhotos(flickr=None, user_id=u'', group_id=u'', photoset_id=u'', + start_id='', end_id='', tags=u''): + ''' Loop over a set of Flickr photos. ''' result = [] retry = False if not start_id: @@ -356,79 +369,90 @@ # Get the photos in a group if group_id: #First get the total number of photo's in the group - photos = flickr.groups_pools_getPhotos(group_id=group_id, user_id=user_id, tags=tags, per_page='100', page='1') + photos = flickr.groups_pools_getPhotos(group_id=group_id, + user_id=user_id, tags=tags, + per_page='100', page='1') pages = photos.find('photos').attrib['pages']
for i in range(1, int(pages) + 1): gotPhotos = False while not gotPhotos: try: - for photo in flickr.groups_pools_getPhotos(group_id=group_id, user_id=user_id, tags=tags, per_page='100', page=i).find('photos').getchildren(): + for photo in flickr.groups_pools_getPhotos( + group_id=group_id, user_id=user_id, tags=tags, + per_page='100', page=i + ).find('photos').getchildren(): gotPhotos = True if photo.attrib['id']==start_id: found_start_id=True if found_start_id: if photo.attrib['id']==end_id: - wikipedia.output('Found end_id') + pywikibot.output('Found end_id') return else: yield photo.attrib['id']
except flickrapi.exceptions.FlickrError: gotPhotos = False - wikipedia.output(u'Flickr api problem, sleeping') + pywikibot.output(u'Flickr api problem, sleeping') time.sleep(30)
# http://www.flickr.com/services/api/flickr.photosets.getPhotos.html # Get the photos in a photoset elif photoset_id: - photos = flickr.photosets_getPhotos(photoset_id=photoset_id, per_page='100', page='1') + photos = flickr.photosets_getPhotos(photoset_id=photoset_id, + per_page='100', page='1') pages = photos.find('photoset').attrib['pages']
for i in range(1, int(pages)+1): gotPhotos = False while not gotPhotos: try: - for photo in flickr.photosets_getPhotos(photoset_id=photoset_id, per_page='100', page=i).find('photoset').getchildren(): + for photo in flickr.photosets_getPhotos( + photoset_id=photoset_id, per_page='100', page=i + ).find('photoset').getchildren(): gotPhotos = True if photo.attrib['id']==start_id: found_start_id=True if found_start_id: if photo.attrib['id']==end_id: - wikipedia.output('Found end_id') + pywikibot.output('Found end_id') return else: yield photo.attrib['id']
except flickrapi.exceptions.FlickrError: gotPhotos = False - wikipedia.output(u'Flickr api problem, sleeping') + pywikibot.output(u'Flickr api problem, sleeping') time.sleep(30)
# http://www.flickr.com/services/api/flickr.people.getPublicPhotos.html # Get the (public) photos uploaded by a user elif user_id: - photos = flickr.people_getPublicPhotos(user_id=user_id, per_page='100', page='1') + photos = flickr.people_getPublicPhotos(user_id=user_id, + per_page='100', page='1') pages = photos.find('photos').attrib['pages'] #flickrapi.exceptions.FlickrError for i in range(1, int(pages)+1): gotPhotos = False while not gotPhotos: try: - for photo in flickr.people_getPublicPhotos(user_id=user_id, per_page='100', page=i).find('photos').getchildren(): + for photo in flickr.people_getPublicPhotos( + user_id=user_id, per_page='100', page=i + ).find('photos').getchildren(): gotPhotos = True if photo.attrib['id'] == start_id: found_start_id=True if found_start_id: if photo.attrib['id'] == end_id: - wikipedia.output('Found end_id') + pywikibot.output('Found end_id') return else: yield photo.attrib['id']
except flickrapi.exceptions.FlickrError: gotPhotos = False - wikipedia.output(u'Flickr api problem, sleeping') + pywikibot.output(u'Flickr api problem, sleeping') time.sleep(30)
return @@ -439,24 +463,26 @@
TODO : Need more. ''' - wikipedia.output(u"Flickrripper is a tool to transfer flickr photos to Wikimedia Commons") - wikipedia.output(u"-group_id:<group_id>\n") - wikipedia.output(u"-photoset_id:<photoset_id>\n") - wikipedia.output(u"-user_id:<user_id>\n") - wikipedia.output(u"-tags:<tag>\n") + pywikibot.output( + u"Flickrripper is a tool to transfer flickr photos to Wikimedia Commons") + pywikibot.output(u"-group_id:<group_id>\n") + pywikibot.output(u"-photoset_id:<photoset_id>\n") + pywikibot.output(u"-user_id:<user_id>\n") + pywikibot.output(u"-tags:<tag>\n") return
def main(): - site = wikipedia.getSite(u'commons', u'commons') - wikipedia.setSite(site) + site = pywikibot.getSite(u'commons', u'commons') + pywikibot.setSite(site) #imagerecat.initLists()
#Get the api key if config.flickr['api_key']: flickr = flickrapi.FlickrAPI(config.flickr['api_key']) else: - wikipedia.output('Flickr api key not found! Get yourself an api key') - wikipedia.output('Any flickr user can get a key at http://www.flickr.com/services/api/keys/apply/') + pywikibot.output('Flickr api key not found! Get yourself an api key') + pywikibot.output( + 'Any flickr user can get a key at http://www.flickr.com/services/api/keys/apply/') return
group_id = u'' @@ -488,74 +514,81 @@ else: reviewer = u''
- override = u'' # Should be renamed to overrideLicense or something like that - - for arg in wikipedia.handleArgs(): + # Should be renamed to overrideLicense or something like that + override = u'' + for arg in pywikibot.handleArgs(): if arg.startswith('-group_id'): if len(arg) == 9: - group_id = wikipedia.input(u'What is the group_id of the pool?') + group_id = pywikibot.input(u'What is the group_id of the pool?') else: group_id = arg[10:] elif arg.startswith('-photoset_id'): if len(arg) == 12: - photoset_id = wikipedia.input(u'What is the photoset_id?') + photoset_id = pywikibot.input(u'What is the photoset_id?') else: photoset_id = arg[13:] elif arg.startswith('-user_id'): if len(arg) == 8: - user_id = wikipedia.input(u'What is the user_id of the flickr user?') + user_id = pywikibot.input( + u'What is the user_id of the flickr user?') else: user_id = arg[9:] elif arg.startswith('-start_id'): if len(arg) == 9: - start_id = wikipedia.input(u'What is the id of the photo you want to start at?') + start_id = pywikibot.input( + u'What is the id of the photo you want to start at?') else: start_id = arg[10:] elif arg.startswith('-end_id'): if len(arg) == 7: - end_id = wikipedia.input(u'What is the id of the photo you want to end at?') + end_id = pywikibot.input( + u'What is the id of the photo you want to end at?') else: end_id = arg[8:] elif arg.startswith('-tags'): if len(arg) == 5: - tags = wikipedia.input(u'What is the tag you want to filter out (currently only one supported)?') + tags = pywikibot.input( + u'What is the tag you want to filter out (currently only one supported)?') else: tags = arg[6:] elif arg == '-flickrreview': flickrreview = True elif arg.startswith('-reviewer'): if len(arg) == 9: - reviewer = wikipedia.input(u'Who is the reviewer?') + reviewer = pywikibot.input(u'Who is the reviewer?') else: reviewer = arg[10:] elif arg.startswith('-override'): if len(arg) == 9: - override = wikipedia.input(u'What is the override text?') + override = 
pywikibot.input(u'What is the override text?') else: override = arg[10:] elif arg.startswith('-addcategory'): if len(arg) == 12: - addCategory = wikipedia.input(u'What category do you want to add?') + addCategory = pywikibot.input( + u'What category do you want to add?') else: addCategory = arg[13:] elif arg == '-removecategories': removeCategories = True elif arg == '-autonomous': autonomous = True - + if user_id or group_id or photoset_id: - for photo_id in getPhotos(flickr, user_id, group_id, photoset_id, start_id, end_id, tags): - uploadedPhotos += processPhoto(flickr, photo_id, flickrreview, reviewer, override, addCategory, removeCategories, autonomous) + for photo_id in getPhotos(flickr, user_id, group_id, photoset_id, + start_id, end_id, tags): + uploadedPhotos += processPhoto(flickr, photo_id, flickrreview, + reviewer, override, addCategory, + removeCategories, autonomous) totalPhotos += 1 else: usage() - - wikipedia.output(u'Finished running') - wikipedia.output(u'Total photos: ' + str(totalPhotos)) - wikipedia.output(u'Uploaded photos: ' + str(uploadedPhotos)) + pywikibot.output(u'Finished running') + pywikibot.output(u'Total photos: ' + str(totalPhotos)) + pywikibot.output(u'Uploaded photos: ' + str(uploadedPhotos))
if __name__ == "__main__": try: main() finally: - wikipedia.stopme() + pywikibot.stopme()
pywikipedia-svn@lists.wikimedia.org