http://www.mediawiki.org/wiki/Special:Code/pywikipedia/10831
Revision: 10831 Author: xqt Date: 2012-12-24 14:29:26 +0000 (Mon, 24 Dec 2012) Log Message: ----------- some minor PEP8 changes
Modified Paths: -------------- trunk/pywikipedia/apispec.py trunk/pywikipedia/archivebot.py trunk/pywikipedia/blockpageschecker.py trunk/pywikipedia/commonscat.py trunk/pywikipedia/copyright.py trunk/pywikipedia/featured.py trunk/pywikipedia/gui.py trunk/pywikipedia/imagerecat.py trunk/pywikipedia/wikipedia.py
Modified: trunk/pywikipedia/apispec.py =================================================================== --- trunk/pywikipedia/apispec.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/apispec.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -43,7 +43,7 @@
def uniso(timestamp): """Removes T and Z from an ISO 8601-formatted text for readability.""" - return timestamp.replace('T',' ').replace('Z','') + return timestamp.replace('T', ' ').replace('Z', '')
def dt(timestamp): """Converts a MediaWiki timestamp to a Python-compatible datetime object""" @@ -212,7 +212,7 @@ ################################################# def __init__(self, site=site, top='new', limit=5000): self.site = site - self.bkdir = ['older','newer'][top=='old'] #a bit strange + self.bkdir = ['older', 'newer'][top=='old'] #a bit strange # bkdir: Direction to list in. #older: List newest blocks first (default). #Note: bkstart has to be later than bkend. @@ -439,8 +439,8 @@ or e-mail it or insert into a wikipage with <pre>. """ w = 21 #width for justification - flags = ['automatic','anononly','nocreate','autoblock','noemail', - 'allowusertalk','hidden'] + flags = ['automatic', 'anononly', 'nocreate', 'autoblock', 'noemail', + 'allowusertalk', 'hidden'] s = 'Data for block #%s' % block['id'] s += '\nBlocked user:'.ljust(w) try: @@ -455,8 +455,8 @@ (block['rangestart'],block['rangeend']) s += '\nAdmin:'.ljust(w) + '%s (#%s)' % (block['by'], block['byid']) s += '\nBeginning in UTC:'.ljust(w) + uniso(block['timestamp']) - s += ( - '\nExpiry%s:' % ['',' in UTC'][block['expiry'][0].isdigit()]).ljust(w) + s += ('\nExpiry%s:' \ + % ['', ' in UTC'][block['expiry'][0].isdigit()]).ljust(w) s += uniso(block['expiry']) s += '\nFlags:'.ljust(w) s += ', '.join(filter(lambda x: x in block, flags))
Modified: trunk/pywikipedia/archivebot.py =================================================================== --- trunk/pywikipedia/archivebot.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/archivebot.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -238,27 +238,30 @@ if not TIME: TIME = txt2timestamp(TM.group(0), "%Y. %B %d., %H:%M (%Z)") if not TIME: - TIME = txt2timestamp(TM.group(0),"%d. %b %Y kl.%H:%M (%Z)") + TIME = txt2timestamp(TM.group(0), "%d. %b %Y kl.%H:%M (%Z)") if not TIME: - TIME = txt2timestamp(re.sub(' *([^ ]+) *', '', TM.group(0)),"%H:%M, %d %B %Y") + TIME = txt2timestamp(re.sub(' *([^ ]+) *', '', TM.group(0)), + "%H:%M, %d %B %Y") if not TIME: - TIME = txt2timestamp(TM.group(0),"%H:%M, %d %b %Y (%Z)") + TIME = txt2timestamp(TM.group(0), "%H:%M, %d %b %Y (%Z)") if not TIME: - TIME = txt2timestamp(re.sub(' *([^ ]+) *','',TM.group(0)),"%H:%M, %d %b %Y") + TIME = txt2timestamp(re.sub(' *([^ ]+) *', '', TM.group(0)), + "%H:%M, %d %b %Y") if not TIME: - TIME = txt2timestamp(TM.group(0),"%H:%M, %b %d %Y (%Z)") + TIME = txt2timestamp(TM.group(0), "%H:%M, %b %d %Y (%Z)") if not TIME: - TIME = txt2timestamp(TM.group(0),"%H:%M, %B %d %Y (%Z)") + TIME = txt2timestamp(TM.group(0), "%H:%M, %B %d %Y (%Z)") if not TIME: - TIME = txt2timestamp(TM.group(0),"%H:%M, %b %d, %Y (%Z)") + TIME = txt2timestamp(TM.group(0), "%H:%M, %b %d, %Y (%Z)") if not TIME: - TIME = txt2timestamp(TM.group(0),"%H:%M, %B %d, %Y (%Z)") + TIME = txt2timestamp(TM.group(0), "%H:%M, %B %d, %Y (%Z)") if not TIME: TIME = txt2timestamp(TM.group(0),"%d. %Bta %Y kello %H.%M (%Z)") if not TIME: - TIME = txt2timestamp(TM.group(0),"%d %B %Y %H:%M (%Z)") + TIME = txt2timestamp(TM.group(0), "%d %B %Y %H:%M (%Z)") if not TIME: - TIME = txt2timestamp(re.sub(' *([^ ]+) *', '', TM.group(0)), "%H:%M, %d. %b. %Y") + TIME = txt2timestamp(re.sub(' *([^ ]+) *', '', TM.group(0)), + "%H:%M, %d. %b. %Y") if TIME: self.timestamp = max(self.timestamp, time.mktime(TIME)) ## pywikibot.output(u'Time to be parsed: %s' % TM.group(0)) @@ -346,7 +349,7 @@ if sortThreads: pywikibot.output(u'Sorting threads...') self.threads.sort(key = lambda t: t.timestamp) - newtext = re.sub('\n*$','\n\n',self.header) #Fix trailing newlines + newtext = re.sub('\n*$', '\n\n', self.header) #Fix trailing newlines for t in self.threads: newtext += t.toText() if self.full: @@ -385,7 +388,7 @@
def set(self, attr, value, out=True): if attr == 'archive': - value = value.replace('_',' ') + value = value.replace('_', ' ') self.attributes[attr] = [value, out]
def saveables(self): @@ -437,7 +440,7 @@
def analyzePage(self): maxArchSize = str2size(self.get('maxarchivesize')) - archCounter = int(self.get('counter','1')) + archCounter = int(self.get('counter', '1')) oldthreads = self.Page.threads self.Page.threads = [] T = time.mktime(time.gmtime())
Modified: trunk/pywikipedia/blockpageschecker.py =================================================================== --- trunk/pywikipedia/blockpageschecker.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/blockpageschecker.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -192,7 +192,7 @@ def showQuest(site, page): quest = pywikibot.inputChoice(u'Do you want to open the page?', ['with browser', 'with gui', 'no'], - ['b','g','n'], 'n') + ['b', 'g', 'n'], 'n') pathWiki = site.family.nicepath(site.lang) url = 'http://%s%s%s?&redirect=no' % (pywikibot.getSite().hostname(), pathWiki, page.urlname())
Modified: trunk/pywikipedia/commonscat.py =================================================================== --- trunk/pywikipedia/commonscat.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/commonscat.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -320,7 +320,7 @@ else: for (inPageTemplate, param) in templatesWithParams: if inPageTemplate == template[0] \ - and template[1] in param[0].replace(' ',''): + and template[1] in param[0].replace(' ', ''): return True return False
Modified: trunk/pywikipedia/copyright.py =================================================================== --- trunk/pywikipedia/copyright.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/copyright.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -262,7 +262,7 @@ u'Riferimenti bibliografici', u'Collegamenti esterni', u'Pubblicazioni', u'Pubblicazioni principali', u'Bibliografia parziale'], - 'is': [u'Heimildir', u'Tenglar', u'Tengt efni'], + 'is': [u'Heimildir', u'Tenglar', u'Tengt efni'], 'ja': [u'脚注', u'脚注欄', u'脚注・出典', u'出典', u'注釈'], 'zh': [u'參考文獻', u'参考文献', u'參考資料', u'参考资料', u'資料來源', u'资料来源', u'參見', u'参见', u'參閱', u'参阅'], @@ -433,7 +433,7 @@ def read_file(filename, cut_comment = False, cut_newlines = False): text = u""
- f = codecs.open(filename, 'r','utf-8') + f = codecs.open(filename, 'r', 'utf-8') text = f.read() f.close()
@@ -742,12 +742,14 @@ comment.append('[http://www.google.com/search?sourceid=navclient&q=cache:%s Google cache]' % urllib.quote(short_url(add_item))) elif engine == 'yahoo': #cache = False - #comment.append('[%s Yahoo cache]' % re.sub('&appid=[^&]*','', urllib2.unquote(cache_url))) + #comment.append('[%s Yahoo cache]' % re.sub('&appid=[^&]*', '', urllib2.unquote(cache_url))) comment.append("''Yahoo cache''") elif engine == 'msn': - comment.append('[%s Live cache]' % re.sub('&lang=[^&]*','', cache_url)) + comment.append('[%s Live cache]' + % re.sub('&lang=[^&]*', '', cache_url)) else: - comment.append('[http://web.archive.org/*/%s archive.org]' % short_url(add_item)) + comment.append('[http://web.archive.org/*/%s archive.org]' + % short_url(add_item))
for i in range(len(url)): if add_item in url[i]:
Modified: trunk/pywikipedia/featured.py =================================================================== --- trunk/pywikipedia/featured.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/featured.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -476,7 +476,7 @@ if (not interactive or pywikibot.input( u'Connecting %s -> %s. Proceed? [Y/N]' - % (a.title(), atrans.title())) in ['Y','y'] + % (a.title(), atrans.title())) in ['Y', 'y'] ): m=re_this_iw.search(text) if not m: @@ -521,7 +521,7 @@ if (not interactive or pywikibot.input( u'Connecting %s -> %s. Proceed? [Y/N]' - % (a.title(), atrans.title())) in ['Y','y'] + % (a.title(), atrans.title())) in ['Y', 'y'] ): m=re_this_iw.search(text) if not m:
Modified: trunk/pywikipedia/gui.py =================================================================== --- trunk/pywikipedia/gui.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/gui.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -43,8 +43,9 @@ 'hilite', fgBg='bg'), insertbackground=idleConf.GetHighlight(currentTheme, 'cursor', fgBg='fg'), - width=idleConf.GetOption('main','EditorWindow','width'), - height=idleConf.GetOption('main','EditorWindow','height') + width=idleConf.GetOption('main', 'EditorWindow', 'width'), + height=idleConf.GetOption('main', 'EditorWindow', + 'height') ) fontWeight = 'normal' if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
Modified: trunk/pywikipedia/imagerecat.py =================================================================== --- trunk/pywikipedia/imagerecat.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/imagerecat.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -167,12 +167,12 @@ if (matches.group('catnum') > 0): cats = matches.group('cats').splitlines() for cat in cats: - commonshelperCats.append(cat.replace('_',' ')) + commonshelperCats.append(cat.replace('_', ' ')) pywikibot.output(u'category : ' + cat) if (matches.group('galnum') > 0): gals = matches.group('gals').splitlines() for gal in gals: - galleries.append(gal.replace('_',' ')) + galleries.append(gal.replace('_', ' ')) pywikibot.output(u'gallery : ' + gal) commonshelperCats = list(set(commonshelperCats)) galleries = list(set(galleries)) @@ -352,7 +352,6 @@ result.append(subcategory.title(withNamespace=False)) return list(set(result))
- def filterParents(categories): ''' Remove all parent categories from the set to prevent overcategorization.
@@ -360,7 +359,7 @@ result = [] toFilter = u'' for cat in categories: - cat = cat.replace('_',' ') + cat = cat.replace('_', ' ') toFilter = toFilter + "[[Category:" + cat + "]]\n" parameters = urllib.urlencode({'source' : toFilter.encode('utf-8'), 'bot' : '1'})
Modified: trunk/pywikipedia/wikipedia.py =================================================================== --- trunk/pywikipedia/wikipedia.py 2012-12-24 13:50:34 UTC (rev 10830) +++ trunk/pywikipedia/wikipedia.py 2012-12-24 14:29:26 UTC (rev 10831) @@ -3567,7 +3567,7 @@ 'list': 'deletedrevs', 'drfrom': self.title(withNamespace=False), 'drnamespace': self.namespace(), - 'drprop': ['revid','user','comment','content'],#','minor','len','token'], + 'drprop': ['revid', 'user', 'comment', 'content'], # ,'minor', 'len', 'token'], 'drlimit': 100, 'drdir': 'older', #'': '', @@ -4645,10 +4645,14 @@ #FIXME : Should have a cleaner way to get the wiki where the image is used siteparts = gu['wiki'].split('.') if len(siteparts)==3: - if siteparts[0] in self.site().fam().alphabetic and siteparts[1] in ['wikipedia', 'wiktionary', 'wikibooks', 'wikiquote','wikisource']: + if siteparts[0] in self.site().fam().alphabetic and \ + siteparts[1] in ['wikipedia', 'wiktionary', + 'wikibooks', 'wikiquote', + 'wikisource']: code = siteparts[0] fam = siteparts[1] - elif siteparts[0] in ['meta', 'incubator'] and siteparts[1]==u'wikimedia': + elif siteparts[0] in ['meta', 'incubator'] and \ + siteparts[1] == u'wikimedia': code = code = siteparts[0] fam = code = siteparts[0] else: @@ -4824,7 +4828,7 @@ if not successful: output(u"BUG>> title %s (%s) not found in list" % (title, page)) output(u'Expected one of: %s' - % u','.join([unicode(page2) for page2 in self.pages])) + % u', '.join([unicode(page2) for page2 in self.pages])) raise PageNotFound
def headerDone(self, header): @@ -4992,7 +4996,7 @@ if not successful: output(u"BUG>> title %s (%s) not found in list" % (title, page)) output(u'Expected one of: %s' - % u','.join([unicode(page2) for page2 in self.pages])) + % u', '.join([unicode(page2) for page2 in self.pages])) raise PageNotFound
def headerDoneApi(self, header): @@ -6695,7 +6699,7 @@ params = { 'action': 'query', 'meta': 'userinfo', - 'uiprop': ['blockinfo','groups','rights','hasmsg'], + 'uiprop': ['blockinfo', 'groups', 'rights', 'hasmsg'], } if self.versionnumber() >= 12: params['uiprop'].append('ratelimits') @@ -6846,7 +6850,7 @@ @deprecate_arg("get_redirect", None) #20120822 def newpages(self, user=None, returndict=False, number=10, repeat=False, namespace=0, - rcshow = ['!bot','!redirect']): + rcshow = ['!bot', '!redirect']): """Yield new articles (as Page objects) from recent changes.
Starts with the newest article and fetches the number of articles
pywikipedia-svn@lists.wikimedia.org