Revision: 7546
Author:   alexsh
Date:     2009-10-26 20:20:05 +0000 (Mon, 26 Oct 2009)

Log Message:
-----------
cleanup type

Modified Paths:
--------------
    trunk/pywikipedia/commonscat.py
    trunk/pywikipedia/copyright.py
    trunk/pywikipedia/date.py
    trunk/pywikipedia/query.py
    trunk/pywikipedia/userlib.py
    trunk/pywikipedia/wikipedia.py
Modified: trunk/pywikipedia/commonscat.py
===================================================================
--- trunk/pywikipedia/commonscat.py    2009-10-26 15:41:20 UTC (rev 7545)
+++ trunk/pywikipedia/commonscat.py    2009-10-26 20:20:05 UTC (rev 7546)
@@ -194,7 +194,7 @@
         templatesInThePage = page.templates()
         templatesWithParams = page.templatesWithParams()
         for template in ignoreTemplates[page.site().language()]:
-            if type(template) != type(tuple()):
+            if type(template) != tuple:
                 if template in templatesInThePage:
                     return True
             else:
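For context, a minimal sketch (not part of the commit) of how the two spellings of the check behave; the sample value below is invented. Comparing against the type object directly is equivalent to the old type(tuple()) form, just without building a throwaway tuple; an isinstance check would additionally accept tuple subclasses:

    # Illustrative sketch only -- not from the repository.
    template = ('Category redirect', 'Commonscat')   # hypothetical ignore entry

    print type(template) != type(tuple())   # old spelling: False (it is a tuple)
    print type(template) != tuple           # new spelling: same result, no temporary tuple
    print not isinstance(template, tuple)   # also False; would accept subclasses too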
Modified: trunk/pywikipedia/copyright.py
===================================================================
--- trunk/pywikipedia/copyright.py    2009-10-26 15:41:20 UTC (rev 7545)
+++ trunk/pywikipedia/copyright.py    2009-10-26 20:20:05 UTC (rev 7546)
@@ -466,7 +466,7 @@
 
 def join_family_data(reString, namespace):
     for s in wikipedia.Family().namespaces[namespace].itervalues():
-        if type (s) == type([]):
+        if type (s) == list:
             for e in s:
                 reString += '|' + e
         else:
@@ -797,7 +797,7 @@
     results = server_results.Responses[0].Results[0]
     if results:
         # list or instance?
-        if type(results) == type([]):
+        if type(results) == list:
             for entry in results:
                 cacheurl = None
                 if hasattr(entry, 'CacheUrl'):
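A hedged sketch of the alternation-building loop the first hunk touches: each namespace entry may be a single name or a list of aliases, and either way it is appended to a regex alternation. The namespaces data below is invented, not from any real family file:

    # Minimal sketch, assuming a namespaces mapping shaped like the one
    # join_family_data() iterates over; the sample data is made up.
    namespaces = {4: {'en': 'Wikipedia', 'de': ['Wikipedia', 'WP']}}

    reString = '[Ii]mage'
    for s in namespaces[4].values():        # itervalues() on Python 2
        if type(s) == list:                 # an entry may carry several aliases
            for e in s:
                reString += '|' + e
        else:
            reString += '|' + s

    print reString   # e.g. [Ii]mage|Wikipedia|Wikipedia|WP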
Modified: trunk/pywikipedia/date.py
===================================================================
--- trunk/pywikipedia/date.py    2009-10-26 15:41:20 UTC (rev 7545)
+++ trunk/pywikipedia/date.py    2009-10-26 20:20:05 UTC (rev 7546)
@@ -286,8 +286,8 @@
 _escPtrnCache2 = {}
 
 # Allow both unicode and single-byte strings
-_stringTypes = [type(u''), type('')]
-_listTypes = [type([]),type(())]
+_stringTypes = [unicode, str]
+_listTypes = [tuple, list]
 
 def escapePattern2( pattern ):
     """Converts a string pattern into a regex expression and cache.
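A small Python 2 sketch of how type lists like these are typically consulted for dispatch; dumpValue() is an invented stand-in, not a date.py function:

    # Hedged sketch (Python 2): dispatch on the cleaned-up type lists.
    _stringTypes = [unicode, str]
    _listTypes = [tuple, list]

    def dumpValue(value):
        if type(value) in _stringTypes:
            return u'string: ' + unicode(value)
        elif type(value) in _listTypes:
            return u'sequence of %d items' % len(value)
        raise TypeError('unsupported type %r' % type(value))

    print dumpValue(u'1974')        # string: 1974
    print dumpValue(('May', 12))    # sequence of 2 items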
Modified: trunk/pywikipedia/query.py
===================================================================
--- trunk/pywikipedia/query.py    2009-10-26 15:41:20 UTC (rev 7545)
+++ trunk/pywikipedia/query.py    2009-10-26 20:20:05 UTC (rev 7546)
@@ -237,16 +237,16 @@
     This method will convert it into a dictionary
     """
     if params is None:
-        return dict()
+        return {}
     pt = type( params )
-    if pt == type( {} ):
+    if pt == dict:
         return params
-    elif pt == type( () ):
+    elif pt == tuple:
         if len( params ) != 2: raise "Tuple size must be 2"
         return {params[0]:params[1]}
-    elif pt == type( [] ):
+    elif pt == list:
         for p in params:
-            if p != type( () ) or len( p ) != 2: raise "Every list element must be a 2 item tuple"
+            if type( p ) != tuple or len( p ) != 2: raise "Every list element must be a 2 item tuple"
         return dict( params )
     else:
         raise "Unknown param type %s" % pt
@@ -267,7 +267,7 @@
                 if len( v1 ) == 0:
                     params1[k] = v2
                 elif len( v2 ) > 0:
-                    if type('') != type(v1) or type('') != type(v2):
+                    if type(v1) != str or type(v2) != str:
                         raise "Both merged values must be of type 'str'"
                     params1[k] = v1 + '|' + v2
                 # else ignore
@@ -309,4 +309,4 @@
     return s.encode('utf-8')
 
 def IsString(s):
-    return type( s ) in [type( '' ), type( u'' )]
+    return type( s ) in [str, unicode]
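A standalone sketch of the parameter normalisation the first hunk touches, showing the three accepted shapes (dict passthrough, a single 2-tuple, a list of 2-tuples). The enclosing helper's def line is outside the shown context, so clean_params and the exception types here are invented for illustration:

    # Hedged sketch only; mirrors the behaviour of the hunk above.
    def clean_params(params):
        if params is None:
            return {}
        pt = type(params)
        if pt == dict:
            return params
        elif pt == tuple:
            if len(params) != 2:
                raise ValueError("Tuple size must be 2")
            return {params[0]: params[1]}
        elif pt == list:
            for p in params:
                if type(p) != tuple or len(p) != 2:
                    raise ValueError("Every list element must be a 2 item tuple")
            return dict(params)
        raise TypeError("Unknown param type %s" % pt)

    print clean_params({'action': 'query'})                   # passed through unchanged
    print clean_params(('titles', 'Main Page'))               # {'titles': 'Main Page'}
    print clean_params([('prop', 'info'), ('redirects', '')]) # {'prop': 'info', 'redirects': ''}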
Modified: trunk/pywikipedia/userlib.py
===================================================================
--- trunk/pywikipedia/userlib.py    2009-10-26 15:41:20 UTC (rev 7545)
+++ trunk/pywikipedia/userlib.py    2009-10-26 20:20:05 UTC (rev 7546)
@@ -567,7 +567,7 @@
     #
     if not site:
         site = wikipedia.getSite()
-    elif type(site) is str or type(site) is unicode:
+    elif type(site) in [str, unicode]:
         site = wikipedia.getSite(site)
 
     result = {}
Modified: trunk/pywikipedia/wikipedia.py
===================================================================
--- trunk/pywikipedia/wikipedia.py    2009-10-26 15:41:20 UTC (rev 7545)
+++ trunk/pywikipedia/wikipedia.py    2009-10-26 20:20:05 UTC (rev 7546)
@@ -337,7 +337,7 @@
 
         if site is None:
             site = getSite()
-        elif type(site) is str or type(site) is unicode:
+        elif type(site) in [str, unicode]:
            site = getSite(site)
 
         self._site = site
@@ -3550,7 +3550,7 @@
         #API query
         if infos:
            for i in infos:
-                result.append((i['timestamp'], i['user'], "%s\xA1\xD1%s" % (i['width'], i['height']), i['size'], i['comment']))
+                result.append((i['timestamp'], i['user'], "%s×%s" % (i['width'], i['height']), i['size'], i['comment']))
 
         return result
 
@@ -4159,7 +4159,7 @@
     }
 
     # if we got a string, compile it as a regular expression
-    if type(old) is str or type(old) is unicode:
+    if type(old) in [str, unicode]:
        if caseInsensitive:
            old = re.compile(old, re.IGNORECASE | re.UNICODE)
        else:
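A hedged Python 2 sketch of the branch in the hunk just above: when the caller passes a plain str/unicode pattern, it is compiled before use, optionally case-insensitively. The variable values below are invented:

    import re

    old = u'IMAGE'
    caseInsensitive = True

    if type(old) in [str, unicode]:
        if caseInsensitive:
            old = re.compile(old, re.IGNORECASE | re.UNICODE)
        else:
            old = re.compile(old)

    print old.sub(u'File', u'image:Example.jpg')   # File:Example.jpg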
@@ -5140,14 +5140,6 @@
             if not language[0].upper() + language[1:] in self.namespaces():
                 self._validlanguages.append(language)
 
-        #if persistent_http is None:
-        #    persistent_http = config.persistent_http
-        #self.persistent_http = persistent_http and self.protocol() in ('http', 'https')
-        #if persistent_http:
-        #    if self.protocol() == 'http':
-        #        self.conn = httplib.HTTPConnection(self.hostname())
-        #    elif self.protocol() == 'https':
-        #        self.conn = httplib.HTTPSConnection(self.hostname())
         self.persistent_http = False
 
     def _userIndex(self, sysop = False):
@@ -5392,9 +5384,6 @@
 
         # TODO: add the authenticate stuff here
 
-        #if False: #self.persistent_http:
-        #    conn = self.conn
-        #else:
         if config.proxy['host']:
             conn = httplib.HTTPConnection(config.proxy['host'])
             proxyPutAddr = '%s://%s%s' % (self.protocol(), self.hostname(), address)
@@ -5427,8 +5416,7 @@
         conn.putheader('User-agent', useragent)
         if cookies:
             conn.putheader('Cookie', cookies)
-        #if False: #self.persistent_http:
-        #    conn.putheader('Connection', 'Keep-Alive')
+
         if compress:
             conn.putheader('Accept-encoding', 'gzip')
         conn.endheaders()
@@ -5453,8 +5441,7 @@
             data = data.decode(self.encoding())
         response.close()
 
-        if True: #not self.persistent_http:
-            conn.close()
+        conn.close()
 
         # If a wiki page, get user data
         self._getUserDataOld(data, sysop = sysop)
@@ -5547,9 +5534,6 @@
         if int(headers.get('content-length', '0')) != len(text) and 'content-length' in headers:
             output(u'Warning! len(text) does not match content-length: %s != %s' % \
                    (len(text), headers.get('content-length')))
-            #if False: #self.persistent_http
-            #    self.conn.close()
-            #    self.conn.connect()
             return self.getUrl(path, retry, sysop, data, compress, no_hostname, cookie_only, back_response)
 
         if compress and contentEncoding == 'gzip':