Revision: 5326
Author:   nicdumz
Date:     2008-05-07 22:32:35 +0000 (Wed, 07 May 2008)
Log Message:
-----------
Let's use Site.path() and Site.apipath() instead of all these hardcoded paths :)
Modified Paths:
--------------
    trunk/pywikipedia/checkimages.py
    trunk/pywikipedia/pageimport.py
    trunk/pywikipedia/rcsort.py
    trunk/pywikipedia/redirect.py
    trunk/pywikipedia/welcome.py
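For readers unfamiliar with the framework, the pattern adopted in this commit is roughly the following. This is a minimal standalone sketch, not part of the commit itself; the 'en'/'wikipedia' site and the 'Sandbox' title are illustrative assumptions:

    import wikipedia  # the pywikipedia framework module

    site = wikipedia.getSite('en', 'wikipedia')   # example site, assumed configured

    # Instead of hardcoding '/w/index.php' and '/w/api.php', ask the Site object
    # for its script paths, which come from the family files:
    index_url = site.path()    + '?title=%s&action=history' % 'Sandbox'
    api_url   = site.apipath() + '?action=query&format=xml&titles=%s' % 'Sandbox'

    html = site.getUrl(index_url)   # fetched relative to the site's own hostname
    xml  = site.getUrl(api_url)

This way the scripts keep working on wikis whose index.php/api.php live somewhere other than /w/.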
Modified: trunk/pywikipedia/checkimages.py
===================================================================
--- trunk/pywikipedia/checkimages.py	2008-05-07 21:51:23 UTC (rev 5325)
+++ trunk/pywikipedia/checkimages.py	2008-05-07 22:32:35 UTC (rev 5326)
@@ -588,7 +588,7 @@
         duplicateRegex = r'\n*(?:[[:Image:%s]] has the following duplicates:|*[[:Image:%s]])$' % (self.convert_to_url(self.image), self.convert_to_url(self.image))
         imagePage = wikipedia.ImagePage(self.site, 'Image:%s' % self.image)
         wikipedia.output(u'Checking if %s has duplicates...' % image)
-        get_hash = self.site.getUrl('/w/api.php?action=query&format=xml&titles=Image:%s&prop=imageinfo&iiprop=sha1' % self.convert_to_url(self.image))
+        get_hash = self.site.getUrl(self.site.apipath() + '?action=query&format=xml&titles=Image:%s&prop=imageinfo&iiprop=sha1' % self.convert_to_url(self.image))
         hash_found_list = re.findall(r'<ii sha1="(.*?)" />', get_hash)
         if hash_found_list != []:
             hash_found = hash_found_list[0]
@@ -598,7 +598,7 @@
         else:
             wikipedia.output(u'Image deleted before getting the Hash. Skipping...')
             return False # Error, we need to skip the page.
-        get_duplicates = self.site.getUrl('/w/api.php?action=query&format=xml&list=allimages&aisha1=%s' % hash_found)
+        get_duplicates = self.site.getUrl(self.site.apipath() + '?action=query&format=xml&list=allimages&aisha1=%s' % hash_found)
         duplicates = re.findall(r'<img name="(.*?)".*?/>', get_duplicates)
         if len(duplicates) > 1:
             if len(duplicates) == 2:
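As background for the checkimages.py hunk: the duplicate check asks the MediaWiki API for the image's SHA-1 and then lists all images sharing that hash. A standalone sketch of that round-trip, under the assumption that the image exists; the image title is hypothetical, and the script itself additionally URL-encodes the title via convert_to_url():

    import re
    import wikipedia

    site = wikipedia.getSite('en', 'wikipedia')          # example site
    title = 'Example.jpg'                                # hypothetical image

    # 1. fetch the SHA-1 of the image via prop=imageinfo
    xml = site.getUrl(site.apipath() +
                      '?action=query&format=xml&titles=Image:%s&prop=imageinfo&iiprop=sha1' % title)
    sha1 = re.search(r'sha1="(.*?)"', xml).group(1)      # assumes the image was found

    # 2. list every image with the same hash via list=allimages&aisha1=
    xml = site.getUrl(site.apipath() +
                      '?action=query&format=xml&list=allimages&aisha1=%s' % sha1)
    duplicates = re.findall(r'<img name="(.*?)"', xml)   # more than one name => duplicates exist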
Modified: trunk/pywikipedia/pageimport.py
===================================================================
--- trunk/pywikipedia/pageimport.py	2008-05-07 21:51:23 UTC (rev 5325)
+++ trunk/pywikipedia/pageimport.py	2008-05-07 22:32:35 UTC (rev 5326)
@@ -67,7 +67,7 @@
         answer = wikipedia.inputChoice(u'Do you want to import %s?' % target, ['Yes', 'No'], ['y', 'N'], 'N')
         if answer in ['y', 'Y']:
             host = self.site().hostname()
-            address = '/w/index.php?title=%s&action=submit' % self.urlname()
+            address = self.site().path() + '?title=%s&action=submit' % self.urlname()
             # You need to be a sysop for the import.
             self.site().forceLogin(sysop = True)
             # Getting the token.
Modified: trunk/pywikipedia/rcsort.py
===================================================================
--- trunk/pywikipedia/rcsort.py	2008-05-07 21:51:23 UTC (rev 5325)
+++ trunk/pywikipedia/rcsort.py	2008-05-07 22:32:35 UTC (rev 5326)
@@ -41,7 +41,7 @@
 if not 'limit' in form:
     post += '&limit=1000'
 
-text = mysite.getUrl('/w/index.php?%s'%post)
+text = mysite.getUrl(mysite.path() + '?%s'%post)
 
 text = text.split('\n')
 rcoptions = False
@@ -62,10 +62,10 @@
             count += 1
         lines.append((user,count,line))
     elif line.find('rcoptions') > -1:
-        print line.replace("/w/index.php?title=Speciaal:RecenteWijzigingen&","rcsort.py?")
+        print line.replace(mysite.path() + "?title=Speciaal:RecenteWijzigingen&","rcsort.py?")
         rcoptions = True
     elif newbies and line.find('Nieuwste') > -1:
-        line = line.replace("/w/index.php?title=Speciaal:Bijdragen&","rcsort.py?").replace("target=newbies","newbies=true")
+        line = line.replace(mysite.path() + "?title=Speciaal:Bijdragen&","rcsort.py?").replace("target=newbies","newbies=true")
         if line.find('</fieldset>') > -1:
             line = line[line.find('</fieldset>')+11:]
         print line
Modified: trunk/pywikipedia/redirect.py
===================================================================
--- trunk/pywikipedia/redirect.py	2008-05-07 21:51:23 UTC (rev 5325)
+++ trunk/pywikipedia/redirect.py	2008-05-07 22:32:35 UTC (rev 5326)
@@ -250,12 +250,16 @@
             wikipedia.output(u'\nChecking redirect %i of %i...' % (num + 1, len(redict)))
 
+    # /wiki/
+    wiki = re.escape(wikipedia.getSite().nice_get_address(''))
+    # /w/index.php
+    index = re.escape(wikipedia.getSite().path())
     move_regex = re.compile(
-        r'<li>.*?<a href="/wiki/User:.*?>.*?</a> '
-        r'(<a href="/wiki/User_talk:.*?>Talk</a> | '
-        r'<a href="/wiki/Special:Contributions/.*?>contribs</a>) '
-        r'moved <a href="/w/index.php?title=.*?>(.*?)</a> to '
-        r'<a href="/wiki/.*?>.*?</a>.*?</li>' )
+        r'<li>.*?<a href="' + wiki + r'User:.*?>.*?</a> '
+        r'(<a href="' + wiki + r'User_talk:.*?>Talk</a> | '
+        r'<a href="' + wiki + r'Special:Contributions/.*?>contribs</a>) '
+        r'moved <a href="' + index + r'?title=.*?>(.*?)</a> to '
+        r'<a href="' + index + r'.*?>.*?</a>.*?</li>' )
 
     def get_moved_pages_redirects(self):
         '''generate redirects to recently-moved pages'''
 
@@ -263,7 +267,7 @@
         site = wikipedia.getSite()
         while offset <= 10000: # MW won't accept offset value > 10000
             move_url = \
-                "/w/index.php?title=Special:Log&limit=500&offset=%i&type=move"\
+                site.path() + "?title=Special:Log&limit=500&offset=%i&type=move"\
                 % offset
             try:
                 move_list = site.getUrl(move_url)
Modified: trunk/pywikipedia/welcome.py
===================================================================
--- trunk/pywikipedia/welcome.py	2008-05-07 21:51:23 UTC (rev 5325)
+++ trunk/pywikipedia/welcome.py	2008-05-07 22:32:35 UTC (rev 5326)
@@ -398,7 +398,8 @@
     # and i put them in a list (i find it more easy and secure).
     while 1:
         # FIXME: That's the regex, if there are problems, take a look here.
-        reg = r'(<a href="/w/index.php?title=%s(?P<user>.*?)&(?:amp;|)action=(?:edit|editredlink|edit&redlink=1)"' % talk
+
+        reg = r'(<a href="' + re.escape(wsite.path()) + r'?title=%s(?P<user>.*?)&(?:amp;|)action=(?:edit|editredlink|edit&redlink=1)"' % talk
         p = re.compile(reg, re.UNICODE)
         x = p.search(raw, pos)
         if x == None:
@@ -461,7 +462,7 @@
     pathWiki = wsite.family.nicepath(wsite.lang)
     #A little function to check if the user has already been blocked (to skip him).
     reg = r"""<li>\d\d:\d\d, \d(\d)? (.*?) \d\d\d\d <a href="""" + pathWiki + r"""(.*?)" title="(.*?)">(.*?)</a> (<a href="""" + pathWiki + r"""(.*?)" title="(.*?)">(.*?)</a>"""
-    block_text = wsite.getUrl('/w/index.php?title=Special:Log/block&page=User:' + username)
+    block_text = wsite.getUrl(wsite.path() + '?title=Special:Log/block&page=User:' + username)
     numblock = re.findall(reg, block_text)
     # If the bot doesn't find block-line (that means the user isn't blocked), it will return False otherwise True.
     if len(numblock) == 0: