Revision: 4771
Author: filnik
Date: 2007-12-28 17:16:52 +0000 (Fri, 28 Dec 2007)
Log Message:
-----------
Adding a new script to the framework. It's already used on it.wiki and en.wiki to tag the lonely pages. It supports a lot of generators (including the standard ones); stable version. Two days of long testing.
Added Paths:
-----------
trunk/pywikipedia/lonelypages.py
Added: trunk/pywikipedia/lonelypages.py
===================================================================
--- trunk/pywikipedia/lonelypages.py (rev 0)
+++ trunk/pywikipedia/lonelypages.py 2007-12-28 17:16:52 UTC (rev 4771)
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+This is a script written to add the template "orphan" to the pages that aren't linked by other pages.
+It may raise some strange errors at times; I hope that all of them are fixed in this version.
+
+-enable: - Enable or disable the bot via a Wiki Page.
+-disambig: - Set a page where the bot saves the names of the disambig pages found (default: skip the pages)
+-limit: - Set how many pages check.
+-page: - Work only on the page given.
+-always - Always say yes, won't ask
+-newpages: - Check the newpages (default: the first 50 pages)
+
+-standard arguments (like -start, -cat, -ref and so on)
+
+--- FixMes ---
+* Check that all the code hasn't bugs
+
+--- Credit and Help ---
+This Script has been developed by Pietrodn and Filnik on botwiki. If you want to help us
+improving our script archive and pywikipediabot's archive or you simply need help
+you can find us here: http://botwiki.sno.cc
+
+--- Examples ---
+python lonelypages.py -enable:User:Bot/CheckBot -always
+"""
+#
+# (C) Pietrodn, it.wiki 2006-2007
+# (C) Filnik, it.wiki 2007
+#
+# Distributed under the terms of the MIT license.
+#
+__version__ = '$Id: lonelypages.py,v 1.0 2007/12/28 19.16.00 filnik Exp$'
+#
+
+import wikipedia, pagegenerators
+import re
+
+#####################################################
+# Here you have to put the config for your Project. #
+#####################################################
+
+# ************* Modify only below! ************* #
+
# Template that will be prepended to orphan pages, per language code.
Template = {
    'en': u'{{Orphan|date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
    'it': u'{{O||mese={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
}

# Edit summary used when the orphan template is added.
commento = {
    'en': u'Bot: Orphan page, add template',
    'it': u'Bot: Voce orfana, aggiungo template {{O}}',
}

# Edit summary used when a disambiguation page is reported on the
# disambig list page (unused when disambigPage is None).
commenttodisambig = {
    'en': u'Bot: Adding a disambig page',
    'it': u'Bot: Aggiungo una disambigua',
}

# Regexes that detect an already-present orphan template, so that the bot
# never adds it twice. Keep at least one capturing group "()" in every
# pattern so the bot always "finds something" when the template is there.
# If you need help with regex, ask on botwiki ( http://botwiki.sno.cc ).
exception = {
    'en': [r'\{\{(?:template:|)(orphan)[\|\}]', r'\{\{(?:template:|)(wi)[\|\}]'],
    'it': [r'\{\{(?:template:|)(o)[\|\}]'],
}
+
+# ************* Modify only above! ************* #
+
def main():
    """Parse the command line, find lonely (orphan) pages and tag them.

    Reads the module-level per-language configuration (Template, commento,
    commenttodisambig, exception) via wikipedia.translate() and iterates a
    page generator, adding the orphan template to every page that has no
    references pointing to it.
    """
    # Load the configurations in the function namespace
    global commento; global Template; global disambigPage; global commenttodisambig
    global exception

    enablePage = None    # Run unconditionally unless an enable page is given
    limit = 50000        # All the pages! (hopefully no project has that many lonely pages)
    generator = None     # Use the default lonelypages generator unless overridden
    genFactory = pagegenerators.GeneratorFactory()  # Handles the standard generator args
    nwpages = False      # True when the newpages generator (which yields tuples) is used
    always = False       # True = never ask for confirmation before saving
    disambigPage = None  # If no disambigPage given, disambig pages are skipped
    # Parse the command-line arguments.
    # BUGFIX: '-enable' and '-disambig' used to start two separate if-chains,
    # so a '-enable:...' argument fell through into the final else and called
    # genFactory.handleArg() with it, clobbering any generator chosen before.
    for arg in wikipedia.handleArgs():
        if arg.startswith('-enable'):
            if len(arg) == 7:
                enablePage = wikipedia.input(u'Would you like to check if the bot should run or not?')
            else:
                enablePage = arg[8:]
        elif arg.startswith('-disambig'):
            if len(arg) == 9:
                disambigPage = wikipedia.input(u'In which page should the bot save the disambig pages?')
            else:
                disambigPage = arg[10:]
        elif arg.startswith('-limit'):
            if len(arg) == 6:
                limit = int(wikipedia.input(u'How many pages do you want to check?'))
            else:
                limit = int(arg[7:])
        elif arg.startswith('-newpages'):
            if len(arg) == 9:
                nwlimit = 50  # Default: the latest 50 pages
            else:
                nwlimit = int(arg[10:])
            generator = wikipedia.getSite().newpages(number = nwlimit)
            nwpages = True
        elif arg.startswith('-page'):
            # BUGFIX: the prompt was a copy-paste of the -limit one.
            if len(arg) == 5:
                generator = [wikipedia.Page(wikipedia.getSite(), wikipedia.input(u'Which page do you want to check?'))]
            else:
                generator = [wikipedia.Page(wikipedia.getSite(), arg[6:])]
        elif arg == '-always':
            always = True
        else:
            generator = genFactory.handleArg(arg)
    # Retrieve the site
    wikiSite = wikipedia.getSite()
    # If no generator was given, use the default lonelypages special page
    if generator is None:
        generator = wikiSite.lonelypages(repeat = True, number = limit)
    # Take the configurations according to our project
    comment = wikipedia.translate(wikiSite, commento)
    commentdisambig = wikipedia.translate(wikiSite, commenttodisambig)
    template = wikipedia.translate(wikiSite, Template)
    exception = wikipedia.translate(wikiSite, exception)
    # EnablePage part: allow turning the bot off by editing a wiki page
    # (useful when the bot is run unattended on a server).
    if enablePage is not None:
        enable = wikipedia.Page(wikiSite, enablePage)
        try:
            getenable = enable.get()
        except wikipedia.NoPage:
            wikipedia.output(u"%s doesn't exist, I use the page as if it was blank!" % enable.title())
            getenable = ''
        except wikipedia.IsRedirectPage:
            # BUGFIX: was "wikiepedia.IsRedirect" — a NameError at runtime.
            wikipedia.output(u"%s is a redirect, skip!" % enable.title())
            getenable = ''
        if getenable != 'enable':
            wikipedia.output('The bot is disabled')
            wikipedia.stopme()
            # BUGFIX: without this return the bot kept running while "disabled".
            return
    # DisambigPage part: load the report page for disambiguations, if any.
    if disambigPage is not None:
        disambigpage = wikipedia.Page(wikiSite, disambigPage)
        try:
            disambigtext = disambigpage.get()
        except wikipedia.NoPage:
            wikipedia.output(u"%s doesn't exist, skip!" % disambigpage.title())
            disambigtext = ''
        except wikipedia.IsRedirectPage:
            # BUGFIX: was "wikiepedia.IsRedirect" — a NameError at runtime.
            wikipedia.output(u"%s is a redirect, don't use it!" % disambigpage.title())
            disambigPage = None
    # Main Loop
    for page in generator:
        if nwpages:
            page = page[0]  # The newpages generator returns a tuple, not a Page object.
        wikipedia.output(u"Checking %s..." % page.title())
        if page.isRedirectPage():  # If redirect, skip!
            wikipedia.output(u'%s is a redirect! Skip...' % page.title())
            continue
        # getReferences() returns a generator; collect the non-None entries.
        refsList = []
        for ref in page.getReferences():
            if ref is None:
                # We have to find out why the generator can yield None here.
                wikipedia.output(u'Error: 1 --> Skip page')
                continue
            refsList.append(ref)
        # A page with references is not an orphan.
        # (The old unreachable "elif refsList == None" dead branch was removed.)
        if refsList:
            wikipedia.output(u"%s isn't orphan! Skip..." % page.title())
            continue
        # Ok, no refs, no redirect... let's check if there's already the template
        try:
            oldtxt = page.get()
        except wikipedia.NoPage:
            wikipedia.output(u"%s doesn't exist! Skip..." % page.title())
            continue
        except wikipedia.IsRedirectPage:
            wikipedia.output(u"%s is a redirect! Skip..." % page.title())
            continue
        # A 'break' in the regex loop can't 'continue' the outer loop,
        # so remember the result in a flag.
        found = False
        for regexp in exception:
            if re.findall(regexp, oldtxt.lower()):
                # Found a template! Let's skip the page!
                wikipedia.output(u'Your regex has found something in %s, skipping...' % page.title())
                found = True
                break
        if found:
            continue
        # Disambig pages are reported on disambigPage (if given) or skipped.
        if page.isDisambig() and disambigPage is not None:
            wikipedia.output(u'%s is a disambig page, report..' % page.title())
            disambigtext = u"%s\n*[[%s]]" % (disambigtext, page.title())
            disambigpage.put(disambigtext, commentdisambig)
            continue
        elif page.isDisambig():
            wikipedia.output(u'%s is a disambig page, skip...' % page.title())
            continue
        # Ok, the page needs the template. Let's put it there!
        newtxt = u"%s\n%s" % (template, oldtxt)        # Adding the template in the text
        wikipedia.output(u"\t\t>>> %s <<<" % page.title())  # Showing the title
        wikipedia.showDiff(oldtxt, newtxt)             # Showing the changes
        choice = 'y'  # Default answer
        if not always:
            choice = wikipedia.inputChoice(u'Orphan page found, shall I add the template?', [u'Yes', u'No', u'All'], [u'y', u'n', u'a'], [u'Y', u'N', 'A'])
            if choice.lower() in [u'a', u'all']:
                always = True
                choice = 'y'
        if choice.lower() in [u'y', u'yes']:
            try:
                page.put(newtxt, comment)
            except wikipedia.EditConflict:
                wikipedia.output(u'Edit Conflict! Skip...')
                continue

if __name__ == '__main__':
    try:
        main()
    finally:
        wikipedia.stopme()
Revision: 4770
Author: rotem
Date: 2007-12-28 17:12:53 +0000 (Fri, 28 Dec 2007)
Log Message:
-----------
This seems to partially fix the problems.
Modified Paths:
--------------
trunk/pywikipedia/catlib.py
Modified: trunk/pywikipedia/catlib.py
===================================================================
--- trunk/pywikipedia/catlib.py 2007-12-28 16:47:34 UTC (rev 4769)
+++ trunk/pywikipedia/catlib.py 2007-12-28 17:12:53 UTC (rev 4770)
@@ -159,6 +159,7 @@
if self.site().versionnumber() < 4:
Rtitle = re.compile('title\s?=\s?\"([^\"]*)\"')
elif self.site().versionnumber() < 8:
+ # FIXME seems to parse all links
Rtitle = re.compile('/\S*(?: title\s?=\s?)?\"([^\"]*)\"')
else:
Rtitle = re.compile(
@@ -189,35 +190,28 @@
wikipedia.get_throttle()
txt = self.site().getUrl(path)
# index where subcategory listing begins
- try:
- ibegin = txt.index('<div id="mw-subcategories">')
- skippedCategoryDescription = True
- except ValueError:
- try:
+ if self.site().versionnumber() >= 9:
+ # These IDs were introduced in 1.9
+ if '<div id="mw-subcategories">' in txt:
+ ibegin = txt.index('<div id="mw-subcategories">')
+ elif '<div id="mw-pages">' in txt:
ibegin = txt.index('<div id="mw-pages">')
- skippedCategoryDescription = True
- except ValueError:
- if self.site().has_mediawiki_message('category-empty') and self.site().mediawiki_message('category-empty') in txt:
- # No articles or subcategories
- return
- else:
- try:
- ibegin = txt.index('<!-- start content -->') # does not work for cats without text
- # TODO: This parses category text and may think they are
- # pages in category! Check for versions without the message
- # "category-empty".
- skippedCategoryDescription = False
- except ValueError:
- wikipedia.output("\nCategory page detection is not bug free. Please report this error!")
- raise
+ elif '<div id="mw-category-media">' in txt:
+ ibegin = txt.index('<div id="mw-category-media">')
+ else:
+ # No pages
+ return
+ else:
+ ibegin = txt.index('<!-- start content -->') # does not work for cats without text
+ # TODO: This parses category text and may think they are
+ # pages in category! Check for versions before 1.9
# index where article listing ends
- try:
+ if '<div class="printfooter">' in txt:
iend = txt.index('<div class="printfooter">')
- except ValueError:
- try:
- iend = txt.index('<div id="catlinks">')
- except ValueError:
- iend = txt.index('<!-- end content -->')
+ elif '<div class="catlinks">' in txt:
+ iend = txt.index('<div class="catlinks">')
+ else:
+ iend = txt.index('<!-- end content -->')
txt = txt[ibegin:iend]
for title in Rtitle.findall(txt):
if title == self.title():
@@ -244,16 +238,10 @@
# defaultNamespace feature to get everything correctly.
yield ARTICLE, wikipedia.ImagePage(self.site(), title)
# try to find a link to the next list page
- # If skippedCategoryDescription is False, then there are no pages
- # or subcategories, so there cannot be a next list page
- if skippedCategoryDescription:
- matchObj = RLinkToNextPage.search(txt)
- if matchObj:
- currentPageOffset = matchObj.group(1)
- wikipedia.output('There are more articles in %s.'
- % self.title())
- else:
- break
+ matchObj = RLinkToNextPage.search(txt)
+ if matchObj:
+ currentPageOffset = matchObj.group(1)
+ wikipedia.output('There are more articles in %s.' % self.title())
else:
break
Revision: 4769
Author: rotem
Date: 2007-12-28 16:47:34 +0000 (Fri, 28 Dec 2007)
Log Message:
-----------
Avoiding a problematic parsing of the content if there are no entries in the category, using the message category-empty. Adding a TODO note to check behavior for versions in which category-empty does not exist. Also fixing a problem in a message.
Modified Paths:
--------------
trunk/pywikipedia/category.py
trunk/pywikipedia/catlib.py
Modified: trunk/pywikipedia/category.py
===================================================================
--- trunk/pywikipedia/category.py 2007-12-28 16:45:50 UTC (rev 4768)
+++ trunk/pywikipedia/category.py 2007-12-28 16:47:34 UTC (rev 4769)
@@ -412,7 +412,7 @@
if oldMovedTalk is not None:
oldMovedTalk.delete(reason, confirm)
else:
- wikipedia.output('Couldn\'t delete %s - not empty.' % (self.oldCat.title(), self.newCatTitle))
+ wikipedia.output('Couldn\'t delete %s - not empty.' % self.oldCat.title())
class CategoryListifyRobot:
'''
Modified: trunk/pywikipedia/catlib.py
===================================================================
--- trunk/pywikipedia/catlib.py 2007-12-28 16:45:50 UTC (rev 4768)
+++ trunk/pywikipedia/catlib.py 2007-12-28 16:47:34 UTC (rev 4769)
@@ -197,12 +197,19 @@
ibegin = txt.index('<div id="mw-pages">')
skippedCategoryDescription = True
except ValueError:
- try:
- ibegin = txt.index('<!-- start content -->') # does not work for cats without text
- skippedCategoryDescription = False
- except ValueError:
- wikipedia.output("\nCategory page detection is not bug free. Please report this error!")
- raise
+ if self.site().has_mediawiki_message('category-empty') and self.site().mediawiki_message('category-empty') in txt:
+ # No articles or subcategories
+ return
+ else:
+ try:
+ ibegin = txt.index('<!-- start content -->') # does not work for cats without text
+ # TODO: This parses category text and may think they are
+ # pages in category! Check for versions without the message
+ # "category-empty".
+ skippedCategoryDescription = False
+ except ValueError:
+ wikipedia.output("\nCategory page detection is not bug free. Please report this error!")
+ raise
# index where article listing ends
try:
iend = txt.index('<div class="printfooter">')
I am running a MediaWiki wiki, and I am attempting to use pywikipedia to
handle some of the administrative tasks.
I have defined the new family, edited the user_config.py file, and I have
used the bot to preload some content. However I have a problem: the bot
cannot get a page from the server.
As a sample test (run from Python IDE):
import wikipedia
site = wikipedia.getSite()
page = wikipedia.Page(site,"Test")
page.put("This is a test")
And I have a new page (or a reedited page) with the provided content. This
is okay.
However:
page = wikipedia.Page(site,"Test")
print page.get()
returns a NoPage error message.
I have test against Wikipedia using the same installation and I can read the
articles without any problems.
I suspect that I might have a problem in either the definition of the
family, or the configuration of the wiki. I had already defined families
for Wikia and for Wikiversity in Spanish without any problems (but I might
have missed something).
On my site, I have some rewrite rules, however a request such as
/index.php?title=Test&action=raw works fine. (As does /Test?action=raw and
/w-raw/Test ). The wiki is also using a skin designed for that site.
Thank you for your advice.
-- Carlos Th
Bugs item #1859078, was opened at 2007-12-27 22:07
Message generated for change (Comment added) made by rotemliss
You can respond by visiting:
https://sourceforge.net/tracker/?func=detail&atid=603138&aid=1859078&group_…
Please note that this message will contain a full copy of the comment thread,
including the initial issue submission, for this request,
not just the latest update.
Category: None
Group: None
Status: Open
Resolution: None
Priority: 5
Private: No
Submitted By: Nicolas Dumazet (nicdumz)
Assigned to: Nobody/Anonymous (nobody)
Summary: interwiki.py causing crash in wikipedia::removeLanguageLinks
Initial Comment:
Appeared to run correctly for a while, then :
Dump fr (wikipedia) saved
Traceback (most recent call last):
File "interwiki.py", line 1587, in <module>
bot.run()
File "interwiki.py", line 1364, in run
self.queryStep()
File "interwiki.py", line 1338, in queryStep
self.oneQuery()
File "interwiki.py", line 1334, in oneQuery
subject.workDone(self)
File "interwiki.py", line 707, in workDone
elif page.isEmpty() and not page.isCategory():
File "/home/nico/projets/pywikipedia/wikipedia.py", line 846, in isEmpty
txt = removeLanguageLinks(txt)
File "/home/nico/projets/pywikipedia/wikipedia.py", line 3019, in removeLanguageLinks
['nowiki', 'comment', 'math', 'pre'], marker=marker)
File "/home/nico/projets/pywikipedia/wikipedia.py", line 2795, in replaceExcept
import weblinkchecker
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 3: ordinal not in range(128)
For the record, the exact command was :
python interwiki.py -autonomous -skipauto -start:!
Thanks,
Nicolas Dumazet.
----------------------------------------------------------------------
Comment By: Rotem Liss (rotemliss)
Date: 2007-12-28 15:39
Message:
Logged In: YES
user_id=1327030
Originator: NO
Fixed in r4766.
----------------------------------------------------------------------
Comment By: Nicolas Dumazet (nicdumz)
Date: 2007-12-27 22:48
Message:
Logged In: YES
user_id=1963242
Originator: YES
This is apparently a consequence of r4765, since reverting to r4764 is a
temporary fix.
----------------------------------------------------------------------
You can respond by visiting:
https://sourceforge.net/tracker/?func=detail&atid=603138&aid=1859078&group_…
Revision: 4765
Author: rotem
Date: 2007-12-27 18:58:30 +0000 (Thu, 27 Dec 2007)
Log Message:
-----------
(patch 1858181) weblinkcheker.py - ksh language update
Modified Paths:
--------------
trunk/pywikipedia/weblinkchecker.py
Modified: trunk/pywikipedia/weblinkchecker.py
===================================================================
--- trunk/pywikipedia/weblinkchecker.py 2007-12-27 18:50:16 UTC (rev 4764)
+++ trunk/pywikipedia/weblinkchecker.py 2007-12-27 18:58:30 UTC (rev 4765)
@@ -105,6 +105,7 @@
'he': u'בוט: מדווח על קישור חיצוני בלתי זמין',
'ia': u'Robot: Reporto de un ligamine externe non functionante',
'kk': u'Бот: Қатынаулы емес сілтеме туралы есеп беру',
+ 'ksh': u'Bot: Ene Weblengk jeijt nit mih.',
'nds': u'Lenk-Bot: Weblenk geiht nich mehr',
'nl': u'Robot: Melding (tijdelijk) onbereikbare externe link',
'no': u'bot: Rapporter død eksternlenke',
@@ -123,6 +124,7 @@
'he': u'== קישור שבור ==\n\nבמהלך מספר ריצות אוטומטיות של הבוט, נמצא שהקישור החיצוני הבא אינו זמין. אנא בדקו אם הקישור אכן שבור, ותקנו אותו או הסירו אותו במקרה זה!\n\n%s\n%s--~~~~',
'ia': u'== Ligamine defuncte ==\n\nDurante plure sessiones automatic, le robot ha constatate que le sequente ligamine externe non es disponibile. Per favor confirma que le ligamine de facto es defuncte, e in caso de si, repara o elimina lo!\n\n%s\n%s--~~~~',
'kk': u'== Өлі сілтеме ==\n\nӨздікті бот бірнеше жегілгенде келесі сыртқы сілтемеге қатынай алмады. Бұл сілтеменің қатыналуын тексеріп шығыңыз да, не түзетіңіз, не аластаңыз!\n\n%s\n%s--~~~~',
+ 'ksh': u'== Han enne kapdde Weblengk jefonge ==\n\nEsch han bonge die Weblingks paa Mol jetschäck. Se han allemooLde nit jedon Doht ens donnoh loore, un dä Lengk reparreere odo eruß nämme.\n\n%s\n%s--~~~~',
'nds': u'== Weblenk geiht nich mehr ==\n\nDe Bot hett en poor Mal al versöcht, disse Siet optoropen un kunn dor nich bikamen. Schall man een nakieken, wat de Siet noch dor is un den Lenk richten oder rutnehmen.\n\n%s\n%s--~~~~',
'nl': u'== Dode link ==\nTijdens enkele automatische controles bleek de onderstaande externe link onbereikbaar. Controleer alstublieft of de link inderdaad onbereikbaar is. Verwijder deze tekst alstublieft na een succesvolle controle of na het verwijderen of corrigeren van de externe link.\n\n%s\n%s--~~~~[[Categorie:Wikipedia:Onbereikbare externe link]]',
'no': u'{{subst:Bruker:JhsBot/Død lenke}}\n\n%s\n%s~~~~\n\n{{ødelagt lenke}}',
@@ -136,6 +138,7 @@
'en': u'\nThe web page has been saved by the Internet Archive. Please consider linking to an appropriate archived version: [%s]. ',
'he': u'\nעמוד האינטרנט נשמר על־ידי ארכיון האינטרנט. אנא שקלו לקשר לגרסה המאורכבת המתאימה: [%s]',
'kk': u'\nБұл ғаламтордың беті Интернет Мұрағатында сақталған. Мұрағатталған нұсқасына сәйкесті сілтеуді ескеріңіз: [%s]. ',
+ 'ksh': u'De Websick es em ''Internet Archive'' faßjehallde. Kannß jo felleijsj_obb_en Koppi doh verlengke, süsh hee: [%s]. ',
'nl': u'\nDeze website is bewaard in het Internet Archive. Overweeg te linken naar een gearchiveerde pagina: [%s]. ',
'no': u'\nDenne nettsiden er lagra i Internet Archive. Vurder om lenka kan endres til å peke til en av de arkiverte versjonene: [%s]. ',
'pt': u'Esta página web foi gravada na Internet Archive. Por favor considere o link para a versão arquivada: [%s]. ',
Revision: 4766
Author: rotem
Date: 2007-12-28 13:36:00 +0000 (Fri, 28 Dec 2007)
Log Message:
-----------
Fixing an error.
Modified Paths:
--------------
trunk/pywikipedia/weblinkchecker.py
Modified: trunk/pywikipedia/weblinkchecker.py
===================================================================
--- trunk/pywikipedia/weblinkchecker.py 2007-12-27 18:58:30 UTC (rev 4765)
+++ trunk/pywikipedia/weblinkchecker.py 2007-12-28 13:36:00 UTC (rev 4766)
@@ -138,7 +138,7 @@
'en': u'\nThe web page has been saved by the Internet Archive. Please consider linking to an appropriate archived version: [%s]. ',
'he': u'\nעמוד האינטרנט נשמר על־ידי ארכיון האינטרנט. אנא שקלו לקשר לגרסה המאורכבת המתאימה: [%s]',
'kk': u'\nБұл ғаламтордың беті Интернет Мұрағатында сақталған. Мұрағатталған нұсқасына сәйкесті сілтеуді ескеріңіз: [%s]. ',
- 'ksh': u'De Websick es em ''Internet Archive'' faßjehallde. Kannß jo felleijsj_obb_en Koppi doh verlengke, süsh hee: [%s]. ',
+ 'ksh': u"De Websick es em ''Internet Archive'' faßjehallde. Kannß jo felleijsj_obb_en Koppi doh verlengke, süsh hee: [%s]. ",
'nl': u'\nDeze website is bewaard in het Internet Archive. Overweeg te linken naar een gearchiveerde pagina: [%s]. ',
'no': u'\nDenne nettsiden er lagra i Internet Archive. Vurder om lenka kan endres til å peke til en av de arkiverte versjonene: [%s]. ',
'pt': u'Esta página web foi gravada na Internet Archive. Por favor considere o link para a versão arquivada: [%s]. ',
Patches item #1859454, was opened at 2007-12-27 18:52
Message generated for change (Tracker Item Submitted) made by Item Submitter
You can respond by visiting:
https://sourceforge.net/tracker/?func=detail&atid=603140&aid=1859454&group_…
Please note that this message will contain a full copy of the comment thread,
including the initial issue submission, for this request,
not just the latest update.
Category: None
Group: None
Status: Open
Resolution: None
Priority: 5
Private: No
Submitted By: AndreasJS (andreasjs)
Assigned to: Nobody/Anonymous (nobody)
Summary: category.py uses GeneratorFactory
Initial Comment:
The "add" action now uses pagegenerators.GeneratorFactory() to select the pages for greater flexibility. The option -link is set as a default for compatibility with previous versions.
----------------------------------------------------------------------
You can respond by visiting:
https://sourceforge.net/tracker/?func=detail&atid=603140&aid=1859454&group_…