jenkins-bot has submitted this change. ( https://gerrit.wikimedia.org/r/c/pywikibot/core/+/1052427?usp=email )
Change subject: [IMPR] use list comprehensions in several modules ......................................................................
[IMPR] use list comprehensions in several modules
also use dict.values() if key is not used
Change-Id: I7113e50ee624fc346799fa04fcb2f50e395d6603 --- M pywikibot/bot_choice.py M pywikibot/page/_collections.py M pywikibot/site/_apisite.py M scripts/dataextend.py M scripts/patrol.py M tests/site_tests.py M tests/utils.py M tests/wikibase_edit_tests.py 8 files changed, 46 insertions(+), 56 deletions(-)
Approvals: Xqt: Looks good to me, approved jenkins-bot: Verified
diff --git a/pywikibot/bot_choice.py b/pywikibot/bot_choice.py index 035acd8..2548a8b 100644 --- a/pywikibot/bot_choice.py +++ b/pywikibot/bot_choice.py @@ -71,10 +71,9 @@ @staticmethod def formatted(text: str, options: Iterable[Option], default: str | None = None) -> str: - """ - Create a text with the options formatted into it. + """Create a text with the options formatted into it.
- This static method is used by :py:meth:`pywikibot.input_choice`. + This static method is used by :meth:`pywikibot.input_choice`. It calls :py:obj:`format` for all *options* to combine the question for :py:meth:`pywikibot.input`.
@@ -84,9 +83,8 @@
:return: Text with the options formatted into it """ - formatted_options = [] - for option in options: - formatted_options.append(option.format(default=default)) + formatted_options = [option.format(default=default) + for option in options] # remove color highlights before fill function text = f"{text} ({', '.join(formatted_options)})" pattern = '<<[a-z]+>>' diff --git a/pywikibot/page/_collections.py b/pywikibot/page/_collections.py index 8f4ac1a..1abf430 100644 --- a/pywikibot/page/_collections.py +++ b/pywikibot/page/_collections.py @@ -1,6 +1,6 @@ """Structures holding data for Wikibase entities.""" # -# (C) Pywikibot team, 2019-2022 +# (C) Pywikibot team, 2019-2024 # # Distributed under the terms of the MIT license. # @@ -471,14 +471,13 @@ for dbname, sitelink in data.items(): if dbname not in diffto: continue + diffto_link = diffto[dbname] if diffto_link.get('title') == sitelink.get('title'): # compare badges - tmp_badges = [] diffto_badges = diffto_link.get('badges', []) badges = sitelink.get('badges', []) - for badge in set(diffto_badges) - set(badges): - tmp_badges.append('') + tmp_badges = [''] * len(set(diffto_badges) - set(badges)) for badge in set(badges) - set(diffto_badges): tmp_badges.append(badge) if tmp_badges: diff --git a/pywikibot/site/_apisite.py b/pywikibot/site/_apisite.py index 916d894..abca600 100644 --- a/pywikibot/site/_apisite.py +++ b/pywikibot/site/_apisite.py @@ -1651,7 +1651,7 @@ if 'pages' not in result['query']: # No "pages" element might indicate a circular redirect # Check that a "to" link is also a "from" link in redirmap - for _from, _to in redirmap.items(): + for _to in redirmap.values(): if _to['title'] in redirmap: raise CircularRedirectError(page)
diff --git a/scripts/dataextend.py b/scripts/dataextend.py index 4ce1227..4ae3dcc 100755 --- a/scripts/dataextend.py +++ b/scripts/dataextend.py @@ -3190,12 +3190,11 @@ section = self.findbyre( r'(?s)"description">\s*<span[^<>]*>(.*?)</span>', html) if section: - result = [] texts = [] for subsection in section.split(' et '): texts += self.findallbyre(r'(\w[-\s\w&\']+)', subsection) - for text in texts[:8]: - result.append(self.findbyre(r'(.+)', text, 'occupation')) + result = [self.findbyre(r'(.+)', text, 'occupation')
 + for text in texts[:8]] return result return None
@@ -5261,24 +5260,20 @@ if section: parts = self.findallbyre(r'(?s)(<tr><th.*?</tr>\s*<tr>.*?</tr>)', section) - result = [] - for part in parts: - if '[nominee]' not in part: - result.append( - self.findbyre(r'<th[^<>]*>(.*?)<', section, 'award')) + result = [self.findbyre(r'<th[^<>]*>(.*?)<', section, 'award') + for part in parts if '[nominee]' not in part] return result + return None
def findnominations(self, html: str): section = self.findbyre(r'(?s)<div id="awards".*?>(.*?)</table>', html) if section: parts = self.findallbyre(r'(?s)(<tr><th.*?</tr>\s*<tr>.*?</tr>)', section) - result = [] - for part in parts: - if '[nominee]' in part: - result.append( - self.findbyre(r'<th[^<>]*>(.*?)<', section, 'award')) + result = [self.findbyre(r'<th[^<>]*>(.*?)<', section, 'award') + for part in parts if '[nominee]' in part] return result + return None
def findspouses(self, html: str): return self.findallbyre( @@ -6305,14 +6300,15 @@ if section: preresults = self.findallbyre(r'(?s)<tr>(.*?)</tr>', section.replace('&nbsp;', ' '))[:5] - results = [] - for preresult in preresults: - if int(self.findbyre(r'">(\d+)</a>', preresult) or 0) > 5: - results.append( - self.findbyre( - r'(?s)"Mathematics Subject Classification">(.*?)<', - preresult, 'subject')) - return results + result = [ + self.findbyre( + r'(?s)"Mathematics Subject Classification">(.*?)<', + preresult, 'subject') + for preresult in preresults + if int(self.findbyre(r'">(\d+)</a>', preresult) or 0) > 5 + ] + return result + return None
def findwebsite(self, html: str): return self.findbyre(r'(?s)<td>Homepage:</td>\s*<td><a[^<>]*>(.*?)<', @@ -8340,15 +8336,16 @@ r'(?s)<b>Woon- en verblijfplaatsen</b>\s*</td>\s*</tr>\s*<tr>(.*?)</tbody>', html) if section: - result = [] subsections = self.findallbyre(r'(?s)(<tr.*?</tr>)', section) - for subsection in subsections: - result.append( - self.findbyre( - r'<td width="auto">([^<>]*)</td>', subsection, 'city') - or self.findbyre( - r'<span[^<>]*>(.*?)<', subsection, 'city')) + result = [ + self.findbyre( + r'<td width="auto">([^<>]*)</td>', subsection, 'city') + or self.findbyre( + r'<span[^<>]*>(.*?)<', subsection, 'city') + for subsection in subsections + ] return result + return None
def findoccupations(self, html: str): section = self.findbyre( @@ -14882,10 +14879,9 @@
def findsources(self, html: str): sources = self.getvalues('670', 'a', html) - result = [] - for source in sources: - if source and ' by ' not in source and ' / ' not in source: - result.append(self.findbyre('(.*)', source, 'source')) + result = [self.findbyre('(.*)', source, 'source') + for source in sources + if source and ' by ' not in source and ' / ' not in source] return result
diff --git a/scripts/patrol.py b/scripts/patrol.py index 5712ba4..ec0259b 100755 --- a/scripts/patrol.py +++ b/scripts/patrol.py @@ -354,9 +354,8 @@ if not self.linkedpages: verbose_output('loading page links on ' + self.page_title) p = pywikibot.Page(self.site, self.page_title) - linkedpages = [] - for linkedpage in p.linkedPages(): - linkedpages.append(linkedpage.title()) + linkedpages = [linkedpage.title() + for linkedpage in p.linkedPages()]
self.linkedpages = linkedpages verbose_output(f'Loaded {len(linkedpages)} page links') diff --git a/tests/site_tests.py b/tests/site_tests.py index 3a087db..0016b2c 100755 --- a/tests/site_tests.py +++ b/tests/site_tests.py @@ -741,7 +741,7 @@
fp2 = pywikibot.FilePage(site, 'File:T276726.png') site.loadimageinfo(fp2, history=True) - for idx, v in fp2._file_revisions.items(): + for v in fp2._file_revisions.values(): if v['timestamp'] in (ts1, ts2): self.assertTrue(hasattr(v, 'commenthidden'))
@@ -753,7 +753,7 @@
fp3 = pywikibot.FilePage(site, 'File:T276726.png') site.loadimageinfo(fp3, history=True) - for idx, v in fp3._file_revisions.items(): + for v in fp3._file_revisions.values(): if v['timestamp'] in (ts1, ts2): self.assertFalse(hasattr(v, 'commenthidden')) self.assertFalse(hasattr(v, 'userhidden')) diff --git a/tests/utils.py b/tests/utils.py index 31aea38..053743f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -357,11 +357,10 @@ if self.family.name == 'wikisource': extensions.append({'name': 'ProofreadPage'}) self._siteinfo._cache['extensions'] = (extensions, True) - aliases = [] - for alias in ('PrefixIndex', ): - # TODO: Not all follow that scheme (e.g. "BrokenRedirects") - aliases.append( - {'realname': alias.capitalize(), 'aliases': [alias]}) + + # TODO: Not all follow that scheme (e.g. "BrokenRedirects") + aliases = [{'realname': alias.capitalize(), 'aliases': [alias]} + for alias in ('PrefixIndex', )] self._siteinfo._cache['specialpagealiases'] = (aliases, True) self._msgcache = {'*': 'dummy entry', 'hello': 'world'}
diff --git a/tests/wikibase_edit_tests.py b/tests/wikibase_edit_tests.py index 1ee33de..d9069d6 100755 --- a/tests/wikibase_edit_tests.py +++ b/tests/wikibase_edit_tests.py @@ -91,9 +91,8 @@ item = pywikibot.PropertyPage(testsite, 'P115') item.get() if 'P115' in item.claims: - to_remove = [] - for claim in item.claims['P115']: - to_remove.append({'id': claim.toJSON()['id'], 'remove': ''}) + to_remove = [{'id': claim.toJSON()['id'], 'remove': ''} + for claim in item.claims['P115']] item.editEntity({'claims': to_remove})
claim = pywikibot.page.Claim(