diff --git a/pyproject.toml b/pyproject.toml
index 97ea4375f..96e2d669a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -76,7 +76,7 @@ dev = [
 ]
 static-analysis = [
     "autopep8~=2.0",
-    "ruff~=0.7.0",
+    "ruff~=0.8.0",
 ]
 test = [
     "pytest~=8.1",
@@ -186,6 +186,7 @@ ignore = [
     "E501",  # line-too-long
     "E731",  # lambda-assignment
     "E741",  # ambiguous-variable-name
+    "UP031",  # printf-string-formatting
     "UP036",  # outdated-version-block
     "B006",  # mutable-argument-default
     "B008",  # function-call-in-default-argument
@@ -258,9 +259,6 @@ select = [
     "A002",  # builtin-argument-shadowing
     "C408",  # unnecessary-collection-call
 ]
-"yt_dlp/jsinterp.py" = [
-    "UP031",  # printf-string-formatting
-]
 
 [tool.ruff.lint.isort]
 known-first-party = [
diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
index a9a8e4133..65b72e026 100644
--- a/yt_dlp/YoutubeDL.py
+++ b/yt_dlp/YoutubeDL.py
@@ -1116,7 +1116,7 @@ def report_file_delete(self, file_name):
     def raise_no_formats(self, info, forced=False, *, msg=None):
         has_drm = info.get('_has_drm')
         ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
-        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
+        msg = msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'
         if forced or not ignored:
             raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                  expected=has_drm or ignored or expected)
@@ -2196,7 +2196,7 @@ def _select_formats(self, formats, selector):
     def _default_format_spec(self, info_dict):
         prefer_best = (
             self.params['outtmpl']['default'] == '-'
-            or info_dict.get('is_live') and not self.params.get('live_from_start'))
+            or (info_dict.get('is_live') and not self.params.get('live_from_start')))
 
         def can_merge():
             merger = FFmpegMergerPP(self)
@@ -2365,7 +2365,7 @@ def _merge(formats_pair):
                     vexts=[f['ext'] for f in video_fmts],
                     aexts=[f['ext'] for f in audio_fmts],
                     preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
-                                 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
+                                 or (self.params.get('prefer_free_formats') and ('webm', 'mkv'))))
 
                 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
 
@@ -3541,8 +3541,8 @@ def ffmpeg_fixup(cndn, msg, cls):
                      and info_dict.get('container') == 'm4a_dash',
                      'writing DASH m4a. Only some players support this container',
                      FFmpegFixupM4aPP)
-        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
-                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
+        ffmpeg_fixup((downloader == 'hlsnative' and not self.params.get('hls_use_mpegts'))
+                     or (info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None),
                      'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                      FFmpegFixupM3u8PP)
         ffmpeg_fixup(downloader == 'dashsegments'
diff --git a/yt_dlp/__init__.py b/yt_dlp/__init__.py
index a1880bf7d..20111175b 100644
--- a/yt_dlp/__init__.py
+++ b/yt_dlp/__init__.py
@@ -1062,7 +1062,7 @@ def make_row(target, handler):
         # If we only have a single process attached, then the executable was double clicked
         # When using `pyinstaller` with `--onefile`, two processes get attached
         is_onefile = hasattr(sys, '_MEIPASS') and os.path.basename(sys._MEIPASS).startswith('_MEI')
-        if attached_processes == 1 or is_onefile and attached_processes == 2:
+        if attached_processes == 1 or (is_onefile and attached_processes == 2):
            print(parser._generate_error_message(
                'Do not double-click the executable, instead call it from a command line.\n'
                'Please read the README for further information on how to use yt-dlp: '
@@ -1109,9 +1109,9 @@ def main(argv=None):
 from .extractor import gen_extractors, list_extractors
 
 __all__ = [
-    'main',
     'YoutubeDL',
-    'parse_options',
     'gen_extractors',
     'list_extractors',
+    'main',
+    'parse_options',
 ]
diff --git a/yt_dlp/aes.py b/yt_dlp/aes.py
index 0930d36df..9908434a5 100644
--- a/yt_dlp/aes.py
+++ b/yt_dlp/aes.py
@@ -534,19 +534,17 @@ def ghash(subkey, data):
 __all__ = [
     'aes_cbc_decrypt',
     'aes_cbc_decrypt_bytes',
-    'aes_ctr_decrypt',
-    'aes_decrypt_text',
-    'aes_decrypt',
-    'aes_ecb_decrypt',
-    'aes_gcm_decrypt_and_verify',
-    'aes_gcm_decrypt_and_verify_bytes',
-
     'aes_cbc_encrypt',
     'aes_cbc_encrypt_bytes',
+    'aes_ctr_decrypt',
     'aes_ctr_encrypt',
+    'aes_decrypt',
+    'aes_decrypt_text',
+    'aes_ecb_decrypt',
     'aes_ecb_encrypt',
     'aes_encrypt',
-
+    'aes_gcm_decrypt_and_verify',
+    'aes_gcm_decrypt_and_verify_bytes',
     'key_expansion',
     'pad_block',
     'pkcs7_padding',
diff --git a/yt_dlp/cookies.py b/yt_dlp/cookies.py
index d5b0d3991..772433b0f 100644
--- a/yt_dlp/cookies.py
+++ b/yt_dlp/cookies.py
@@ -1276,8 +1276,8 @@ def open(self, file, *, write=False):
     def _really_save(self, f, ignore_discard, ignore_expires):
         now = time.time()
         for cookie in self:
-            if (not ignore_discard and cookie.discard
-                    or not ignore_expires and cookie.is_expired(now)):
+            if ((not ignore_discard and cookie.discard)
+                    or (not ignore_expires and cookie.is_expired(now))):
                 continue
             name, value = cookie.name, cookie.value
             if value is None:
diff --git a/yt_dlp/downloader/hls.py b/yt_dlp/downloader/hls.py
index 0a00d5dab..da2574da7 100644
--- a/yt_dlp/downloader/hls.py
+++ b/yt_dlp/downloader/hls.py
@@ -119,12 +119,12 @@ def real_download(self, filename, info_dict):
             self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')
 
         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad')))
 
         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment')))
 
         fragments = []
 
diff --git a/yt_dlp/downloader/youtube_live_chat.py b/yt_dlp/downloader/youtube_live_chat.py
index 961938d44..ddd912ca2 100644
--- a/yt_dlp/downloader/youtube_live_chat.py
+++ b/yt_dlp/downloader/youtube_live_chat.py
@@ -123,8 +123,8 @@ def download_and_parse_fragment(url, frag_index, request_data=None, headers=None):
                     data,
                     lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}
 
-                func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
-                        or frag_index == 1 and try_refresh_replay_beginning
+                func = ((info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live)
+                        or (frag_index == 1 and try_refresh_replay_beginning)
                         or parse_actions_replay)
                 return (True, *func(live_chat_continuation))
             except HTTPError as err:
diff --git a/yt_dlp/extractor/bilibili.py b/yt_dlp/extractor/bilibili.py
index f01befcc0..72d5f20cf 100644
--- a/yt_dlp/extractor/bilibili.py
+++ b/yt_dlp/extractor/bilibili.py
@@ -662,12 +662,12 @@ def _real_extract(self, url):
         video_id, title = video_data['bvid'], video_data.get('title')
 
         # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
-        page_list_json = not is_festival and traverse_obj(
+        page_list_json = (not is_festival and traverse_obj(
             self._download_json(
                 'https://api.bilibili.com/x/player/pagelist', video_id,
                 fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'},
                 note='Extracting videos in anthology', headers=headers),
-            'data', expected_type=list) or []
+            'data', expected_type=list)) or []
         is_anthology = len(page_list_json) > 1
 
         part_id = int_or_none(parse_qs(url).get('p', [None])[-1])
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index ce79e0b62..92ddad2b7 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -3803,7 +3803,7 @@ def _cookies_passed(self):
     def mark_watched(self, *args, **kwargs):
         if not self.get_param('mark_watched', False):
             return
-        if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
+        if (self.supports_login() and self._get_login_info()[0] is not None) or self._cookies_passed:
             self._mark_watched(*args, **kwargs)
 
     def _mark_watched(self, *args, **kwargs):
diff --git a/yt_dlp/extractor/funimation.py b/yt_dlp/extractor/funimation.py
index d3e61c84f..01b53bcde 100644
--- a/yt_dlp/extractor/funimation.py
+++ b/yt_dlp/extractor/funimation.py
@@ -193,9 +193,9 @@ def _real_extract(self, url):
 
         for lang, version, fmt in self._get_experiences(episode):
             experience_id = str(fmt['experienceId'])
-            if (only_initial_experience and experience_id != initial_experience_id
-                    or requested_languages and lang.lower() not in requested_languages
-                    or requested_versions and version.lower() not in requested_versions):
+            if ((only_initial_experience and experience_id != initial_experience_id)
+                    or (requested_languages and lang.lower() not in requested_languages)
+                    or (requested_versions and version.lower() not in requested_versions)):
                 continue
             thumbnails.append({'url': fmt.get('poster')})
             duration = max(duration, fmt.get('duration', 0))
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index a67f09e62..41cd90db9 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -2925,7 +2925,7 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate):
                 # Obtain from MPD's maximum seq value
                 old_mpd_url = mpd_url
                 last_error = ctx.pop('last_error', None)
-                expire_fast = immediate or last_error and isinstance(last_error, HTTPError) and last_error.status == 403
+                expire_fast = immediate or (last_error and isinstance(last_error, HTTPError) and last_error.status == 403)
                 mpd_url, stream_number, is_live = (mpd_feed(format_id, 5 if expire_fast else 18000)
                                                    or (mpd_url, stream_number, False))
                 if not refresh_sequence:
@@ -3995,8 +3995,8 @@ def append_client(*client_names):
         return prs, player_url
 
     def _needs_live_processing(self, live_status, duration):
-        if (live_status == 'is_live' and self.get_param('live_from_start')
-                or live_status == 'post_live' and (duration or 0) > 2 * 3600):
+        if ((live_status == 'is_live' and self.get_param('live_from_start'))
+                or (live_status == 'post_live' and (duration or 0) > 2 * 3600)):
             return live_status
 
     def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration):
@@ -4192,7 +4192,7 @@ def build_fragments(f):
         skip_manifests = set(self._configuration_arg('skip'))
         if (not self.get_param('youtube_include_hls_manifest', True)
                 or needs_live_processing == 'is_live'  # These will be filtered out by YoutubeDL anyway
-                or needs_live_processing and skip_bad_formats):
+                or (needs_live_processing and skip_bad_formats)):
             skip_manifests.add('hls')
 
         if not self.get_param('youtube_include_dash_manifest', True):
@@ -4390,14 +4390,14 @@ def _real_extract(self, url):
             expected_type=dict)
 
         translated_title = self._get_text(microformats, (..., 'title'))
-        video_title = (self._preferred_lang and translated_title
+        video_title = ((self._preferred_lang and translated_title)
                        or get_first(video_details, 'title')  # primary
                        or translated_title
                        or search_meta(['og:title', 'twitter:title', 'title']))
         translated_description = self._get_text(microformats, (..., 'description'))
         original_description = get_first(video_details, 'shortDescription')
         video_description = (
-            self._preferred_lang and translated_description
+            (self._preferred_lang and translated_description)
             # If original description is blank, it will be an empty string.
             # Do not prefer translated description in this case.
             or original_description if original_description is not None else translated_description)
@@ -6837,7 +6837,7 @@ def _extract_tab_id_and_name(self, tab, base_url='https://www.youtube.com'):
         tab_url = urljoin(base_url, traverse_obj(
             tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
 
-        tab_id = (tab_url and self._get_url_mobj(tab_url)['tab'][1:]
+        tab_id = ((tab_url and self._get_url_mobj(tab_url)['tab'][1:])
                   or traverse_obj(tab, 'tabIdentifier', expected_type=str))
         if tab_id:
             return {
diff --git a/yt_dlp/plugins.py b/yt_dlp/plugins.py
index 2bf55df71..94335a9a3 100644
--- a/yt_dlp/plugins.py
+++ b/yt_dlp/plugins.py
@@ -183,4 +183,4 @@ def load_plugins(name, suffix):
 
 sys.meta_path.insert(0, PluginFinder(f'{PACKAGE_NAME}.extractor', f'{PACKAGE_NAME}.postprocessor'))
 
-__all__ = ['directories', 'load_plugins', 'PACKAGE_NAME', 'COMPAT_PACKAGE_NAME']
+__all__ = ['COMPAT_PACKAGE_NAME', 'PACKAGE_NAME', 'directories', 'load_plugins']
diff --git a/yt_dlp/postprocessor/__init__.py b/yt_dlp/postprocessor/__init__.py
index 164540b5d..7b1620544 100644
--- a/yt_dlp/postprocessor/__init__.py
+++ b/yt_dlp/postprocessor/__init__.py
@@ -44,4 +44,4 @@ def get_postprocessor(key):
 globals().update(_PLUGIN_CLASSES)
 
 __all__ = [name for name in globals() if name.endswith('PP')]
-__all__.extend(('PostProcessor', 'FFmpegPostProcessor'))
+__all__.extend(('FFmpegPostProcessor', 'PostProcessor'))
diff --git a/yt_dlp/postprocessor/ffmpeg.py b/yt_dlp/postprocessor/ffmpeg.py
index d994754fd..8965806ae 100644
--- a/yt_dlp/postprocessor/ffmpeg.py
+++ b/yt_dlp/postprocessor/ffmpeg.py
@@ -626,7 +626,7 @@ def run(self, info):
             sub_ext = sub_info['ext']
             if sub_ext == 'json':
                 self.report_warning('JSON subtitles cannot be embedded')
-            elif ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
+            elif ext != 'webm' or (ext == 'webm' and sub_ext == 'vtt'):
                 sub_langs.append(lang)
                 sub_names.append(sub_info.get('name'))
                 sub_filenames.append(sub_info['filepath'])
diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py
index 8517b762e..699bf1e7f 100644
--- a/yt_dlp/utils/_utils.py
+++ b/yt_dlp/utils/_utils.py
@@ -2683,8 +2683,8 @@ def merge_dicts(*dicts):
     merged = {}
     for a_dict in dicts:
         for k, v in a_dict.items():
-            if (v is not None and k not in merged
-                    or isinstance(v, str) and merged[k] == ''):
+            if ((v is not None and k not in merged)
+                    or (isinstance(v, str) and merged[k] == '')):
                 merged[k] = v
     return merged
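
Note (not part of the patch): the added parentheses are purely clarifying. Python's `and` binds tighter than `or`, so each rewritten expression groups exactly as the interpreter already grouped it, and the `__all__` edits only re-sort the export lists without changing their membership. A minimal standalone sketch (illustrative function names, not from the patch) checking the equivalence for the `raise_no_formats` message selection:

    # `and` binds tighter than `or`, so both spellings pick the same message.
    def pick_msg_old(msg, has_drm):
        return msg or has_drm and 'This video is DRM protected' or 'No video formats found!'

    def pick_msg_new(msg, has_drm):
        return msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'

    for args in [(None, True), (None, False), ('custom', True), ('', False)]:
        assert pick_msg_old(*args) == pick_msg_new(*args)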