Mirror of https://github.com/ytdl-org/youtube-dl.git

Switch YTPlaylistIE to API (relevant: #586); fixes #651; fixes #673; fixes #661

Filippo Valsorda 2013-02-26 10:39:26 +01:00
parent f03b88b3fb
commit 6324fd1d74
2 changed files with 71 additions and 52 deletions
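For context, a minimal sketch of the paging scheme behind the new _TEMPLATE_URL: the GData v2 playlist feed is fetched in pages of at most 50 entries, advancing start-index until a short page signals the end. The helper name and the use of urllib.request below are illustrative and not taken from the commit; the extractor itself goes through compat_urllib_request and the downloader's error reporting, and the GData endpoint has since been retired, so this only shows the shape of the requests.

import json
import urllib.request

# Hypothetical stand-ins mirroring the new class attributes in the diff.
API_TEMPLATE = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
MAX_RESULTS = 50  # the feed returns at most 50 entries per request

def fetch_playlist_video_urls(playlist_id):
    """Collect (position, url) pairs page by page, then return the urls in playlist order."""
    videos = []
    page_num = 1
    while True:
        # start-index is 1-based: page 1 -> 1, page 2 -> 51, ...
        url = API_TEMPLATE % (playlist_id, MAX_RESULTS, MAX_RESULTS * (page_num - 1) + 1)
        response = json.loads(urllib.request.urlopen(url).read().decode('utf-8'))
        entries = response['feed']['entry']
        videos += [(entry['yt$position']['$t'], entry['content']['src']) for entry in entries]
        if len(entries) < MAX_RESULTS:
            break  # a short page means the feed is exhausted
        page_num += 1
    return [video_url for _position, video_url in sorted(videos)]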

test/test_youtube_lists.py

@@ -8,7 +8,7 @@ import json
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
+from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE
 from youtube_dl.utils import *
 PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
@@ -38,11 +38,8 @@ class TestYoutubeLists(unittest.TestCase):
         DL = FakeDownloader()
         IE = YoutubePlaylistIE(DL)
         IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
-        self.assertEqual(DL.result, [
-            ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
-            ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
-            ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
-        ])
+        self.assertEqual(map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result),
+                         [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE' ])

     def test_youtube_playlist_long(self):
         DL = FakeDownloader()
@@ -50,14 +47,21 @@ class TestYoutubeLists(unittest.TestCase):
         IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         self.assertTrue(len(DL.result) >= 799)

+    def test_youtube_playlist_with_deleted(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        self.assertFalse('pElCt5oNDuI' in map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result))
+        self.assertFalse('KdPEApIVdWM' in map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result))
+
     def test_youtube_course(self):
         DL = FakeDownloader()
         IE = YoutubePlaylistIE(DL)
         # TODO find a > 100 (paginating?) videos course
         IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
+        self.assertEqual(YoutubeIE()._extract_id(DL.result[0][0]), 'j9WZyLZCBzs')
         self.assertEqual(len(DL.result), 25)
-        self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
+        self.assertEqual(YoutubeIE()._extract_id(DL.result[-1][0]), 'rYefUsYuEp0')

     def test_youtube_channel(self):
         # I give up, please find a channel that does paginate and test this like test_youtube_playlist_long
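The assertions above now compare 11-character video IDs rather than full watch URLs, because the API hands back content URLs carrying extra query parameters. A rough sketch of that ID-extraction step; the regex below is a deliberate simplification for illustration, not YoutubeIE's actual pattern:

import re

def extract_video_id(url):
    # Accept the common watch?v=, /v/ and youtu.be/ forms and grab the 11-character id.
    m = re.search(r'(?:v=|/v/|youtu\.be/)([0-9A-Za-z_-]{11})', url)
    if m is None:
        raise ValueError('could not find a video id in %r' % url)
    return m.group(1)

assert extract_video_id('http://www.youtube.com/watch?v=bV9L5Ht9LgY&feature=youtube_gdata') == 'bV9L5Ht9LgY'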

youtube_dl/InfoExtractors.py

@@ -15,6 +15,7 @@ import email.utils
 import xml.etree.ElementTree
 import random
 import math
+import operator

 from .utils import *
@@ -1662,22 +1663,40 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
     """Information Extractor for YouTube playlists."""

-    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
-    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    _VALID_URL = r"""(?:
+                        (?:https?://)?
+                        (?:\w+\.)?
+                        youtube\.com/
+                        (?:
+                           (?:course|view_play_list|my_playlists|artist|playlist)
+                           \? .*? (p|a|list)=
+                        | user/.*?/user/
+                        | p/
+                        | user/.*?#[pg]/c/
+                        )
+                        (?:PL|EC)?
+                     |PL|EC)
+                     ([0-9A-Za-z-_]{10,})
+                     (?:/.*?/([0-9A-Za-z_-]+))?
+                     .*"""
+    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
+    _MAX_RESULTS = 50
     IE_NAME = u'youtube:playlist'

     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)

+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def report_download_page(self, playlist_id, pagenum):
         """Report attempt to download playlist page with given number."""
         self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))

     def _real_extract(self, url):
         # Extract playlist id
-        mobj = re.match(self._VALID_URL, url)
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
             return
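One note on the new suitable() override above: a triple-quoted pattern spread over several lines, like the rewritten _VALID_URL, only matches as intended when compiled with re.VERBOSE, which is why both suitable() and _real_extract() now pass that flag. A minimal illustration with a made-up pattern:

import re

pattern = r"""(?:https?://)?
              example\.com/playlist"""

assert re.match(pattern, 'https://example.com/playlist', re.VERBOSE) is not None
assert re.match(pattern, 'https://example.com/playlist') is None  # without VERBOSE the whitespace is literal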
@@ -1687,55 +1706,51 @@ class YoutubePlaylistIE(InfoExtractor):
             self._downloader.download([mobj.group(3)])
             return

-        # Download playlist pages
-        # prefix is 'p' as default for playlists but there are other types that need extra care
-        playlist_prefix = mobj.group(1)
-        if playlist_prefix == 'a':
-            playlist_access = 'artist'
-        else:
-            playlist_prefix = 'p'
-            playlist_access = 'view_play_list'
+        # Download playlist videos from API
         playlist_id = mobj.group(2)
-        video_ids = []
-        pagenum = 1
+        page_num = 1
+        videos = []

         while True:
-            self.report_download_page(playlist_id, pagenum)
-            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
-            request = compat_urllib_request.Request(url)
+            self.report_download_page(playlist_id, page_num)
+
+            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
             try:
-                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+                page = compat_urllib_request.urlopen(url).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return

-            # Extract video identifiers
-            ids_in_page = []
-            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
-                if mobj.group(1) not in ids_in_page:
-                    ids_in_page.append(mobj.group(1))
-            video_ids.extend(ids_in_page)
+            try:
+                response = json.loads(page)
+            except ValueError as err:
+                self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+                return

-            if self._MORE_PAGES_INDICATOR not in page:
+            videos += [(entry['yt$position']['$t'], entry['content']['src']) for entry in response['feed']['entry']]
+            if len(response['feed']['entry']) < self._MAX_RESULTS:
                 break
-            pagenum = pagenum + 1
+            page_num += 1

-        total = len(video_ids)
+        videos = map(operator.itemgetter(1), sorted(videos))
+        total = len(videos)

         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
-            video_ids = video_ids[playliststart:]
+            videos = videos[playliststart:]
         else:
-            video_ids = video_ids[playliststart:playlistend]
+            videos = videos[playliststart:playlistend]

-        if len(video_ids) == total:
+        if len(videos) == total:
             self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
         else:
-            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))

-        for id in video_ids:
-            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        for video in videos:
+            self._downloader.download([video])

         return
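One detail of the loop worth spelling out: entries are accumulated as (position, url) pairs across pages and sorted by the feed's yt$position before operator.itemgetter(1) strips the positions back off, so downloads follow playlist order regardless of how the pages arrived. A toy illustration with made-up URLs:

import operator

videos = [(3, 'http://www.youtube.com/watch?v=ccccccccccc'),
          (1, 'http://www.youtube.com/watch?v=aaaaaaaaaaa'),
          (2, 'http://www.youtube.com/watch?v=bbbbbbbbbbb')]

# sorted() orders the tuples by position; itemgetter(1) keeps only the urls.
ordered = list(map(operator.itemgetter(1), sorted(videos)))
assert ordered[0].endswith('aaaaaaaaaaa')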
@@ -3605,9 +3620,9 @@ class TweetReelIE(InfoExtractor):
             'upload_date': upload_date
         }
         return [info]

 class SteamIE(InfoExtractor):
     _VALID_URL = r"""http://store.steampowered.com/
                 (?P<urltype>video|app)/ #If the page is only for videos or for a game
                 (?P<gameID>\d+)/?
                 (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID

@@ -3707,7 +3722,7 @@ class RBMARadioIE(InfoExtractor):
 class YouPornIE(InfoExtractor):
     """Information extractor for youporn.com."""
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'

     def _print_formats(self, formats):
         """Print all available formats"""
         print(u'Available formats:')

@@ -3769,8 +3784,8 @@ class YouPornIE(InfoExtractor):
         links = re.findall(LINK_RE, download_list_html)
         if(len(links) == 0):
             raise ExtractorError(u'ERROR: no known formats available for video')

         self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))

         formats = []
         for link in links:

@@ -3821,7 +3836,7 @@ class YouPornIE(InfoExtractor):
             return

         return [format]

 class PornotubeIE(InfoExtractor):
     """Information extractor for pornotube.com."""

@@ -3893,7 +3908,7 @@ class YouJizzIE(InfoExtractor):
         embed_page_url = result.group(0).strip()
         video_id = result.group('videoid')
         webpage = self._download_webpage(embed_page_url, video_id)

         # Get the video URL

@@ -4053,7 +4068,7 @@ class TEDIE(InfoExtractor):
 class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www.myspass.de/.*'

     def _real_extract(self, url):
         META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'

@@ -4063,12 +4078,12 @@ class MySpassIE(InfoExtractor):
         url_parent_path, video_id = os.path.split(url_path)
         if not video_id:
             _, video_id = os.path.split(url_parent_path)

         # get metadata
         metadata_url = META_DATA_URL_TEMPLATE % video_id
         metadata_text = self._download_webpage(metadata_url, video_id)
         metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))

         # extract values from metadata
         url_flv_el = metadata.find('url_flv')
         if url_flv_el is None: