youtube-dl/youtube_dl/extractor/safari.py

# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    sanitized_Request,
    std_headers,
    urlencode_postdata,
    update_url_query,
)
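
# Shared base class: logs in to safaribooksonline.com (credentials supplied via
# --username/--password or .netrc) before any extraction runs.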
class SafariBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
    _NETRC_MACHINE = 'safari'

    _API_BASE = 'https://www.safaribooksonline.com/api/v1'
    _API_FORMAT = 'json'

    LOGGED_IN = False

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        headers = std_headers.copy()
        if 'Referer' not in headers:
            headers['Referer'] = self._LOGIN_URL

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login form', headers=headers)

        def is_logged(webpage):
            return any(re.search(p, webpage) for p in (
                r'href=["\']/accounts/logout/', r'>Sign Out<'))

        if is_logged(login_page):
            self.LOGGED_IN = True
            return
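
        # No existing session: pull the CSRF token out of the login form and
        # POST the credentials back to the login URL.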
        csrf = self._html_search_regex(
r"name='csrfmiddlewaretoken'\s+value='([^']+)'",
login_page, 'csrf token')
login_form = {
'csrfmiddlewaretoken': csrf,
'email': username,
'password1': password,
'login': 'Sign In',
'next': '',
}
        request = sanitized_Request(
            self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
        login_page = self._download_webpage(
            request, None, 'Logging in')

        if not is_logged(login_page):
            raise ExtractorError(
                'Login failed; make sure your credentials are correct and try again.',
                expected=True)
        self.LOGGED_IN = True
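

# Single lesson page: scrapes the Kaltura embed parameters from the page and
# delegates playback to the Kaltura extractor.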
class SafariIE(SafariBaseIE):
    IE_NAME = 'safari'
    IE_DESC = 'safaribooksonline.com online video'
    _VALID_URL = r'https?://(?:www\.)?safaribooksonline\.com/library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?#&]+)\.html'
    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/part00.html',
        'md5': 'dcc5a425e79f2564148652616af1f2a3',
        'info_dict': {
            'id': '0_qbqx90ic',
            'ext': 'mp4',
            'title': 'Introduction to Hadoop Fundamentals LiveLessons',
            'timestamp': 1437758058,
            'upload_date': '20150724',
            'uploader_id': 'stork',
        },
    }, {
        # non-digits in course id
        'url': 'https://www.safaribooksonline.com/library/view/create-a-nodejs/100000006A0210/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/library/view/learning-path-red/9780134664057/RHCE_Introduction.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = '%s/%s' % (mobj.group('course_id'), mobj.group('part'))

        webpage = self._download_webpage(url, video_id)
        reference_id = self._search_regex(
            r'data-reference-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
            webpage, 'kaltura reference id', group='id')
        partner_id = self._search_regex(
            r'data-partner-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
            webpage, 'kaltura widget id', group='id')
        ui_id = self._search_regex(
            r'data-ui-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
            webpage, 'kaltura uiconf id', group='id')
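
        # Build the query for Kaltura's embed frame from the ids scraped above.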
        query = {
            'wid': '_%s' % partner_id,
            'uiconf_id': ui_id,
            'flashvars[referenceId]': reference_id,
        }
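
        # When logged in, request a Kaltura session token from the Safari API
        # and pass it to the player via flashvars[ks].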
        if self.LOGGED_IN:
            kaltura_session = self._download_json(
                '%s/player/kaltura_session/?reference_id=%s' % (self._API_BASE, reference_id),
                video_id, 'Downloading kaltura session JSON',
                'Unable to download kaltura session JSON', fatal=False)
            if kaltura_session:
                session = kaltura_session.get('session')
                if session:
                    query['flashvars[ks]'] = session

        return self.url_result(update_url_query(
            'https://cdnapisec.kaltura.com/html5/html5lib/v2.37.1/mwEmbedFrame.php', query),
            'Kaltura')
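

# API chapter URLs: resolve the chapter's JSON to its web_url and hand it to SafariIE.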
class SafariApiIE(SafariBaseIE):
    IE_NAME = 'safari:api'
    _VALID_URL = r'https?://(?:www\.)?safaribooksonline\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'

    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
        'only_matching': True,
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9780134664057/chapter/RHCE_Introduction.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        part = self._download_json(
            url, '%s/%s' % (mobj.group('course_id'), mobj.group('part')),
            'Downloading part JSON')
        return self.url_result(part['web_url'], SafariIE.ie_key())
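

# Course pages: build a playlist from all chapters listed in the course's API JSON.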
class SafariCourseIE(SafariBaseIE):
    IE_NAME = 'safari:course'
    IE_DESC = 'safaribooksonline.com online courses'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?safaribooksonline\.com/(?:library/view/[^/]+|api/v1/book)|
                            techbus\.safaribooksonline\.com
                        )
                        /(?P<id>[^/]+)/?(?:[#?]|$)
                '''
    _TESTS = [{
        'url': 'https://www.safaribooksonline.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
        'info_dict': {
            'id': '9780133392838',
            'title': 'Hadoop Fundamentals LiveLessons',
        },
        'playlist_count': 22,
        'skip': 'Requires safaribooksonline account credentials',
    }, {
        'url': 'https://www.safaribooksonline.com/api/v1/book/9781449396459/?override_format=json',
        'only_matching': True,
    }, {
        'url': 'http://techbus.safaribooksonline.com/9780134426365',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        course_id = self._match_id(url)
        course_json = self._download_json(
            '%s/book/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT),
            course_id, 'Downloading course JSON')

        if 'chapters' not in course_json:
            raise ExtractorError(
                'No chapters found for course %s' % course_id, expected=True)
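
        # Each chapter entry is an API chapter URL handled by SafariApiIE.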
        entries = [
            self.url_result(chapter, SafariApiIE.ie_key())
            for chapter in course_json['chapters']]

        course_title = course_json['title']

        return self.playlist_result(entries, course_id, course_title)