
Merge remote-tracking branch 'dstftw/ivi'

Philipp Hagemeister 2014-01-28 21:47:05 +01:00
commit 9d2032932c


@@ -1,4 +1,5 @@
 # encoding: utf-8
+from __future__ import unicode_literals
 
 import re
 import json
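
The only functional addition in this hunk is the unicode_literals future import; it makes every bare string literal in the module a unicode string on Python 2, which is why the u'' prefixes are dropped throughout the rest of the diff. A minimal standalone sketch of that effect (not part of the commit):

# encoding: utf-8
# Standalone sketch: with unicode_literals in effect, a bare literal is already a
# unicode string on Python 2, so explicit u'' prefixes become redundant.
from __future__ import unicode_literals

title = 'Дежурный ангел - 1 серия'
assert isinstance(title, type(u''))  # holds on Python 2 and Python 3 alike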
@@ -11,35 +12,35 @@ from ..utils import (
 class IviIE(InfoExtractor):
-    IE_DESC = u'ivi.ru'
-    IE_NAME = u'ivi'
+    IE_DESC = 'ivi.ru'
+    IE_NAME = 'ivi'
     _VALID_URL = r'^https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'
 
     _TESTS = [
         # Single movie
         {
-            u'url': u'http://www.ivi.ru/watch/53141',
-            u'file': u'53141.mp4',
-            u'md5': u'6ff5be2254e796ed346251d117196cf4',
-            u'info_dict': {
-                u'title': u'Иван Васильевич меняет профессию',
-                u'description': u'md5:14d8eda24e9d93d29b5857012c6d6346',
-                u'duration': 5498,
-                u'thumbnail': u'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
+            'url': 'http://www.ivi.ru/watch/53141',
+            'file': '53141.mp4',
+            'md5': '6ff5be2254e796ed346251d117196cf4',
+            'info_dict': {
+                'title': 'Иван Васильевич меняет профессию',
+                'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
+                'duration': 5498,
+                'thumbnail': 'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
             },
-            u'skip': u'Only works from Russia',
+            'skip': 'Only works from Russia',
         },
         # Serial's serie
         {
-            u'url': u'http://www.ivi.ru/watch/dezhurnyi_angel/74791',
-            u'file': u'74791.mp4',
-            u'md5': u'3e6cc9a848c1d2ebcc6476444967baa9',
-            u'info_dict': {
-                u'title': u'Дежурный ангел - 1 серия',
-                u'duration': 2490,
-                u'thumbnail': u'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
+            'url': 'http://www.ivi.ru/watch/dezhurnyi_angel/74791',
+            'file': '74791.mp4',
+            'md5': '3e6cc9a848c1d2ebcc6476444967baa9',
+            'info_dict': {
+                'title': 'Дежурный ангел - 1 серия',
+                'duration': 2490,
+                'thumbnail': 'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
             },
-            u'skip': u'Only works from Russia',
+            'skip': 'Only works from Russia',
         }
     ]
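
Side note on the _TESTS block: values like 'md5:b924063ea1677c8fe343d8a72ac2195f' are digests of the expected field rather than literal text, so long or unstable fields such as the description are compared by hash. A rough sketch of that convention, with a hypothetical helper name (this is not the project's actual test-harness code):

import hashlib

def field_matches(expected, actual):
    # Hypothetical helper illustrating the 'md5:<digest>' convention in info_dict:
    # hash the extracted value and compare digests instead of comparing raw text.
    if expected.startswith('md5:'):
        return hashlib.md5(actual.encode('utf-8')).hexdigest() == expected[len('md5:'):]
    return expected == actual

desc = 'an extracted description string'  # stand-in for a field returned by the extractor
print(field_matches('md5:' + hashlib.md5(desc.encode('utf-8')).hexdigest(), desc))  # True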
@@ -54,7 +55,7 @@ class IviIE(InfoExtractor):
         return m.group('description') if m is not None else None
 
     def _extract_comment_count(self, html):
-        m = re.search(u'(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>', html)
+        m = re.search('(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>', html)
         return int(m.group('commentcount')) if m is not None else 0
 
     def _real_extract(self, url):
@@ -63,49 +64,49 @@ class IviIE(InfoExtractor):
         api_url = 'http://api.digitalaccess.ru/api/json/'
 
-        data = {u'method': u'da.content.get',
-                u'params': [video_id, {u'site': u's183',
-                                       u'referrer': u'http://www.ivi.ru/watch/%s' % video_id,
-                                       u'contentid': video_id
+        data = {'method': 'da.content.get',
+                'params': [video_id, {'site': 's183',
+                                      'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
+                                      'contentid': video_id
                                      }
                           ]
                }
 
         request = compat_urllib_request.Request(api_url, json.dumps(data))
 
-        video_json_page = self._download_webpage(request, video_id, u'Downloading video JSON')
+        video_json_page = self._download_webpage(request, video_id, 'Downloading video JSON')
         video_json = json.loads(video_json_page)
 
-        if u'error' in video_json:
-            error = video_json[u'error']
-            if error[u'origin'] == u'NoRedisValidData':
-                raise ExtractorError(u'Video %s does not exist' % video_id, expected=True)
-            raise ExtractorError(u'Unable to download video %s: %s' % (video_id, error[u'message']), expected=True)
+        if 'error' in video_json:
+            error = video_json['error']
+            if error['origin'] == 'NoRedisValidData':
+                raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+            raise ExtractorError('Unable to download video %s: %s' % (video_id, error['message']), expected=True)
 
-        result = video_json[u'result']
+        result = video_json['result']
 
         formats = [{
-            'url': x[u'url'],
-            'format_id': x[u'content_format'],
-            'preference': self._known_formats.index(x[u'content_format']),
-        } for x in result[u'files'] if x[u'content_format'] in self._known_formats]
+            'url': x['url'],
+            'format_id': x['content_format'],
+            'preference': self._known_formats.index(x['content_format']),
+        } for x in result['files'] if x['content_format'] in self._known_formats]
 
         self._sort_formats(formats)
 
         if not formats:
-            raise ExtractorError(u'No media links available for %s' % video_id)
+            raise ExtractorError('No media links available for %s' % video_id)
 
-        duration = result[u'duration']
-        compilation = result[u'compilation']
-        title = result[u'title']
+        duration = result['duration']
+        compilation = result['compilation']
+        title = result['title']
 
         title = '%s - %s' % (compilation, title) if compilation is not None else title
 
-        previews = result[u'preview']
+        previews = result['preview']
         previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
-        thumbnail = previews[-1][u'url'] if len(previews) > 0 else None
+        thumbnail = previews[-1]['url'] if len(previews) > 0 else None
 
-        video_page = self._download_webpage(url, video_id, u'Downloading video page')
+        video_page = self._download_webpage(url, video_id, 'Downloading video page')
         description = self._extract_description(video_page)
         comment_count = self._extract_comment_count(video_page)
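
A note on the formats block above: each entry's preference is its index in self._known_formats, so sorting ends up ranking the variants from worst to best known quality while unknown format names are filtered out. A standalone sketch of that ranking idea, with made-up format names rather than the extractor's actual lists:

# Standalone sketch of the preference idea used above: rank each available format by its
# position in a known worst-to-best list, then sort. Format names here are hypothetical.
KNOWN_FORMATS = ['low', 'medium', 'high']

available = [
    {'url': 'http://example.com/v.high.mp4', 'content_format': 'high'},
    {'url': 'http://example.com/v.low.mp4', 'content_format': 'low'},
    {'url': 'http://example.com/v.exotic.mp4', 'content_format': 'exotic'},  # skipped, as in the diff
]

formats = [{
    'url': x['url'],
    'format_id': x['content_format'],
    'preference': KNOWN_FORMATS.index(x['content_format']),
} for x in available if x['content_format'] in KNOWN_FORMATS]

formats.sort(key=lambda f: f['preference'])  # crude stand-in for InfoExtractor._sort_formats
print([f['format_id'] for f in formats])     # ['low', 'high']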
@@ -121,8 +122,8 @@ class IviIE(InfoExtractor):
 class IviCompilationIE(InfoExtractor):
-    IE_DESC = u'ivi.ru compilations'
-    IE_NAME = u'ivi:compilation'
+    IE_DESC = 'ivi.ru compilations'
+    IE_NAME = 'ivi:compilation'
     _VALID_URL = r'^https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
 
     def _extract_entries(self, html, compilation_id):
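
The (?!\d+) lookahead in the compilation pattern is what keeps purely numeric /watch/<id> URLs with IviIE while slugs such as dezhurnyi_angel go to IviCompilationIE. A quick standalone check of that split, using the two _VALID_URL patterns exactly as they appear in this diff:

import re

# Both patterns copied verbatim from the diff; used here only for a quick sanity check.
VIDEO_RE = r'^https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'
COMPILATION_RE = r'^https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'

print(bool(re.match(COMPILATION_RE, 'http://www.ivi.ru/watch/53141')))            # False: numeric id -> IviIE
print(bool(re.match(COMPILATION_RE, 'http://www.ivi.ru/watch/dezhurnyi_angel')))  # True: compilation slug
print(re.match(VIDEO_RE, 'http://www.ivi.ru/watch/dezhurnyi_angel/74791').group('videoid'))  # 74791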
@@ -135,22 +136,23 @@ class IviCompilationIE(InfoExtractor):
         season_id = mobj.group('seasonid')
 
         if season_id is not None: # Season link
-            season_page = self._download_webpage(url, compilation_id, u'Downloading season %s web page' % season_id)
+            season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
             playlist_id = '%s/season%s' % (compilation_id, season_id)
-            playlist_title = self._html_search_meta(u'title', season_page, u'title')
+            playlist_title = self._html_search_meta('title', season_page, 'title')
             entries = self._extract_entries(season_page, compilation_id)
         else: # Compilation link
-            compilation_page = self._download_webpage(url, compilation_id, u'Downloading compilation web page')
+            compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
             playlist_id = compilation_id
-            playlist_title = self._html_search_meta(u'title', compilation_page, u'title')
+            playlist_title = self._html_search_meta('title', compilation_page, 'title')
             seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
             if len(seasons) == 0: # No seasons in this compilation
                 entries = self._extract_entries(compilation_page, compilation_id)
             else:
                 entries = []
                 for season_id in seasons:
-                    season_page = self._download_webpage('http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
-                                                         compilation_id, u'Downloading season %s web page' % season_id)
+                    season_page = self._download_webpage(
+                        'http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
+                        compilation_id, 'Downloading season %s web page' % season_id)
                     entries.extend(self._extract_entries(season_page, compilation_id))
 
         return self.playlist_result(entries, playlist_id, playlist_title)
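
For completeness, one way to exercise the merged extractor is through the embedding API, using the first test URL from the diff. This is only a rough sketch and assumes a reasonably recent youtube-dl where YoutubeDL registers the default extractors on construction; since the tests are marked 'Only works from Russia', outside that region it is expected to fail with a geo-related ExtractorError rather than return metadata.

import youtube_dl

# Illustrative only: resolve the first IviIE test URL without downloading anything.
with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info('http://www.ivi.ru/watch/53141', download=False)
    print(info.get('title'), info.get('duration'))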