
[vlive] Add support for channels

Authored by ping on 2017-01-24 13:52:17 +08:00, committed by Sergey M
parent ab6f6aee78
commit b92d3c5343
2 changed files with 72 additions and 1 deletion

youtube_dl/extractor/extractors.py

@@ -1129,7 +1129,10 @@ from .vk import (
     VKUserVideosIE,
     VKWallPostIE,
 )
-from .vlive import VLiveIE
+from .vlive import (
+    VLiveIE,
+    VLiveChannelIE
+)
 from .vodlocker import VodlockerIE
 from .vodplatform import VODPlatformIE
 from .voicerepublic import VoiceRepublicIE
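
As an illustration (not part of the commit), registering VLiveChannelIE in extractors.py is what lets youtube-dl's URL routing hand channel URLs to the new extractor. A minimal sketch using the standard InfoExtractor.suitable() check:

    from youtube_dl.extractor.vlive import VLiveChannelIE

    url = 'http://channels.vlive.tv/FCD4B/video'
    # suitable() matches the URL against the extractor's _VALID_URL pattern
    print(VLiveChannelIE.suitable(url))  # True for the new channel URL form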

youtube_dl/extractor/vlive.py

@@ -2,6 +2,8 @@
 from __future__ import unicode_literals

 import re
+import time
+import itertools

 from .common import InfoExtractor
 from ..utils import (
@@ -169,3 +171,69 @@ class VLiveIE(InfoExtractor):
             'subtitles': subtitles,
         })
         return info
+
+
+class VLiveChannelIE(InfoExtractor):
+    IE_NAME = 'vlive:channel'
+    _VALID_URL = r'https?://channels\.vlive\.tv/(?P<id>[0-9A-Z]+)/video'
+    _TEST = {
+        'url': 'http://channels.vlive.tv/FCD4B/video',
+        'info_dict': {
+            'id': 'FCD4B',
+            'title': 'MAMAMOO',
+        },
+        'playlist_mincount': 110
+    }
+    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
+
+    def _real_extract(self, url):
+        channel_code = self._match_id(url)
+
+        webpage = self._download_webpage(
+            'http://channels.vlive.tv/%s/video' % channel_code, channel_code)
+
+        app_js_url = self._search_regex(
+            r'(http[^\'\"\s]+app\.js)', webpage, 'app js', default='')
+
+        if app_js_url:
+            app_js = self._download_webpage(app_js_url, channel_code, 'app js')
+            app_id = self._search_regex(
+                r'Global\.VFAN_APP_ID\s*=\s*[\'"]([^\'"]+)[\'"]',
+                app_js, 'app id', default=self._APP_ID)
+        else:
+            app_id = self._APP_ID
+
+        channel_info = self._download_json(
+            'http://api.vfan.vlive.tv/vproxy/channelplus/decodeChannelCode',
+            channel_code, note='decode channel code',
+            query={'app_id': app_id, 'channelCode': channel_code, '_': int(time.time())})
+
+        channel_seq = channel_info['result']['channelSeq']
+        channel_name = None
+        entries = []
+
+        for page_num in itertools.count(1):
+            video_list = self._download_json(
+                'http://api.vfan.vlive.tv/vproxy/channelplus/getChannelVideoList',
+                channel_code, note='channel list %d' % page_num,
+                query={
+                    'app_id': app_id,
+                    'channelSeq': channel_seq,
+                    'maxNumOfRows': 1000,
+                    '_': int(time.time()),
+                    'pageNo': page_num
+                }
+            )
+            if not channel_name:
+                channel_name = video_list['result']['channelInfo']['channelName']
+
+            if not video_list['result'].get('videoList'):
+                break
+            for video in video_list['result']['videoList']:
+                video_id = str(video['videoSeq'])
+                entries.append(
+                    self.url_result(
+                        'http://www.vlive.tv/video/%s' % video_id, 'Vlive', video_id))
+
+        return self.playlist_result(
+            entries, channel_code, channel_name)
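
A hedged usage sketch (not part of the commit): once the extractor is registered, a channel can be expanded into a flat playlist through youtube-dl's Python API. YoutubeDL, extract_info() and the 'quiet'/'extract_flat' options are existing youtube-dl features; the URL is the one from the test case above.

    from youtube_dl import YoutubeDL

    # Resolve the channel page into a playlist of vlive.tv video URLs
    # without downloading any media.
    with YoutubeDL({'quiet': True, 'extract_flat': 'in_playlist'}) as ydl:
        playlist = ydl.extract_info(
            'http://channels.vlive.tv/FCD4B/video', download=False)
        print(playlist['id'], playlist['title'], len(playlist['entries']))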