#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = (
'Ricardo Garcia Gonzalez',
'Danny Colligan',
'Benjamin Johnson',
'Vasyl\' Vavrychuk',
'Witold Baryluk',
'Paweł Paprota',
'Gergely Imreh',
'Rogério Brito',
'Philipp Hagemeister',
'Sören Schulze',
'Kevin Ngo',
'Ori Avtalion',
'shizeeg',
)
__license__ = 'Public Domain'
__version__ = '2011.12.18'
UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
import cookielib
import datetime
import gzip
import htmlentitydefs
import HTMLParser
import httplib
import locale
import math
import netrc
import os
import os.path
import re
import socket
import string
import subprocess
import sys
import time
import urllib
import urllib2
import warnings
import zlib
if os.name == 'nt':
import ctypes
try:
import email.utils
except ImportError: # Python 2.4
import email.Utils
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# parse_qs was moved from the cgi module to the urlparse module recently.
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
try:
import lxml.etree
except ImportError:
pass # Handled below
try:
import xml.etree.ElementTree
except ImportError: # Python<2.5: Not officially supported, but let it slip
warnings.warn('xml.etree.ElementTree support is missing. Consider upgrading to Python >= 2.5 if you get related errors.')
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
try:
import json
except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson):
import re
class json(object):
@staticmethod
def loads(s):
s = s.decode('UTF-8')
def raiseError(msg, i):
raise ValueError(msg + ' at position ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]))
def skipSpace(i, expectMore=True):
while i < len(s) and s[i] in ' \t\r\n':
i += 1
if expectMore:
if i >= len(s):
raiseError('Premature end', i)
return i
def decodeEscape(match):
esc = match.group(1)
_STATIC = {
'"': '"',
'\\': '\\',
'/': '/',
'b': unichr(0x8),
'f': unichr(0xc),
'n': '\n',
'r': '\r',
't': '\t',
}
if esc in _STATIC:
return _STATIC[esc]
if esc[0] == 'u':
if len(esc) == 1+4:
return unichr(int(esc[1:5], 16))
if len(esc) == 5+6 and esc[5:7] == '\\u':
hi = int(esc[1:5], 16)
low = int(esc[7:11], 16)
return unichr((hi - 0xd800) * 0x400 + low - 0xdc00 + 0x10000)
raise ValueError('Unknown escape ' + str(esc))
def parseString(i):
i += 1
e = i
while True:
e = s.index('"', e)
bslashes = 0
while s[e-bslashes-1] == '\\':
bslashes += 1
if bslashes % 2 == 1:
e += 1
continue
break
rexp = re.compile(r'\\(u[dD][89aAbB][0-9a-fA-F]{2}\\u[0-9a-fA-F]{4}|u[0-9a-fA-F]{4}|.|$)')
stri = rexp.sub(decodeEscape, s[i:e])
return (e+1,stri)
def parseObj(i):
i += 1
res = {}
i = skipSpace(i)
if s[i] == '}': # Empty dictionary
return (i+1,res)
while True:
if s[i] != '"':
raiseError('Expected a string object key', i)
i,key = parseString(i)
i = skipSpace(i)
if i >= len(s) or s[i] != ':':
raiseError('Expected a colon', i)
i,val = parse(i+1)
res[key] = val
i = skipSpace(i)
if s[i] == '}':
return (i+1, res)
if s[i] != ',':
raiseError('Expected comma or closing curly brace', i)
i = skipSpace(i+1)
def parseArray(i):
res = []
i = skipSpace(i+1)
if s[i] == ']': # Empty array
return (i+1,res)
while True:
i,val = parse(i)
res.append(val)
i = skipSpace(i) # Raise exception if premature end
if s[i] == ']':
return (i+1, res)
if s[i] != ',':
raiseError('Expected a comma or closing bracket', i)
i = skipSpace(i+1)
def parseDiscrete(i):
for k,v in {'true': True, 'false': False, 'null': None}.items():
if s.startswith(k, i):
return (i+len(k), v)
raiseError('Not a boolean (or null)', i)
def parseNumber(i):
mobj = re.match('^(-?(0|[1-9][0-9]*)(\.[0-9]*)?([eE][+-]?[0-9]+)?)', s[i:])
if mobj is None:
raiseError('Not a number', i)
nums = mobj.group(1)
if '.' in nums or 'e' in nums or 'E' in nums:
return (i+len(nums), float(nums))
return (i+len(nums), int(nums))
CHARMAP = {'{': parseObj, '[': parseArray, '"': parseString, 't': parseDiscrete, 'f': parseDiscrete, 'n': parseDiscrete}
def parse(i):
i = skipSpace(i)
i,res = CHARMAP.get(s[i], parseNumber)(i)
i = skipSpace(i, False)
return (i,res)
i,res = parse(0)
if i < len(s):
raise ValueError('Extra data at end of input (index ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]) + ')')
return res
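
# A quick sanity check of the fallback parser (a hypothetical example, not
# executed at import time; the standard json module returns the same value):
#   json.loads('{"a": [1, 2.5, "b\\u00e9"]}')  ==  {u'a': [1, 2.5, u'b\xe9']}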
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
def yield_preferredencoding():
try:
pref = locale.getpreferredencoding()
u'TEST'.encode(pref)
except:
pref = 'UTF-8'
while True:
yield pref
return yield_preferredencoding().next()
def htmlentity_transform(matchobj):
"""Transforms an HTML entity to a Unicode character.
This function receives a match object and is intended to be used with
the re.sub() function.
"""
entity = matchobj.group(1)
# Known non-numeric HTML entity
if entity in htmlentitydefs.name2codepoint:
return unichr(htmlentitydefs.name2codepoint[entity])
# Unicode character
mobj = re.match(ur'(?u)#(x?\d+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith(u'x'):
base = 16
numstr = u'0%s' % numstr
else:
base = 10
return unichr(long(numstr, base))
# Unknown entity in name, return its literal representation
return (u'&%s;' % entity)
def sanitize_title(utitle):
"""Sanitizes a video title so it could be used as part of a filename."""
utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
return utitle.replace(unicode(os.sep), u'%')
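
# For instance, on POSIX systems (where os.sep is u'/') a hypothetical call:
#   sanitize_title(u'AC/DC &amp; Friends')  ->  u'AC%DC & Friends'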
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == u'-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout, filename)
stream = open(filename, open_mode)
return (stream, filename)
except (IOError, OSError), err:
# In case of error, try to remove win32 forbidden chars
filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)
# An exception here should be caught in the caller
stream = open(filename, open_mode)
return (stream, filename)
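
# E.g. on Windows, where a colon is forbidden in filenames, a hypothetical
# call would fall back to the tweaked name:
#   sanitize_open(u'a:b.mp4', 'wb')  ->  (stream, u'a#b.mp4')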
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
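
# For example, for a hypothetical Last-modified header value:
#   timeconvert('Sun, 18 Dec 2011 12:00:00 +0000')  ->  1324209600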
def _simplify_title(title):
expr = re.compile(ur'[^\w\d_\-]+', flags=re.UNICODE)
return expr.sub(u'_', title).strip(u'_')
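
# For instance:
#   _simplify_title(u'Foo: bar/baz!')  ->  u'Foo_bar_baz'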
def _orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
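
# For instance:
#   _orderedSet([1, 2, 1, 3, 2])  ->  [1, 2, 3]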
class DownloadError(Exception):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
pass
class SameFileError(Exception):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(Exception):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
pass
class MaxDownloadsReached(Exception):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(Exception):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(Exception):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
# Both in bytes
downloaded = None
expected = None
def __init__(self, downloaded, expected):
self.downloaded = downloaded
self.expected = expected
class YoutubeDLHandler(urllib2.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-No-Compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
@staticmethod
def addinfourl_wrapper(stream, headers, url, code):
if hasattr(urllib2.addinfourl, 'getcode'):
return urllib2.addinfourl(stream, headers, url, code)
ret = urllib2.addinfourl(stream, headers, url)
ret.code = code
return ret
def http_request(self, req):
for h in std_headers:
if h in req.headers:
del req.headers[h]
req.add_header(h, std_headers[h])
if 'Youtubedl-no-compression' in req.headers:
if 'Accept-encoding' in req.headers:
del req.headers['Accept-encoding']
del req.headers['Youtubedl-no-compression']
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = StringIO.StringIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
return resp
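
# The handler is meant to be installed on an OpenerDirector, e.g. (a sketch
# that omits the cookie processor a full setup would also install):
#   opener = urllib2.build_opener(urllib2.ProxyHandler(), YoutubeDLHandler())
#   urllib2.install_opener(opener)
# A request that must not be compressed only needs the marker header:
#   request = urllib2.Request(url, None, {'Youtubedl-no-compression': 'True'})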
class FileDownloader(object):
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. As, given a video URL, the downloader doesn't know how to
extract all the needed information, task that InfoExtractors do, it
has to pass the URL to one of them.
For this, file downloader objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the file downloader handles it to the first InfoExtractor it
2009-04-23 22:34:58 +02:00
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
asks the FileDownloader to process the video information, possibly
downloading the video.
2008-07-21 23:12:31 +02:00
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
2009-03-04 22:12:33 +01:00
options instead. These options are available through the params
attribute for the InfoExtractors to use. The FileDownloader also
registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
usenetrc: Use netrc for authentication instead.
quiet: Do not print messages to stdout.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
outtmpl: Template for output names.
ignoreerrors: Do not stop on download errors.
ratelimit: Download speed limit, in bytes/sec.
nooverwrites: Prevent overwriting files.
retries: Number of times to retry for HTTP error 5xx
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
writedescription: Write the video description to a .description file
	writeinfojson: Write the video metadata to a .info.json file
"""
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
def __init__(self, params):
"""Create a FileDownloader object with the given options."""
self._ies = []
self._pps = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self.params = params
@staticmethod
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = long(math.log(bytes, 1024.0))
suffix = 'bkMGTPEZY'[exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return '--:--'
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return '--:--'
rate = float(current) / dif
eta = long((float(total) - float(current)) / rate)
(eta_mins, eta_secs) = divmod(eta, 60)
if eta_mins > 99:
return '--:--'
return '%02d:%02d' % (eta_mins, eta_secs)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return long(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return long(new_max)
if rate < new_min:
return long(new_min)
return long(rate)
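
	# E.g. after reading 1024 bytes in half a second the block size is
	# doubled, staying below the 4 MB cap:
	#   best_block_size(0.5, 1024)  ->  2048L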
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into a long integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return long(round(number * multiplier))
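
	# Both helpers are pure functions, e.g.:
	#   FileDownloader.parse_bytes('50k')   ->  51200L
	#   FileDownloader.format_bytes(51200)  ->  '50.00k'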
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
ie.set_downloader(self)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
"""Print message to stdout if not in quiet mode."""
try:
if not self.params.get('quiet', False):
terminator = [u'\n', u''][skip_eol]
print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
self._screen_file.flush()
except (UnicodeEncodeError), err:
if not ignore_encoding_errors:
raise
def to_stderr(self, message):
"""Print message to stderr."""
print >>sys.stderr, message.encode(preferredencoding())
def to_cons_title(self, message):
"""Set console/terminal window title to message."""
if not self.params.get('consoletitle', False):
return
if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
def fixed_template(self):
"""Checks if the output template is fixed."""
return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)
def trouble(self, message=None):
"""Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
2008-07-22 15:52:56 +02:00
download errors or not, this method may throw an exception or
2009-04-23 21:43:04 +02:00
not when errors are found, after printing the message.
2008-07-22 11:33:41 +02:00
"""
if message is not None:
self.to_stderr(message)
if not self.params.get('ignoreerrors', False):
raise DownloadError(message)
self._download_retcode = 1
def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == u'-' or \
(os.path.exists(filename) and not os.path.isfile(filename)):
return filename
return filename + u'.part'
def undo_temp_name(self, filename):
if filename.endswith(u'.part'):
return filename[:-len(u'.part')]
return filename
def try_rename(self, old_filename, new_filename):
try:
if old_filename == new_filename:
return
os.rename(old_filename, new_filename)
except (IOError, OSError), err:
self.trouble(u'ERROR: unable to rename file')
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(filename):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
try:
os.utime(filename, (time.time(), filetime))
except:
pass
return filetime
def report_writedescription(self, descfn):
""" Report that the description file is being written """
self.to_screen(u'[info] Writing video description to: %s' % descfn, ignore_encoding_errors=True)
def report_writeinfojson(self, infofn):
""" Report that the metadata file has been written """
self.to_screen(u'[info] Video description metadata as JSON to: %s' % infofn, ignore_encoding_errors=True)
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
"""Report download progress."""
if self.params.get('noprogress', False):
return
self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen(u'[download] %s has already been downloaded' % file_name)
except (UnicodeEncodeError), err:
self.to_screen(u'[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen(u'[download] Unable to resume')
def report_finish(self):
"""Report download finished."""
if self.params.get('noprogress', False):
self.to_screen(u'[download] Download completed')
else:
self.to_screen(u'')
def increment_downloads(self):
"""Increment the ordinal that assigns a number to each file."""
self._num_downloads += 1
def prepare_filename(self, info_dict):
"""Generate the output filename."""
try:
template_dict = dict(info_dict)
template_dict['epoch'] = unicode(long(time.time()))
template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
filename = self.params['outtmpl'] % template_dict
return filename
except (ValueError, KeyError), err:
self.trouble(u'ERROR: invalid system charset or erroneous output template')
return None
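
	# E.g. with the hypothetical template u'%(stitle)s-%(id)s.%(ext)s' and an
	# info_dict carrying stitle=u'Some_title', id=u'abc123' and ext=u'mp4',
	# this returns u'Some_title-abc123.mp4'.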
def _match_entry(self, info_dict):
""" Returns None iff the file should be downloaded """
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle and not re.search(matchtitle, title, re.IGNORECASE):
return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle and re.search(rejecttitle, title, re.IGNORECASE):
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
return None
def process_info(self, info_dict):
"""Process a single dictionary returned by an InfoExtractor."""
reason = self._match_entry(info_dict)
if reason is not None:
self.to_screen(u'[download] ' + reason)
return
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads > int(max_downloads):
raise MaxDownloadsReached()
filename = self.prepare_filename(info_dict)
# Forced printings
if self.params.get('forcetitle', False):
print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
if self.params.get('forceurl', False):
print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
if self.params.get('forcedescription', False) and 'description' in info_dict:
print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
if self.params.get('forcefilename', False) and filename is not None:
print filename.encode(preferredencoding(), 'xmlcharrefreplace')
if self.params.get('forceformat', False):
print info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace')
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
return
if filename is None:
return
try:
dn = os.path.dirname(filename)
if dn != '' and not os.path.exists(dn):
os.makedirs(dn)
except (OSError, IOError), err:
self.trouble(u'ERROR: unable to create directory ' + unicode(err))
return
if self.params.get('writedescription', False):
try:
descfn = filename + '.description'
self.report_writedescription(descfn)
descfile = open(descfn, 'wb')
try:
descfile.write(info_dict['description'].encode('utf-8'))
finally:
descfile.close()
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write description file ' + descfn)
return
if self.params.get('writeinfojson', False):
infofn = filename + '.info.json'
self.report_writeinfojson(infofn)
try:
json.dump
except (NameError,AttributeError):
self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.')
return
try:
infof = open(infofn, 'wb')
try:
					json_info_dict = dict((k, v) for k, v in info_dict.iteritems() if k not in ('urlhandle',))
json.dump(json_info_dict, infof)
finally:
infof.close()
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
return
if not self.params.get('skip_download', False):
if self.params.get('nooverwrites', False) and os.path.exists(filename):
success = True
else:
try:
success = self._do_download(filename, info_dict)
except (OSError, IOError), err:
raise UnavailableVideoError
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self.trouble(u'ERROR: unable to download video data: %s' % str(err))
return
except (ContentTooShortError, ), err:
self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success:
try:
self.post_process(filename, info_dict)
except (PostProcessingError), err:
self.trouble(u'ERROR: postprocessing: %s' % str(err))
return
def download(self, url_list):
"""Download a given list of URLs."""
if len(url_list) > 1 and self.fixed_template():
raise SameFileError(self.params['outtmpl'])
for url in url_list:
suitable_found = False
for ie in self._ies:
# Go to next InfoExtractor if not suitable
if not ie.suitable(url):
continue
# Suitable InfoExtractor found
suitable_found = True
# Extract information from URL and process it
ie.extract(url)
# Suitable InfoExtractor had been found; go to next URL
2008-07-21 23:12:31 +02:00
break
if not suitable_found:
self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
return self._download_retcode
def post_process(self, filename, ie_info):
"""Run the postprocessing chain on the given file."""
info = dict(ie_info)
info['filepath'] = filename
for pp in self._pps:
info = pp.run(info)
if info is None:
break
def _download_with_rtmpdump(self, filename, url, player_url):
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
# Check for rtmpdump first
try:
subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
except (OSError, IOError):
self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
return False
		# Download using rtmpdump. rtmpdump returns exit code 2 when
		# the connection was interrupted and resuming appears to be
		# possible. This is part of rtmpdump's normal usage, AFAIK.
basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
while retval == 2 or retval == 1:
prevsize = os.path.getsize(tmpfilename)
self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
time.sleep(5.0) # This seems to be needed
retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
cursize = os.path.getsize(tmpfilename)
if prevsize == cursize and retval == 1:
break
			# Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
if prevsize == cursize and retval == 2 and cursize > 1024:
self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
retval = 0
break
if retval == 0:
self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
self.try_rename(tmpfilename, filename)
return True
else:
self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
return False
def _do_download(self, filename, info_dict):
url = info_dict['url']
player_url = info_dict.get('player_url', None)
# Check file already present
if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
self.report_file_already_downloaded(filename)
return True
# Attempt to download using rtmpdump
if url.startswith('rtmp'):
return self._download_with_rtmpdump(filename, url, player_url)
tmpfilename = self.temp_name(filename)
stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
basic_request = urllib2.Request(url, None, headers)
request = urllib2.Request(url, None, headers)
# Establish possible resume length
if os.path.isfile(tmpfilename):
resume_len = os.path.getsize(tmpfilename)
else:
resume_len = 0
open_mode = 'wb'
if resume_len != 0:
if self.params.get('continuedl', False):
self.report_resuming_byte(resume_len)
request.add_header('Range','bytes=%d-' % resume_len)
open_mode = 'ab'
else:
resume_len = 0
count = 0
retries = self.params.get('retries', 0)
while count <= retries:
# Establish connection
try:
				if count == 0 and 'urlhandle' in info_dict:
					# Reuse the URL handle the InfoExtractor already opened
					data = info_dict['urlhandle']
				else:
					data = urllib2.urlopen(request)
break
except (urllib2.HTTPError, ), err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = urllib2.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (urllib2.HTTPError, ), err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < long(content_length) < resume_len + 100)):
							# The file had already been fully downloaded.
							# Explanation of the above condition: in issue #175 it was revealed that
							# YouTube sometimes adds or removes a few bytes from the end of the file,
							# changing the file size slightly and causing problems for some users. So
							# I decided to implement a suggested change and consider the file
							# completely downloaded if the file size differs less than 100 bytes from
							# the one on the hard drive.
self.report_file_already_downloaded(filename)
self.try_rename(tmpfilename, filename)
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
open_mode = 'wb'
break
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if count > retries:
self.trouble(u'ERROR: giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
if data_len is not None:
data_len = long(data_len) + resume_len
data_len_str = self.format_bytes(data_len)
byte_counter = 0 + resume_len
block_size = 1024
start = time.time()
while True:
# Download and write
before = time.time()
data_block = data.read(block_size)
after = time.time()
if len(data_block) == 0:
break
byte_counter += len(data_block)
# Open file just in time
if stream is None:
try:
(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
assert stream is not None
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError), err:
self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError), err:
self.trouble(u'\nERROR: unable to write data: %s' % str(err))
return False
block_size = self.best_block_size(after - before, len(data_block))
# Progress message
speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
if data_len is None:
self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
else:
percent_str = self.calc_percent(byte_counter, data_len)
eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
self.report_progress(percent_str, data_len_str, speed_str, eta_str)
# Apply rate limit
self.slow_down(start, byte_counter - resume_len)
if stream is None:
self.trouble(u'\nERROR: Did not get any data blocks')
return False
stream.close()
self.report_finish()
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, long(data_len))
self.try_rename(tmpfilename, filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
return True
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information from the video (or videos) the URL refers to. This
information includes the real video URL, the video title and simplified
title, author and others. The information is stored in a dictionary
which is then passed to the FileDownloader. The FileDownloader
processes this information possibly downloading the video to the file
system, among other possible outcomes. The dictionaries must include
the following fields:
id: Video identifier.
url: Final video URL.
uploader: Nickname of the video uploader.
title: Literal title.
stitle: Simplified title.
ext: Video filename extension.
format: Video format.
player_url: SWF Player URL (may be None).
The following fields are optional. Their primary purpose is to allow
youtube-dl to serve as the backend for a video search function, such
as the one in youtube2mp3. They are only used when their respective
forced printing functions are called:
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
"""
_ready = False
_downloader = None
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
def suitable(self, url):
"""Receives a URL and returns True if suitable for this IE."""
return re.match(self._VALID_URL, url) is not None
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
self.initialize()
return self._real_extract(url)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?!view_play_list|my_playlists|artist|playlist)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NETRC_MACHINE = 'youtube'
# Listed in order of quality
_available_formats = ['38', '37', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
_available_formats_prefer_free = ['38', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
_video_extensions = {
'13': '3gp',
'17': 'mp4',
'18': 'mp4',
'22': 'mp4',
'37': 'mp4',
'38': 'video', # You actually don't know if this will be MOV, AVI or whatever
'43': 'webm',
'44': 'webm',
'45': 'webm',
}
_video_dimensions = {
'5': '240x400',
'6': '???',
'13': '???',
'17': '144x176',
'18': '360x640',
'22': '720x1280',
'34': '360x640',
'35': '480x854',
'37': '1080x1920',
'38': '3072x4096',
'43': '360x640',
'44': '480x854',
'45': '720x1280',
}
IE_NAME = u'youtube'
def report_lang(self):
"""Report attempt to set language."""
self._downloader.to_screen(u'[youtube] Setting language')
def report_login(self):
"""Report attempt to log in."""
self._downloader.to_screen(u'[youtube] Logging in')
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self._downloader.to_screen(u'[youtube] Confirming age')
def report_video_webpage_download(self, video_id):
"""Report attempt to download video webpage."""
self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self._downloader.to_screen(u'[youtube] RTMP download detected')
def _print_formats(self, formats):
print 'Available formats:'
for x in formats:
print '%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))
def _real_initialize(self):
if self._downloader is None:
return
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError), err:
self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
return
# Set language
request = urllib2.Request(self._LANG_URL)
try:
self.report_lang()
urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
return
# No authentication to be performed
if username is None:
return
# Log in
login_form = {
'current_form': 'loginForm',
'next': '/',
'action_login': 'Log In',
'username': username,
'password': password,
}
request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
try:
self.report_login()
login_results = urllib2.urlopen(request).read()
if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
return
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
return
# Confirm age
age_form = {
'next_url': '/',
'action_confirm': 'Confirm',
}
request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
try:
self.report_age_confirmation()
age_results = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
return
def _real_extract(self, url):
# Extract video id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(2)
# Get video webpage
self.report_video_webpage_download(video_id)
request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
try:
video_webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
# Get video info
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (video_id, el_type))
request = urllib2.Request(video_info_url)
try:
video_info_webpage = urllib2.urlopen(request).read()
video_info = parse_qs(video_info_webpage)
if 'token' in video_info:
break
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
return
if 'token' not in video_info:
if 'reason' in video_info:
self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
else:
self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
return
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
video_uploader = urllib.unquote_plus(video_info['author'][0])
# title
if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = urllib.unquote_plus(video_info['title'][0])
video_title = video_title.decode('utf-8')
video_title = sanitize_title(video_title)
# simplified title
simple_title = _simplify_title(video_title)
# thumbnail image
if 'thumbnail_url' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
video_thumbnail = ''
else: # don't panic if we can't find it
video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = u'NA'
mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
for expression in format_expressions:
try:
upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
except:
pass
# description
try:
lxml.etree
except NameError:
video_description = u'No description available.'
if self._downloader.params.get('forcedescription', False) or self._downloader.params.get('writedescription', False):
mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
if mobj is not None:
video_description = mobj.group(1).decode('utf-8')
else:
html_parser = lxml.etree.HTMLParser(encoding='utf-8')
vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()'))
# TODO use another parser
# token
video_token = urllib.unquote_plus(video_info['token'][0])
# Decide which formats to download
req_format = self._downloader.params.get('format', None)
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
video_url_list = [(None, video_info['conn'][0])]
elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
url_data = [parse_qs(uds) for uds in url_data_strs]
url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
format_limit = self._downloader.params.get('format_limit', None)
available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
if format_limit is not None and format_limit in available_formats:
format_list = available_formats[available_formats.index(format_limit):]
else:
format_list = available_formats
existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0:
self._downloader.trouble(u'ERROR: no known formats available for video')
return
if self._downloader.params.get('listformats', None):
self._print_formats(existing_formats)
return
if req_format is None or req_format == 'best':
video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
elif req_format == 'worst':
video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
elif req_format in ('-1', 'all'):
video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
else:
# Specific formats. We pick the first in a slash-delimited sequence.
# For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
req_formats = req_format.split('/')
video_url_list = None
for rf in req_formats:
if rf in url_map:
video_url_list = [(rf, url_map[rf])]
break
if video_url_list is None:
self._downloader.trouble(u'ERROR: requested format not available')
return
else:
self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
return
for format_param, video_real_url in video_url_list:
# At this point we have a new video
self._downloader.increment_downloads()
# Extension
video_extension = self._video_extensions.get(format_param, 'flv')
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_real_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'),
'upload_date': upload_date,
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description,
'player_url': player_url,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class MetacafeIE(InfoExtractor):
"""Information Extractor for metacafe.com."""
_VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
_youtube_ie = None
IE_NAME = u'metacafe'
def __init__(self, youtube_ie, downloader=None):
InfoExtractor.__init__(self, downloader)
self._youtube_ie = youtube_ie
def report_disclaimer(self):
"""Report disclaimer retrieval."""
self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self._downloader.to_screen(u'[metacafe] Confirming age')
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
def _real_initialize(self):
# Retrieve disclaimer
request = urllib2.Request(self._DISCLAIMER)
try:
self.report_disclaimer()
disclaimer = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
return
# Confirm age
disclaimer_form = {
'filters': '0',
'submit': "Continue - I'm over 18",
}
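# POSTing this form mirrors clicking "Continue - I'm over 18" on the
# family filter page; the cookies it sets should keep subsequent
# requests age-confirmed.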
request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
try:
self.report_age_confirmation()
disclaimer = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
return
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(1)
# Check if video comes from YouTube
mobj2 = re.match(r'^yt-(.*)$', video_id)
if mobj2 is not None:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
return
# At this point we have a new video
self._downloader.increment_downloads()
simple_title = mobj.group(2).decode('utf-8')
# Retrieve video webpage to extract further information
request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
return
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is not None:
mediaURL = urllib.unquote(mobj.group(1))
video_extension = mediaURL[-3:]
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
if mobj is None:
video_url = mediaURL
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
vardict = parse_qs(mobj.group(1))
if 'mediaData' not in vardict:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mediaURL = mobj.group(1).replace('\\/', '/')
video_extension = mediaURL[-3:]
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
video_title = sanitize_title(video_title)
mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
video_uploader = mobj.group(1)
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'),
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class DailymotionIE(InfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'
IE_NAME = u'dailymotion'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
def _real_extract(self, url):
htmlParser = HTMLParser.HTMLParser()
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
# At this point we have a new video
self._downloader.increment_downloads()
video_id = mobj.group(1)
video_extension = 'flv'
# Retrieve video webpage to extract further information
request = urllib2.Request(url)
request.add_header('Cookie', 'family_filter=off')
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
return
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
sequence = urllib.unquote(mobj.group(1))
mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
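# The 'sequence' flashvar holds a JSON-like blob; 'sdURL' inside it is
# the standard-definition stream URL, with the backslash escapes
# stripped above.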
# if needed, prepend http://www.dailymotion.com/ for relative URLs
video_url = mediaURL
mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = htmlParser.unescape(mobj.group('title').decode('utf-8'))
video_title = sanitize_title(video_title)
simple_title = _simplify_title(video_title)
mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
video_uploader = mobj.group(1)
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'),
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class GoogleIE(InfoExtractor):
"""Information extractor for video.google.com."""
_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'
IE_NAME = u'video.google'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# At this point we have a new video
self._downloader.increment_downloads()
video_id = mobj.group(1)
video_extension = 'mp4'
# Retrieve video webpage to extract further information
request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
# Extract URL, uploader, and title from webpage
self.report_extraction(video_id)
mobj = re.search(r"download_url:'([^']+)'", webpage)
if mobj is None:
video_extension = 'flv'
mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mediaURL = urllib.unquote(mobj.group(1))
mediaURL = mediaURL.replace('\\x3d', '\x3d')
mediaURL = mediaURL.replace('\\x26', '\x26')
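# The page embeds the URL with JavaScript hex escapes: \x3d is '=' and
# \x26 is '&', so after these replacements we have a plain URL again.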
video_url = mediaURL
mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
video_title = sanitize_title(video_title)
simple_title = _simplify_title(video_title)
# Extract video description
mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video description')
return
video_description = mobj.group(1).decode('utf-8')
if not video_description:
video_description = 'No description available.'
# Extract video thumbnail
if self._downloader.params.get('forcethumbnail', False):
request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return
video_thumbnail = mobj.group(1)
else: # we need something to pass to process_info
video_thumbnail = ''
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': u'NA',
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class PhotobucketIE(InfoExtractor):
"""Information extractor for photobucket.com."""
_VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
IE_NAME = u'photobucket'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# At this point we have a new video
self._downloader.increment_downloads()
video_id = mobj.group(1)
video_extension = 'flv'
# Retrieve video webpage to extract further information
request = urllib2.Request(url)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
# Extract URL, uploader, and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mediaURL = urllib.unquote(mobj.group(1))
video_url = mediaURL
mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
video_title = sanitize_title(video_title)
simple_title = _simplify_title(video_title)
video_uploader = mobj.group(2).decode('utf-8')
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class YahooIE(InfoExtractor):
"""Information extractor for video.yahoo.com."""
# _VALID_URL matches all Yahoo! Video URLs
# _VPAGE_URL matches only the extractable '/watch/' URLs
_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
IE_NAME = u'video.yahoo'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
def _real_extract(self, url, new_video=True):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# At this point we have a new video
self._downloader.increment_downloads()
video_id = mobj.group(2)
video_extension = 'flv'
# Rewrite valid but non-extractable URLs as
# extractable English language /watch/ URLs
if re.match(self._VPAGE_URL, url) is None:
request = urllib2.Request(url)
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract id field')
return
yahoo_id = mobj.group(1)
mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract vid field')
return
yahoo_vid = mobj.group(1)
url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
return self._real_extract(url, new_video=False)
# Retrieve video webpage to extract further information
request = urllib2.Request(url)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
# Extract uploader and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = mobj.group(1).decode('utf-8')
simple_title = _simplify_title(video_title)
mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video uploader')
return
video_uploader = mobj.group(2).decode('utf-8')
# Extract video thumbnail
mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return
video_thumbnail = mobj.group(1).decode('utf-8')
# Extract video description
mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video description')
return
video_description = mobj.group(1).decode('utf-8')
if not video_description:
video_description = 'No description available.'
# Extract video height and width
mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video height')
return
yv_video_height = mobj.group(1)
mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video width')
return
yv_video_width = mobj.group(1)
# Retrieve video playlist to extract media URL
# I'm not completely sure what all these options are, but we
# seem to need most of them, otherwise the server sends a 401.
yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
yv_bitrate = '700' # according to Wikipedia this is hard-coded
request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
# Extract media URL from playlist XML
mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Unable to extract media URL')
return
video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url,
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description,
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class VimeoIE(InfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
IE_NAME = u'vimeo'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
def _real_extract(self, url, new_video=True):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# At this point we have a new video
self._downloader.increment_downloads()
video_id = mobj.group(1)
# Retrieve video webpage to extract further information
request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
# Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors,
# and later we extract those that are Vimeo specific.
self.report_extraction(video_id)
# Extract title
mobj = re.search(r'<caption>(.*?)</caption>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = mobj.group(1).decode('utf-8')
simple_title = _simplify_title(video_title)
# Extract uploader
mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video uploader')
return
video_uploader = mobj.group(1).decode('utf-8')
# Extract video thumbnail
mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return
video_thumbnail = mobj.group(1).decode('utf-8')
# # Extract video description
# mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
# if mobj is None:
# self._downloader.trouble(u'ERROR: unable to extract video description')
# return
# video_description = mobj.group(1).decode('utf-8')
# if not video_description: video_description = 'No description available.'
video_description = u'No description available.'
# Vimeo specific: extract request signature
mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract request signature')
return
sig = mobj.group(1).decode('utf-8')
# Vimeo specific: extract video quality information
mobj = re.search(r'<isHD>(\d+)</isHD>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video quality information')
return
quality = mobj.group(1).decode('utf-8')
if int(quality) == 1:
quality = 'hd'
else:
quality = 'sd'
# Vimeo specific: Extract request signature expiration
mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
return
sig_exp = mobj.group(1).decode('utf-8')
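# The final play URL is assembled from the clip id, the request
# signature, its expiry timestamp and the quality flag extracted above.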
video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=%s" % (video_id, sig, sig_exp, quality)
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url,
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': u'mp4',
'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description,
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download video')
class GenericIE(InfoExtractor):
"""Generic last-resort information extractor."""
_VALID_URL = r'.*'
IE_NAME = u'generic'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
def _real_extract(self, url):
# At this point we have a new video
self._downloader.increment_downloads()
video_id = url.split('/')[-1]
request = urllib2.Request(url)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
except ValueError, err:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
self.report_extraction(video_id)
# Start with something easy: JW Player in SWFObject
mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
if mobj is None:
# Broaden the search a little bit
mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
# It's possible that one of the regexes
# matched, but returned an empty group:
if mobj.group(1) is None:
self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
return
video_url = urllib.unquote(mobj.group(1))
video_id = os.path.basename(video_url)
# here's a fun little line of code for you:
video_extension = os.path.splitext(video_id)[1][1:]
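# os.path.splitext('video.mp4') yields ('video', '.mp4'); [1][1:]
# drops the leading dot, leaving 'mp4'.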
video_id = os.path.splitext(video_id)[0]
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
video_title = sanitize_title(video_title)
simple_title = _simplify_title(video_title)
# video uploader is domain name
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video uploader')
return
video_uploader = mobj.group(1).decode('utf-8')
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_url.decode('utf-8'),
'uploader': video_uploader,
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class YoutubeSearchIE(InfoExtractor):
"""Information Extractor for YouTube search queries."""
_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
_youtube_ie = None
_max_youtube_results = 1000
IE_NAME = u'youtube:search'
def __init__(self, youtube_ie, downloader=None):
InfoExtractor.__init__(self, downloader)
self._youtube_ie = youtube_ie
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
def _real_initialize(self):
self._youtube_ie.initialize()
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
return
prefix, query = query.split(':')
prefix = prefix[8:]
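# len('ytsearch') == 8, so this strips the literal prefix and leaves
# the requested result count: '', 'all' or a number.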
query = query.encode('utf-8')
if prefix == '':
self._download_n_results(query, 1)
return
elif prefix == 'all':
self._download_n_results(query, self._max_youtube_results)
return
else:
try:
n = long(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_youtube_results:
self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results
self._download_n_results(query, n)
return
except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1)
return
def _download_n_results(self, query, n):
"""Downloads a specified number of results for a query"""
video_ids = []
already_seen = set()
pagenum = 1
while True:
self.report_download_page(query, pagenum)
result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
request = urllib2.Request(result_url)
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return
# Extract video identifiers
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
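# The match looks like href="/watch?v=abcdefghijk"; split('=')[2] is
# the id plus the trailing quote, which [:-1] drops.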
if video_id not in already_seen:
video_ids.append(video_id)
already_seen.add(video_id)
if len(video_ids) == n:
# Specified n videos reached
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
if re.search(self._MORE_PAGES_INDICATOR, page) is None:
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
pagenum = pagenum + 1
class GoogleSearchIE(InfoExtractor):
"""Information Extractor for Google Video search queries."""
_VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
_VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
_MORE_PAGES_INDICATOR = r'<span>Next</span>'
_google_ie = None
_max_google_results = 1000
IE_NAME = u'video.google:search'
def __init__(self, google_ie, downloader=None):
InfoExtractor.__init__(self, downloader)
self._google_ie = google_ie
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
def _real_initialize(self):
self._google_ie.initialize()
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
return
prefix, query = query.split(':')
prefix = prefix[8:]
query = query.encode('utf-8')
if prefix == '':
self._download_n_results(query, 1)
return
elif prefix == 'all':
self._download_n_results(query, self._max_google_results)
return
else:
try:
n = long(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_google_results:
self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
n = self._max_google_results
self._download_n_results(query, n)
return
except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1)
return
def _download_n_results(self, query, n):
"""Downloads a specified number of results for a query"""
video_ids = []
already_seen = set()
pagenum = 1
while True:
self.report_download_page(query, pagenum)
result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
request = urllib2.Request(result_url)
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return
# Extract video identifiers
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
video_id = mobj.group(1)
if video_id not in already_seen:
video_ids.append(video_id)
already_seen.add(video_id)
if len(video_ids) == n:
# Specified n videos reached
for id in video_ids:
self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
return
if re.search(self._MORE_PAGES_INDICATOR, page) is None:
for id in video_ids:
self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
return
pagenum = pagenum + 1
class YahooSearchIE(InfoExtractor):
"""Information Extractor for Yahoo! Video search queries."""
_VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
_MORE_PAGES_INDICATOR = r'\s*Next'
_yahoo_ie = None
_max_yahoo_results = 1000
IE_NAME = u'video.yahoo:search'
def __init__(self, yahoo_ie, downloader=None):
InfoExtractor.__init__(self, downloader)
self._yahoo_ie = yahoo_ie
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
def _real_initialize(self):
self._yahoo_ie.initialize()
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
return
prefix, query = query.split(':')
prefix = prefix[8:]
query = query.encode('utf-8')
if prefix == '':
self._download_n_results(query, 1)
return
elif prefix == 'all':
self._download_n_results(query, self._max_yahoo_results)
return
else:
try:
n = long(prefix)
if n <= 0:
self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
self._download_n_results(query, n)
return
except ValueError: # parsing prefix as integer fails
self._download_n_results(query, 1)
return
def _download_n_results(self, query, n):
"""Downloads a specified number of results for a query"""
video_ids = []
already_seen = set()
pagenum = 1
while True:
self.report_download_page(query, pagenum)
result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
request = urllib2.Request(result_url)
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return
# Extract video identifiers
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
video_id = mobj.group(1)
if video_id not in already_seen:
video_ids.append(video_id)
already_seen.add(video_id)
if len(video_ids) == n:
# Specified n videos reached
for id in video_ids:
self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
return
if re.search(self._MORE_PAGES_INDICATOR, page) is None:
for id in video_ids:
self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
return
pagenum = pagenum + 1
class YoutubePlaylistIE(InfoExtractor):
"""Information Extractor for YouTube playlists."""
_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
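# Matches, among others, view_play_list?p=ID, playlist?list=ID, /p/ID
# and user/NAME#p/c/ID style URLs; group 3, when present, addresses a
# single video inside the list.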
_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
_youtube_ie = None
IE_NAME = u'youtube:playlist'
def __init__(self, youtube_ie, downloader=None):
InfoExtractor.__init__(self, downloader)
self._youtube_ie = youtube_ie
def report_download_page(self, playlist_id, pagenum):
"""Report attempt to download playlist page with given number."""
self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
def _real_initialize(self):
self._youtube_ie.initialize()
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return
# Single video case
if mobj.group(3) is not None:
self._youtube_ie.extract(mobj.group(3))
return
# Download playlist pages
# prefix is 'p' by default for playlists, but other types need extra care
playlist_prefix = mobj.group(1)
if playlist_prefix == 'a':
playlist_access = 'artist'
else:
playlist_prefix = 'p'
playlist_access = 'view_play_list'
playlist_id = mobj.group(2)
video_ids = []
pagenum = 1
while True:
self.report_download_page(playlist_id, pagenum)
url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
request = urllib2.Request(url)
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return
# Extract video identifiers
ids_in_page = []
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page)
if re.search(self._MORE_PAGES_INDICATOR, page) is None:
break
pagenum = pagenum + 1
playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1:
video_ids = video_ids[playliststart:]
else:
video_ids = video_ids[playliststart:playlistend]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
class YoutubeUserIE(InfoExtractor):
"""Information Extractor for YouTube users."""
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50
_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
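# e.g. http://gdata.youtube.com/feeds/api/users/USERNAME/uploads?max-results=50&start-index=1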
_VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
_youtube_ie = None
IE_NAME = u'youtube:user'
def __init__(self, youtube_ie, downloader=None):
InfoExtractor.__init__(self, downloader)
self._youtube_ie = youtube_ie
def report_download_page(self, username, start_index):
"""Report attempt to download user page."""
self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
(username, start_index, start_index + self._GDATA_PAGE_SIZE))
def _real_initialize(self):
self._youtube_ie.initialize()
def _real_extract(self, url):
# Extract username
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid url: %s' % url)
return
username = mobj.group(1)
# Download video ids using YouTube Data API. Result size per
# query is limited (currently to 50 videos) so we need to query
# page by page until no more video ids come back, which means we
# got all of them.
video_ids = []
pagenum = 0
while True:
start_index = pagenum * self._GDATA_PAGE_SIZE + 1
self.report_download_page(username, start_index)
request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
try:
page = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
return
# Extract video identifiers
ids_in_page = []
for mobj in re.finditer(self._VIDEO_INDICATOR, page):
if mobj.group(1) not in ids_in_page:
ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page)
# A little optimization - if current page is not
# "full", ie. does not contain PAGE_SIZE video ids then
# we can assume that this page is the last one - there
# are no more ids on further pages - no need to query
# again.
if len(ids_in_page) < self._GDATA_PAGE_SIZE:
break
pagenum += 1
all_ids_count = len(video_ids)
playliststart = self._downloader.params.get('playliststart', 1) - 1
playlistend = self._downloader.params.get('playlistend', -1)
if playlistend == -1:
video_ids = video_ids[playliststart:]
else:
video_ids = video_ids[playliststart:playlistend]
self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
(username, all_ids_count, len(video_ids)))
for video_id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
class DepositFilesIE(InfoExtractor):
"""Information extractor for depositfiles.com"""
_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
IE_NAME = u'DepositFiles'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, file_id):
"""Report webpage download."""
self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
def report_extraction(self, file_id):
"""Report information extraction."""
self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
def _real_extract(self, url):
# At this point we have a new file
self._downloader.increment_downloads()
file_id = url.split('/')[-1]
# Rebuild url in english locale
url = 'http://depositfiles.com/en/files/' + file_id
# Retrieve file webpage with 'Free download' button pressed
free_download_indication = { 'gateway_result' : '1' }
request = urllib2.Request(url, urllib.urlencode(free_download_indication))
try:
self.report_download_webpage(file_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
return
# Search for the real file URL
mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
if (mobj is None) or (mobj.group(1) is None):
# Try to figure out the reason for the error.
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
if (mobj is not None) and (mobj.group(1) is not None):
restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
self._downloader.trouble(u'ERROR: %s' % restriction_message)
else:
self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
return
file_url = mobj.group(1)
file_extension = os.path.splitext(file_url)[1][1:]
# Search for file title
mobj = re.search(r'<b title="(.*?)">', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
file_title = mobj.group(1).decode('utf-8')
try:
# Process file information
self._downloader.process_info({
'id': file_id.decode('utf-8'),
'url': file_url.decode('utf-8'),
'uploader': u'NA',
'upload_date': u'NA',
'title': file_title,
'stitle': file_title,
'ext': file_extension.decode('utf-8'),
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'ERROR: unable to download file')
class FacebookIE(InfoExtractor):
"""Information Extractor for Facebook"""
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
_NETRC_MACHINE = 'facebook'
_available_formats = ['video', 'highqual', 'lowqual']
_video_extensions = {
'video': 'mp4',
'highqual': 'mp4',
'lowqual': 'mp4',
}
IE_NAME = u'facebook'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def _reporter(self, message):
"""Add header and report message."""
self._downloader.to_screen(u'[facebook] %s' % message)
def report_login(self):
"""Report attempt to log in."""
self._reporter(u'Logging in')
def report_video_webpage_download(self, video_id):
"""Report attempt to download video webpage."""
self._reporter(u'%s: Downloading video webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self._reporter(u'%s: Extracting video information' % video_id)
def _parse_page(self, video_webpage):
"""Extract video information from page"""
# General data
data = {'title': r'\("video_title", "(.*?)"\)',
'description': r'<div class="datawrap">(.*?)</div>',
'owner': r'\("video_owner_name", "(.*?)"\)',
'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
}
video_info = {}
for piece in data.keys():
mobj = re.search(data[piece], video_webpage)
if mobj is not None:
video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
# Video urls
video_urls = {}
for fmt in self._available_formats:
mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
if mobj is not None:
				# The URL sits in a JavaScript segment, Unicode-escaped inside the
				# (generally UTF-8) page
video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
video_info['video_urls'] = video_urls
return video_info
def _real_initialize(self):
if self._downloader is None:
return
useremail = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
useremail = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
useremail = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError), err:
self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
return
if useremail is None:
return
# Log in
login_form = {
'email': useremail,
'pass': password,
'login': 'Log+In'
}
request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
try:
self.report_login()
login_results = urllib2.urlopen(request).read()
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
return
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group('ID')
# Get video webpage
self.report_video_webpage_download(video_id)
request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
try:
page = urllib2.urlopen(request)
video_webpage = page.read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
# Start extracting information
self.report_information_extraction(video_id)
# Extract information
video_info = self._parse_page(video_webpage)
# uploader
if 'owner' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
video_uploader = video_info['owner']
# title
if 'title' not in video_info:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = video_info['title']
video_title = video_title.decode('utf-8')
video_title = sanitize_title(video_title)
simple_title = _simplify_title(video_title)
# thumbnail image
if 'thumbnail' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
video_thumbnail = ''
else:
video_thumbnail = video_info['thumbnail']
# upload date
upload_date = u'NA'
if 'upload_date' in video_info:
upload_time = video_info['upload_date']
timetuple = email.utils.parsedate_tz(upload_time)
if timetuple is not None:
try:
upload_date = time.strftime('%Y%m%d', timetuple[0:9])
except:
pass
# description
video_description = video_info.get('description', 'No description available.')
url_map = video_info['video_urls']
if len(url_map.keys()) > 0:
# Decide which formats to download
req_format = self._downloader.params.get('format', None)
format_limit = self._downloader.params.get('format_limit', None)
if format_limit is not None and format_limit in self._available_formats:
format_list = self._available_formats[self._available_formats.index(format_limit):]
else:
format_list = self._available_formats
existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0:
self._downloader.trouble(u'ERROR: no known formats available for video')
return
if req_format is None:
video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
elif req_format == 'worst':
				video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
elif req_format == '-1':
video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
else:
# Specific format
if req_format not in url_map:
self._downloader.trouble(u'ERROR: requested format not available')
return
video_url_list = [(req_format, url_map[req_format])] # Specific format
for format_param, video_real_url in video_url_list:
# At this point we have a new video
self._downloader.increment_downloads()
# Extension
video_extension = self._video_extensions.get(format_param, 'mp4')
try:
# Process video information
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': video_real_url.decode('utf-8'),
'uploader': video_uploader.decode('utf-8'),
'upload_date': upload_date,
'title': video_title,
'stitle': simple_title,
'ext': video_extension.decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': video_thumbnail.decode('utf-8'),
'description': video_description.decode('utf-8'),
'player_url': None,
})
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download video')
class BlipTVIE(InfoExtractor):
"""Information extractor for blip.tv"""
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
_URL_EXT = r'^.*\.([a-z0-9]+)$'
IE_NAME = u'blip.tv'
def report_extraction(self, file_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
def report_direct_download(self, title):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
if '?' in url:
cchar = '&'
else:
cchar = '?'
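		# Appending skin=json&version=2&no_wrap=1 makes blip.tv return the post
		# metadata as JSON instead of an HTML page (parsed further below)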
json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
request = urllib2.Request(json_url)
self.report_extraction(mobj.group(1))
info = None
try:
urlh = urllib2.urlopen(request)
if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download
basename = url.split('/')[-1]
title,ext = os.path.splitext(basename)
title = title.decode('UTF-8')
ext = ext.replace('.', '')
self.report_direct_download(title)
info = {
'id': title,
'url': url,
'title': title,
'stitle': _simplify_title(title),
'ext': ext,
'urlhandle': urlh
}
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
return
if info is None: # Regular URL
try:
json_code = urlh.read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err))
return
try:
json_data = json.loads(json_code)
if 'Post' in json_data:
data = json_data['Post']
else:
data = json_data
upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
video_url = data['media']['url']
umobj = re.match(self._URL_EXT, video_url)
if umobj is None:
raise ValueError('Can not determine filename extension')
ext = umobj.group(1)
info = {
'id': data['item_id'],
'url': video_url,
'uploader': data['display_name'],
'upload_date': upload_date,
'title': data['title'],
'stitle': _simplify_title(data['title']),
'ext': ext,
'format': data['media']['mimeType'],
'thumbnail': data['thumbnailUrl'],
'description': data['description'],
'player_url': data['embedUrl']
}
except (ValueError,KeyError), err:
self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
return
self._downloader.increment_downloads()
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download video')
class MyVideoIE(InfoExtractor):
"""Information Extractor for myvideo.de."""
_VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
IE_NAME = u'myvideo'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(1)
# Get video webpage
request = urllib2.Request('http://www.myvideo.de/watch/%s' % video_id)
try:
self.report_download_webpage(video_id)
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
return
self.report_extraction(video_id)
mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />',
webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
video_url = mobj.group(1) + ('/%s.flv' % video_id)
mobj = re.search('<title>([^<]+)</title>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract title')
return
video_title = mobj.group(1)
video_title = sanitize_title(video_title)
simple_title = _simplify_title(video_title)
try:
self._downloader.process_info({
'id': video_id,
'url': video_url,
'uploader': u'NA',
'upload_date': u'NA',
'title': video_title,
'stitle': simple_title,
'ext': u'flv',
'format': u'NA',
'player_url': None,
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: Unable to download video')
class ComedyCentralIE(InfoExtractor):
"""Information extractor for The Daily Show and Colbert Report """
_VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
IE_NAME = u'comedycentral'
def report_extraction(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
def report_config_download(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)
def report_index_download(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
def report_player_url(self, episode_id):
self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
url = u'http://www.thedailyshow.com/full-episodes/'
else:
url = u'http://www.colbertnation.com/full-episodes/'
mobj = re.match(self._VALID_URL, url)
assert mobj is not None
dlNewest = not mobj.group('episode')
if dlNewest:
epTitle = mobj.group('showname')
else:
epTitle = mobj.group('episode')
req = urllib2.Request(url)
self.report_extraction(epTitle)
try:
htmlHandle = urllib2.urlopen(req)
html = htmlHandle.read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
return
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
return
if mobj.group('episode') == '':
self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
return
epTitle = mobj.group('episode')
mMovieParams = re.findall('<param name="movie" value="(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"/>', html)
if len(mMovieParams) == 0:
self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
return
playerUrl_raw = mMovieParams[0][0]
self.report_player_url(epTitle)
try:
urlHandle = urllib2.urlopen(playerUrl_raw)
playerUrl = urlHandle.geturl()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to find out player URL: ' + unicode(err))
return
uri = mMovieParams[0][1]
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + urllib.urlencode({'uri': uri})
self.report_index_download(epTitle)
try:
indexXml = urllib2.urlopen(indexUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download episode index: ' + unicode(err))
return
idoc = xml.etree.ElementTree.fromstring(indexXml)
itemEls = idoc.findall('.//item')
for itemEl in itemEls:
mediaId = itemEl.findall('./guid')[0].text
shortMediaId = mediaId.split(':')[-1]
showId = mediaId.split(':')[-2].replace('.com', '')
officialTitle = itemEl.findall('./title')[0].text
officialDate = itemEl.findall('./pubDate')[0].text
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
urllib.urlencode({'uri': mediaId}))
configReq = urllib2.Request(configUrl)
self.report_config_download(epTitle)
try:
configXml = urllib2.urlopen(configReq).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % unicode(err))
return
cdoc = xml.etree.ElementTree.fromstring(configXml)
turls = []
for rendition in cdoc.findall('.//rendition'):
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
turls.append(finfo)
if len(turls) == 0:
self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
continue
# For now, just pick the highest bitrate
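			# (turls is built in document order and never sorted, so this assumes
			# the feed lists <rendition> elements in ascending bitrate order)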
format,video_url = turls[-1]
self._downloader.increment_downloads()
effTitle = showId + u'-' + epTitle
info = {
'id': shortMediaId,
'url': video_url,
'uploader': showId,
'upload_date': officialDate,
'title': effTitle,
'stitle': _simplify_title(effTitle),
'ext': 'mp4',
'format': format,
'thumbnail': None,
'description': officialTitle,
'player_url': playerUrl
}
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download ' + mediaId)
continue
class EscapistIE(InfoExtractor):
"""Information extractor for The Escapist """
_VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
IE_NAME = u'escapist'
def report_extraction(self, showName):
self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
def report_config_download(self, showName):
self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
def _real_extract(self, url):
htmlParser = HTMLParser.HTMLParser()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
showName = mobj.group('showname')
videoId = mobj.group('episode')
self.report_extraction(showName)
try:
webPage = urllib2.urlopen(url).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: ' + unicode(err))
return
descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
description = htmlParser.unescape(descMatch.group(1))
imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage)
imgUrl = htmlParser.unescape(imgMatch.group(1))
playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage)
playerUrl = htmlParser.unescape(playerUrlMatch.group(1))
configUrlMatch = re.search('config=(.*)$', playerUrl)
configUrl = urllib2.unquote(configUrlMatch.group(1))
self.report_config_download(showName)
try:
configJSON = urllib2.urlopen(configUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download configuration: ' + unicode(err))
return
# Technically, it's JavaScript, not JSON
configJSON = configJSON.replace("'", '"')
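		# Note: this blanket quote swap assumes the configuration contains no
		# embedded quote characters; if it does, json.loads below will fail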
try:
config = json.loads(configJSON)
except (ValueError,), err:
self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + unicode(err))
return
playlist = config['playlist']
videoUrl = playlist[1]['url']
self._downloader.increment_downloads()
info = {
'id': videoId,
'url': videoUrl,
'uploader': showName,
'upload_date': None,
'title': showName,
'stitle': _simplify_title(showName),
'ext': 'flv',
'format': 'flv',
'thumbnail': imgUrl,
'description': description,
'player_url': playerUrl,
}
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download ' + videoId)
class CollegeHumorIE(InfoExtractor):
"""Information extractor for collegehumor.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'
IE_NAME = u'collegehumor'
def report_webpage(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def _real_extract(self, url):
htmlParser = HTMLParser.HTMLParser()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group('videoid')
self.report_webpage(video_id)
request = urllib2.Request(url)
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
if m is None:
self._downloader.trouble(u'ERROR: Cannot extract internal video ID')
return
internal_video_id = m.group('internalvideoid')
info = {
'id': video_id,
'internal_id': internal_video_id,
}
self.report_extraction(video_id)
xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
try:
metaXml = urllib2.urlopen(xmlUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err))
return
mdoc = xml.etree.ElementTree.fromstring(metaXml)
try:
videoNode = mdoc.findall('./video')[0]
info['description'] = videoNode.findall('./description')[0].text
info['title'] = videoNode.findall('./caption')[0].text
info['stitle'] = _simplify_title(info['title'])
info['url'] = videoNode.findall('./file')[0].text
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
info['ext'] = info['url'].rpartition('.')[2]
info['format'] = info['ext']
except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
return
self._downloader.increment_downloads()
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download video')
class XVideosIE(InfoExtractor):
"""Information extractor for xvideos.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
IE_NAME = u'xvideos'
def report_webpage(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def _real_extract(self, url):
htmlParser = HTMLParser.HTMLParser()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
video_id = mobj.group(1).decode('utf-8')
self.report_webpage(video_id)
request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
self.report_extraction(video_id)
# Extract video URL
mobj = re.search(r'flv_url=(.+?)&', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video url')
return
video_url = urllib2.unquote(mobj.group(1).decode('utf-8'))
# Extract title
mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = mobj.group(1).decode('utf-8')
# Extract video thumbnail
mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]/[a-fA-F0-9]/[a-fA-F0-9]/([a-fA-F0-9.]+jpg)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
return
video_thumbnail = mobj.group(1).decode('utf-8')
self._downloader.increment_downloads()
info = {
'id': video_id,
'url': video_url,
'uploader': None,
'upload_date': None,
'title': video_title,
'stitle': _simplify_title(video_title),
'ext': 'flv',
'format': 'flv',
'thumbnail': video_thumbnail,
'description': None,
'player_url': None,
}
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download ' + video_id)
class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com
2011-11-10 10:04:33 +01:00
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
the media can be grabbed by requesting from an url composed
of the stream token and uid
"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
IE_NAME = u'soundcloud'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_webpage(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def _real_extract(self, url):
htmlParser = HTMLParser.HTMLParser()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
# extract uploader (which is in the url)
uploader = mobj.group(1).decode('utf-8')
# extract simple title (uploader + slug of song title)
slug_title = mobj.group(2).decode('utf-8')
simple_title = uploader + '-' + slug_title
self.report_webpage('%s/%s' % (uploader, slug_title))
request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
self.report_extraction('%s/%s' % (uploader, slug_title))
# extract uid and stream token that soundcloud hands out for access
mobj = re.search('"uid":"([\w\d]+?)".*?stream_token=([\w\d]+)', webpage)
if mobj:
video_id = mobj.group(1)
stream_token = mobj.group(2)
# extract unsimplified title
mobj = re.search('"title":"(.*?)",', webpage)
if mobj:
title = mobj.group(1)
# construct media url (with uid/token)
mediaURL = "http://media.soundcloud.com/stream/%s?stream_token=%s"
mediaURL = mediaURL % (video_id, stream_token)
# description
description = u'No description available'
mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
if mobj:
description = mobj.group(1)
# upload date
upload_date = None
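		# the page shows a pretty date such as "November 13, 2011 01:48"
		# (illustrative sample); strptime below converts it to YYYYMMDD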
mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
if mobj:
try:
upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
			except Exception, e:
				self._downloader.to_stderr(u'WARNING: unable to parse upload date: %s' % str(e))
# for soundcloud, a request to a cross domain is required for cookies
request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
try:
self._downloader.process_info({
'id': video_id.decode('utf-8'),
'url': mediaURL,
'uploader': uploader.decode('utf-8'),
'upload_date': upload_date,
'title': simple_title.decode('utf-8'),
'stitle': simple_title.decode('utf-8'),
'ext': u'mp3',
'format': u'NA',
'player_url': None,
'description': description.decode('utf-8')
})
except UnavailableVideoError:
self._downloader.trouble(u'\nERROR: unable to download video')
class InfoQIE(InfoExtractor):
"""Information extractor for infoq.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
IE_NAME = u'infoq'
def report_webpage(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def _real_extract(self, url):
htmlParser = HTMLParser.HTMLParser()
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
self.report_webpage(url)
request = urllib2.Request(url)
try:
webpage = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
return
self.report_extraction(url)
# Extract video URL
mobj = re.search(r"jsclassref='([^']*)'", webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video url')
return
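		# jsclassref holds a base64-encoded, URL-quoted path; decoding it and
		# prefixing the RTMPE server yields the final stream URL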
video_url = 'rtmpe://video.infoq.com/cfx/st/' + urllib2.unquote(mobj.group(1).decode('base64'))
# Extract title
mobj = re.search(r'contentTitle = "(.*?)";', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract video title')
return
video_title = mobj.group(1).decode('utf-8')
# Extract description
video_description = u'No description available.'
mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage)
if mobj is not None:
video_description = mobj.group(1).decode('utf-8')
video_filename = video_url.split('/')[-1]
video_id, extension = video_filename.split('.')
self._downloader.increment_downloads()
info = {
'id': video_id,
'url': video_url,
'uploader': None,
'upload_date': None,
'title': video_title,
'stitle': _simplify_title(video_title),
'ext': extension,
			'format': extension, # the URL extension says mp4, but the stream appears to be flv
'thumbnail': None,
'description': video_description,
'player_url': None,
}
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download ' + video_url)
class MixcloudIE(InfoExtractor):
"""Information extractor for www.mixcloud.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
IE_NAME = u'mixcloud'
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
def report_download_json(self, file_id):
"""Report JSON download."""
		self._downloader.to_screen(u'[%s] %s: Downloading json' % (self.IE_NAME, file_id))
def report_extraction(self, file_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
def get_urls(self, jsonData, fmt, bitrate='best'):
"""Get urls from 'audio_formats' section in json"""
file_url = None
try:
bitrate_list = jsonData[fmt]
if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
bitrate = max(bitrate_list) # select highest
url_list = jsonData[fmt][bitrate]
except TypeError: # we have no bitrate info.
url_list = jsonData[fmt]
return url_list
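	# Assumed shape of the 'audio_formats' JSON section, inferred from the
	# handling above (concrete keys and bitrates are illustrative, not a
	# documented API guarantee):
	#   {'mp3': {'128': ['http://...', ...], '320': [...]},
	#    'ogg': ['http://...']}   # entries without bitrate info are plain lists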
	def check_urls(self, url_list):
		"""Return the first active url from url_list, or None if none respond."""
		for url in url_list:
			try:
				urllib2.urlopen(url)
				return url
			except (urllib2.URLError, httplib.HTTPException, socket.error):
				pass
		return None
def _print_formats(self, formats):
print 'Available formats:'
for fmt in formats.keys():
for b in formats[fmt]:
try:
ext = formats[fmt][b][0]
print '%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])
except TypeError: # we have no bitrate info
ext = formats[fmt][0]
print '%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])
break
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
# extract uploader & filename from url
uploader = mobj.group(1).decode('utf-8')
file_id = uploader + "-" + mobj.group(2).decode('utf-8')
# construct API request
file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
# retrieve .json file with links to files
request = urllib2.Request(file_url)
try:
self.report_download_json(file_url)
jsonData = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % str(err))
return
# parse JSON
json_data = json.loads(jsonData)
player_url = json_data['player_swf_url']
formats = dict(json_data['audio_formats'])
req_format = self._downloader.params.get('format', None)
bitrate = None
if self._downloader.params.get('listformats', None):
self._print_formats(formats)
return
if req_format is None or req_format == 'best':
for format_param in formats.keys():
url_list = self.get_urls(formats, format_param)
# check urls
file_url = self.check_urls(url_list)
if file_url is not None:
break # got it!
else:
if req_format not in formats.keys():
self._downloader.trouble(u'ERROR: format is not available')
return
url_list = self.get_urls(formats, req_format)
file_url = self.check_urls(url_list)
format_param = req_format
# We have audio
self._downloader.increment_downloads()
try:
# Process file information
self._downloader.process_info({
'id': file_id.decode('utf-8'),
'url': file_url.decode('utf-8'),
'uploader': uploader.decode('utf-8'),
'upload_date': u'NA',
'title': json_data['name'],
'stitle': _simplify_title(json_data['name']),
'ext': file_url.split('.')[-1].decode('utf-8'),
'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
'thumbnail': json_data['thumbnail_url'],
'description': json_data['description'],
'player_url': player_url.decode('utf-8'),
})
except UnavailableVideoError, err:
self._downloader.trouble(u'ERROR: unable to download file')
class StanfordOpenClassroomIE(InfoExtractor):
"""Information extractor for Stanford's Open ClassRoom"""
_VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
IE_NAME = u'stanfordoc'
def report_download_webpage(self, objid):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid))
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
return
if mobj.group('course') and mobj.group('video'): # A specific video
course = mobj.group('course')
video = mobj.group('video')
info = {
'id': _simplify_title(course + '_' + video),
}
self.report_extraction(info['id'])
baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
xmlUrl = baseUrl + video + '.xml'
try:
metaXml = urllib2.urlopen(xmlUrl).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % unicode(err))
return
mdoc = xml.etree.ElementTree.fromstring(metaXml)
try:
info['title'] = mdoc.findall('./title')[0].text
info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
except IndexError:
self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
return
info['stitle'] = _simplify_title(info['title'])
info['ext'] = info['url'].rpartition('.')[2]
info['format'] = info['ext']
self._downloader.increment_downloads()
try:
self._downloader.process_info(info)
except UnavailableVideoError, err:
self._downloader.trouble(u'\nERROR: unable to download video')
elif mobj.group('course'): # A course page
unescapeHTML = HTMLParser.HTMLParser().unescape
course = mobj.group('course')
info = {
'id': _simplify_title(course),
'type': 'playlist',
}
self.report_download_webpage(info['id'])
try:
coursepage = urllib2.urlopen(url).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
return
m = re.search('<h1>([^<]+)</h1>', coursepage)
if m:
info['title'] = unescapeHTML(m.group(1))
else:
info['title'] = info['id']
info['stitle'] = _simplify_title(info['title'])
m = re.search('<description>([^<]+)</description>', coursepage)
if m:
info['description'] = unescapeHTML(m.group(1))
links = _orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
info['list'] = [
{
'type': 'reference',
'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
}
for vpage in links]
for entry in info['list']:
assert entry['type'] == 'reference'
self.extract(entry['url'])
else: # Root page
unescapeHTML = HTMLParser.HTMLParser().unescape
info = {
'id': 'Stanford OpenClassroom',
'type': 'playlist',
}
self.report_download_webpage(info['id'])
rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
try:
rootpage = urllib2.urlopen(rootURL).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download course info page: ' + unicode(err))
return
info['title'] = info['id']
info['stitle'] = _simplify_title(info['title'])
links = _orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
info['list'] = [
{
'type': 'reference',
'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
}
for cpage in links]
for entry in info['list']:
assert entry['type'] == 'reference'
self.extract(entry['url'])
class PostProcessor(object):
"""Post Processor class.
PostProcessor objects can be added to downloaders with their
add_post_processor() method. When the downloader has finished a
successful download, it will take its internal chain of PostProcessors
and start calling the run() method on each one of them, first with
an initial argument and then with the returned value of the previous
PostProcessor.
The chain will be stopped if one of them ever returns None or the end
of the chain is reached.
PostProcessor objects follow a "mutual registration" process similar
to InfoExtractor objects.
"""
_downloader = None
def __init__(self, downloader=None):
self._downloader = downloader
def set_downloader(self, downloader):
"""Sets the downloader for this PP."""
self._downloader = downloader
def run(self, information):
"""Run the PostProcessor.
The "information" argument is a dictionary like the ones
composed by InfoExtractors. The only difference is that this
one has an extra field called "filepath" that points to the
downloaded file.
When this method returns None, the postprocessing chain is
stopped. However, this method may return an information
dictionary that will be passed to the next postprocessing
object in the chain. It can be the one it received after
changing some fields.
In addition, this method may raise a PostProcessingError
exception that will be taken into account by the downloader
it was called from.
"""
return information # by default, do nothing
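
# A minimal sketch of the chaining contract described in the docstring above.
# This example class is not part of the original script, is never registered,
# and its name is hypothetical; it is shown commented out for illustration:
#
# class EchoFilePathPP(PostProcessor):
# 	def run(self, information):
# 		self._downloader.to_screen(u'[echo] saved to %s' % information['filepath'])
# 		return information # returning the dict keeps the chain going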
class AudioConversionError(BaseException):
def __init__(self, message):
self.message = message
class FFmpegExtractAudioPP(PostProcessor):
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False):
PostProcessor.__init__(self, downloader)
if preferredcodec is None:
preferredcodec = 'best'
self._preferredcodec = preferredcodec
self._preferredquality = preferredquality
self._keepvideo = keepvideo
@staticmethod
def get_audio_codec(path):
try:
cmd = ['ffprobe', '-show_streams', '--', path]
handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE)
output = handle.communicate()[0]
if handle.wait() != 0:
return None
except (IOError, OSError):
return None
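		# 'ffprobe -show_streams' emits key=value lines per stream, e.g.
		# (illustrative sample):
		#   codec_name=aac
		#   codec_type=audio
		# The loop below remembers the last codec_name seen and returns it once
		# a codec_type=audio line confirms it belongs to an audio stream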
audio_codec = None
for line in output.split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
return None
@staticmethod
def run_ffmpeg(path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
cmd = ['ffmpeg', '-y', '-i', path, '-vn'] + acodec_opts + more_opts + ['--', out_path]
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = p.communicate()
except (IOError, OSError):
e = sys.exc_info()[1]
if isinstance(e, OSError) and e.errno == 2:
raise AudioConversionError('ffmpeg not found. Please install ffmpeg.')
else:
raise e
if p.returncode != 0:
msg = stderr.strip().split('\n')[-1]
raise AudioConversionError(msg)
def run(self, information):
path = information['filepath']
filecodec = self.get_audio_codec(path)
if filecodec is None:
self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
return None
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if self._preferredcodec == 'm4a' and filecodec == 'aac':
# Lossless, but in another container
acodec = 'copy'
extension = self._preferredcodec
more_opts = ['-absf', 'aac_adtstoasc']
elif filecodec in ['aac', 'mp3', 'vorbis']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
more_opts += ['-ab', self._preferredquality]
else:
# We convert the audio (lossy)
acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
extension = self._preferredcodec
more_opts = []
if self._preferredquality is not None:
more_opts += ['-ab', self._preferredquality]
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
if self._preferredcodec == 'm4a':
more_opts += ['-absf', 'aac_adtstoasc']
if self._preferredcodec == 'vorbis':
extension = 'ogg'
if self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
(prefix, ext) = os.path.splitext(path)
new_path = prefix + '.' + extension
self._downloader.to_screen(u'[ffmpeg] Destination: %s' % new_path)
try:
self.run_ffmpeg(path, new_path, acodec, more_opts)
except:
etype,e,tb = sys.exc_info()
if isinstance(e, AudioConversionError):
self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
else:
self._downloader.to_stderr(u'ERROR: error running ffmpeg')
return None
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
try:
os.utime(new_path, (time.time(), information['filetime']))
except:
self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
if not self._keepvideo:
try:
os.remove(path)
except (IOError, OSError):
self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
return None
information['filepath'] = new_path
return information
def updateSelf(downloader, filename):
''' Update the program file with the latest version from the repository '''
# Note: downloader only used for options
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
downloader.to_screen('Updating to latest version...')
try:
try:
urlh = urllib.urlopen(UPDATE_URL)
newcontent = urlh.read()
vmatch = re.search("__version__ = '([^']+)'", newcontent)
if vmatch is not None and vmatch.group(1) == __version__:
downloader.to_screen('youtube-dl is up-to-date (' + __version__ + ')')
return
finally:
urlh.close()
except (IOError, OSError), err:
sys.exit('ERROR: unable to download latest version')
try:
outf = open(filename, 'wb')
try:
outf.write(newcontent)
finally:
outf.close()
except (IOError, OSError), err:
sys.exit('ERROR: unable to overwrite current version')
downloader.to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
def parseOpts():
# Deferred imports
import getpass
import optparse
import shlex
def _readOptions(filename):
try:
optionf = open(filename)
except IOError:
return [] # silently skip if file is not present
try:
res = []
for l in optionf:
res += shlex.split(l, comments=True)
finally:
optionf.close()
return res
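	# Each line of an options file is split with shlex, so a hypothetical
	# ~/.config/youtube-dl.conf could contain, one or more options per line:
	#   -t
	#   --no-mtime   # comments are allowed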
def _format_option_string(option):
		''' ('-o', '--option') -> -o, --option METAVAR'''
opts = []
if option._short_opts: opts.append(option._short_opts[0])
if option._long_opts: opts.append(option._long_opts[0])
if len(opts) > 1: opts.insert(1, ', ')
if option.takes_value(): opts.append(' %s' % option.metavar)
return "".join(opts)
def _find_term_columns():
columns = os.environ.get('COLUMNS', None)
if columns:
return int(columns)
try:
sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,err = sp.communicate()
return int(out.split()[1])
except:
pass
return None
max_width = 80
max_help_position = 80
# No need to wrap help messages if we're on a wide console
columns = _find_term_columns()
if columns: max_width = columns
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version' : __version__,
'formatter' : fmt,
'usage' : '%prog [options] url [url...]',
'conflict_handler' : 'resolve',
}
parser = optparse.OptionParser(**kw)
# option groups
general = optparse.OptionGroup(parser, 'General Options')
selection = optparse.OptionGroup(parser, 'Video Selection')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
general.add_option('-h', '--help',
action='help', help='print this help text and exit')
general.add_option('-v', '--version',
action='version', help='print program version and exit')
general.add_option('-U', '--update',
action='store_true', dest='update_self', help='update this program to latest version')
general.add_option('-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
general.add_option('-r', '--rate-limit',
dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
general.add_option('-R', '--retries',
dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
general.add_option('--dump-user-agent',
action='store_true', dest='dump_user_agent',
help='display the current browser identification', default=False)
general.add_option('--list-extractors',
action='store_true', dest='list_extractors',
help='List all supported extractors and the URLs they would handle', default=False)
selection.add_option('--playlist-start',
dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
selection.add_option('--playlist-end',
dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
authentication.add_option('-u', '--username',
dest='username', metavar='USERNAME', help='account username')
authentication.add_option('-p', '--password',
dest='password', metavar='PASSWORD', help='account password')
authentication.add_option('-n', '--netrc',
action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
video_format.add_option('-f', '--format',
action='store', dest='format', metavar='FORMAT', help='video format code')
video_format.add_option('--all-formats',
action='store_const', dest='format', help='download all available video formats', const='all')
video_format.add_option('--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
video_format.add_option('--max-quality',
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
video_format.add_option('-F', '--list-formats',
action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
verbosity.add_option('-q', '--quiet',
action='store_true', dest='quiet', help='activates quiet mode', default=False)
verbosity.add_option('-s', '--simulate',
action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
verbosity.add_option('--skip-download',
action='store_true', dest='skip_download', help='do not download the video', default=False)
verbosity.add_option('-g', '--get-url',
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
verbosity.add_option('-e', '--get-title',
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
verbosity.add_option('--get-thumbnail',
action='store_true', dest='getthumbnail',
help='simulate, quiet but print thumbnail URL', default=False)
verbosity.add_option('--get-description',
action='store_true', dest='getdescription',
help='simulate, quiet but print video description', default=False)
verbosity.add_option('--get-filename',
action='store_true', dest='getfilename',
help='simulate, quiet but print output filename', default=False)
verbosity.add_option('--get-format',
action='store_true', dest='getformat',
help='simulate, quiet but print output format', default=False)
verbosity.add_option('--no-progress',
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
verbosity.add_option('--console-title',
action='store_true', dest='consoletitle',
help='display progress in console titlebar', default=False)
filesystem.add_option('-t', '--title',
action='store_true', dest='usetitle', help='use title in file name', default=False)
filesystem.add_option('-l', '--literal',
action='store_true', dest='useliteral', help='use literal title in file name', default=False)
filesystem.add_option('-A', '--auto-number',
action='store_true', dest='autonumber',
help='number downloaded files starting from 00000', default=False)
filesystem.add_option('-o', '--output',
dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(stitle)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), and %% for a literal percent. Use - to output to stdout.')
filesystem.add_option('-a', '--batch-file',
dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
filesystem.add_option('-w', '--no-overwrites',
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
filesystem.add_option('-c', '--continue',
action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
filesystem.add_option('--no-continue',
action='store_false', dest='continue_dl',
help='do not resume partially downloaded files (restart from beginning)')
filesystem.add_option('--cookies',
dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
filesystem.add_option('--no-part',
action='store_true', dest='nopart', help='do not use .part files', default=False)
filesystem.add_option('--no-mtime',
action='store_false', dest='updatetime',
help='do not use the Last-modified header to set the file modification time', default=True)
filesystem.add_option('--write-description',
action='store_true', dest='writedescription',
help='write video description to a .description file', default=False)
filesystem.add_option('--write-info-json',
action='store_true', dest='writeinfojson',
help='write video metadata to a .info.json file', default=False)
postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='128K',
help='ffmpeg audio bitrate specification, 128k by default')
postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
	help='keep the video file on disk after post-processing; the video is erased by default')
parser.add_option_group(general)
parser.add_option_group(selection)
parser.add_option_group(filesystem)
parser.add_option_group(verbosity)
parser.add_option_group(video_format)
parser.add_option_group(authentication)
parser.add_option_group(postproc)
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
if xdg_config_home:
userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
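	# Later sources override earlier ones when optparse sees the same option
	# twice, so the precedence here is: /etc/youtube-dl.conf first, then the
	# per-user file, then the command line (which therefore always wins).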
opts, args = parser.parse_args(argv)
return parser, opts, args
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
youtube_ie = YoutubeIE()
google_ie = GoogleIE()
yahoo_ie = YahooIE()
return [
YoutubePlaylistIE(youtube_ie),
YoutubeUserIE(youtube_ie),
YoutubeSearchIE(youtube_ie),
youtube_ie,
MetacafeIE(youtube_ie),
DailymotionIE(),
google_ie,
GoogleSearchIE(google_ie),
PhotobucketIE(),
yahoo_ie,
YahooSearchIE(yahoo_ie),
DepositFilesIE(),
FacebookIE(),
BlipTVIE(),
VimeoIE(),
MyVideoIE(),
ComedyCentralIE(),
EscapistIE(),
CollegeHumorIE(),
XVideosIE(),
SoundcloudIE(),
InfoQIE(),
MixcloudIE(),
StanfordOpenClassroomIE(),
GenericIE()
]
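
# A minimal sketch (for illustration only; the real dispatch lives in
# FileDownloader) of how the ordering above is consumed -- the first
# extractor whose suitable() accepts the URL claims it, which is why
# GenericIE must remain last as the catch-all:
#
#	for ie in gen_extractors():
#		if ie.suitable(url):
#			ie.extract(url)
#			break
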
def _real_main():
parser, opts, args = parseOpts()
# Open appropriate CookieJar
if opts.cookiefile is None:
jar = cookielib.CookieJar()
else:
try:
jar = cookielib.MozillaCookieJar(opts.cookiefile)
if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
jar.load()
except (IOError, OSError), err:
sys.exit(u'ERROR: unable to open cookie file')
# Dump user agent
if opts.dump_user_agent:
print std_headers['User-Agent']
sys.exit(0)
# Batch file verification
batchurls = []
if opts.batchfile is not None:
try:
if opts.batchfile == '-':
batchfd = sys.stdin
else:
batchfd = open(opts.batchfile, 'r')
batchurls = batchfd.readlines()
batchurls = [x.strip() for x in batchurls]
batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
except IOError:
sys.exit(u'ERROR: batch file could not be read')
all_urls = batchurls + args
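	# Illustrative batch file accepted by the filter above (an assumption
	# drawn from the regex): blank lines and lines starting with '#', '/'
	# or ';' are treated as comments and skipped, everything else is a URL.
	#
	#	# my download queue
	#	http://www.youtube.com/watch?v=BaW_jenozKc
	#	; disabled for now:
	#	; http://example.com/some/video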
# General configuration
cookie_processor = urllib2.HTTPCookieProcessor(jar)
opener = urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler())
urllib2.install_opener(opener)
socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
extractors = gen_extractors()
if opts.list_extractors:
for ie in extractors:
print(ie.IE_NAME)
matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
all_urls = filter(lambda url: url not in matchedUrls, all_urls)
for mu in matchedUrls:
print(u' ' + mu)
sys.exit(0)
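	# Sample --list-extractors output (illustrative): each extractor's
	# IE_NAME, followed by the command-line URLs it would claim, e.g.
	#	youtube
	#	 http://www.youtube.com/watch?v=BaW_jenozKc
	#	generic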
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error(u'using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
parser.error(u'account username missing')
if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
parser.error(u'using output template conflicts with using title, literal title or auto number')
if opts.usetitle and opts.useliteral:
parser.error(u'using title conflicts with using literal title')
if opts.username is not None and opts.password is None:
		import getpass # not among the module-level imports; pulled in here for the prompt
		opts.password = getpass.getpass(u'Type account password and press return:')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
parser.error(u'invalid rate limit specified')
opts.ratelimit = numeric_limit
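	# For instance, a limit given as '50k' would be turned by
	# FileDownloader.parse_bytes() into 51200 (bytes per second); the exact
	# suffixes accepted are an implementation detail of that method.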
if opts.retries is not None:
try:
opts.retries = long(opts.retries)
except (TypeError, ValueError), err:
parser.error(u'invalid retry count specified')
try:
opts.playliststart = int(opts.playliststart)
if opts.playliststart <= 0:
raise ValueError(u'Playlist start must be positive')
except (TypeError, ValueError), err:
parser.error(u'invalid playlist start number specified')
try:
opts.playlistend = int(opts.playlistend)
if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
raise ValueError(u'Playlist end must be greater than playlist start')
except (TypeError, ValueError), err:
parser.error(u'invalid playlist end number specified')
if opts.extractaudio:
if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
parser.error(u'invalid audio format specified')
# File downloader
fd = FileDownloader({
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
'forceurl': opts.geturl,
'forcetitle': opts.gettitle,
'forcethumbnail': opts.getthumbnail,
'forcedescription': opts.getdescription,
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'simulate': opts.simulate,
'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
'format': opts.format,
'format_limit': opts.format_limit,
'listformats': opts.listformats,
'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
or u'%(id)s.%(ext)s'),
'ignoreerrors': opts.ignoreerrors,
'ratelimit': opts.ratelimit,
'nooverwrites': opts.nooverwrites,
'retries': opts.retries,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'logtostderr': opts.outtmpl == '-',
'consoletitle': opts.consoletitle,
'nopart': opts.nopart,
'updatetime': opts.updatetime,
'writedescription': opts.writedescription,
'writeinfojson': opts.writeinfojson,
'matchtitle': opts.matchtitle,
'rejecttitle': opts.rejecttitle,
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
})
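	# The outtmpl chain above takes the first template whose flags are set:
	# e.g. -t alone yields u'%(stitle)s-%(id)s.%(ext)s', while no naming
	# options at all fall through to the default u'%(id)s.%(ext)s'.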
for extractor in extractors:
fd.add_info_extractor(extractor)
# PostProcessors
if opts.extractaudio:
fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo))
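		# e.g. invoking the script with --extract-audio --audio-format mp3
		# --audio-quality 192K attaches one FFmpegExtractAudioPP that
		# transcodes each finished download via ffmpeg.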
# Update version
if opts.update_self:
updateSelf(fd, sys.argv[0])
# Maybe do nothing
if len(all_urls) < 1:
if not opts.update_self:
parser.error(u'you must provide at least one URL')
else:
sys.exit()
try:
retcode = fd.download(all_urls)
except MaxDownloadsReached:
		fd.to_screen(u'--max-downloads limit reached, aborting.')
retcode = 101
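	# Exit-status convention (an observation, not enforced here): 0 means
	# success, a nonzero return from fd.download() signals download errors,
	# and 101 marks the --max-downloads abort above.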
# Dump cookie jar if requested
if opts.cookiefile is not None:
try:
jar.save()
except (IOError, OSError), err:
sys.exit(u'ERROR: unable to save cookie jar')
sys.exit(retcode)
def main():
try:
_real_main()
except DownloadError:
sys.exit(1)
except SameFileError:
sys.exit(u'ERROR: fixed output name but more than one file to download')
except KeyboardInterrupt:
sys.exit(u'\nERROR: Interrupted by user')
if __name__ == '__main__':
main()
# vim: set ts=4 sw=4 sts=4 noet ai si filetype=python: