2013-06-19 04:14:21 +08:00
#!/usr/bin/env python
# -*- coding: utf-8 -*-
2014-01-05 08:52:03 +08:00
from __future__ import absolute_import , unicode_literals
2013-06-19 04:14:21 +08:00
2013-12-10 05:00:42 +08:00
import collections
2013-10-06 10:27:09 +08:00
import errno
2013-06-19 04:14:21 +08:00
import io
2013-11-20 13:18:24 +08:00
import json
2013-06-19 04:14:21 +08:00
import os
2013-11-23 02:57:52 +08:00
import platform
2013-06-19 04:14:21 +08:00
import re
import shutil
2013-11-23 02:57:52 +08:00
import subprocess
2013-06-19 04:14:21 +08:00
import socket
import sys
import time
import traceback
2013-11-17 18:39:52 +08:00
if os . name == ' nt ' :
import ctypes
2013-11-17 23:47:52 +08:00
from . utils import (
2013-11-23 02:57:52 +08:00
compat_cookiejar ,
2013-11-17 23:47:52 +08:00
compat_http_client ,
compat_str ,
compat_urllib_error ,
compat_urllib_request ,
ContentTooShortError ,
date_from_str ,
DateRange ,
determine_ext ,
DownloadError ,
encodeFilename ,
ExtractorError ,
2013-11-25 10:12:26 +08:00
format_bytes ,
2013-12-16 11:15:10 +08:00
formatSeconds ,
2013-12-10 01:29:07 +08:00
get_term_width ,
2013-11-17 23:47:52 +08:00
locked_file ,
2013-11-23 02:57:52 +08:00
make_HTTPS_handler ,
2013-11-17 23:47:52 +08:00
MaxDownloadsReached ,
PostProcessingError ,
2013-11-23 02:57:52 +08:00
platform_name ,
2013-11-17 23:47:52 +08:00
preferredencoding ,
SameFileError ,
sanitize_filename ,
subtitles_filename ,
takewhile_inclusive ,
UnavailableVideoError ,
2013-12-17 11:13:36 +08:00
url_basename ,
2013-11-17 23:47:52 +08:00
write_json_file ,
write_string ,
2013-11-23 02:57:52 +08:00
YoutubeDLHandler ,
2014-01-04 20:13:51 +08:00
prepend_extension ,
2013-11-17 23:47:52 +08:00
)
2013-06-28 05:51:06 +08:00
from . extractor import get_info_extractor , gen_extractors
2013-09-23 23:59:27 +08:00
from . downloader import get_suitable_downloader
2014-01-07 12:49:17 +08:00
from . postprocessor import FFmpegMergerPP
2013-11-23 02:57:52 +08:00
from . version import __version__
2013-06-19 04:14:21 +08:00
class YoutubeDL ( object ) :
""" YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it , among some other tasks . In most cases there should be one per
program . As , given a video URL , the downloader doesn ' t know how to
extract all the needed information , task that InfoExtractors do , it
has to pass the URL to one of them .
For this , YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order . When it is passed
a URL , the YoutubeDL object handles it to the first InfoExtractor it
finds that reports being able to handle it . The InfoExtractor extracts
all the information about the video or videos the URL refers to , and
YoutubeDL process the extracted information , possibly using a File
Downloader to download the video .
YoutubeDL objects accept a lot of parameters . In order not to saturate
the object constructor with arguments , it receives a dictionary of
options instead . These options are available through the params
attribute for the InfoExtractors to use . The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it , so this is a " mutual registration " .
Available options :
username : Username for authentication purposes .
password : Password for authentication purposes .
2013-06-26 04:22:32 +08:00
videopassword : Password for acces a video .
2013-06-19 04:14:21 +08:00
usenetrc : Use netrc for authentication instead .
verbose : Print additional info to stdout .
quiet : Do not print messages to stdout .
forceurl : Force printing final URL .
forcetitle : Force printing title .
forceid : Force printing ID .
forcethumbnail : Force printing thumbnail URL .
forcedescription : Force printing description .
forcefilename : Force printing final filename .
2013-12-16 11:15:10 +08:00
forceduration : Force printing duration .
2013-11-20 13:18:24 +08:00
forcejson : Force printing info_dict as JSON .
2013-06-19 04:14:21 +08:00
simulate : Do not download the video files .
format : Video format code .
format_limit : Highest quality format to try .
outtmpl : Template for output names .
restrictfilenames : Do not allow " & " and spaces in file names
ignoreerrors : Do not stop on download errors .
nooverwrites : Prevent overwriting files .
playliststart : Playlist item to start at .
playlistend : Playlist item to end at .
matchtitle : Download only matching titles .
rejecttitle : Reject downloads for matching titles .
2013-11-24 13:08:11 +08:00
logger : Log messages to a logging . Logger instance .
2013-06-19 04:14:21 +08:00
logtostderr : Log messages to stderr instead of stdout .
writedescription : Write the video description to a . description file
writeinfojson : Write the video description to a . info . json file
2013-10-14 13:18:58 +08:00
writeannotations : Write the video annotations to a . annotations . xml file
2013-06-19 04:14:21 +08:00
writethumbnail : Write the thumbnail image to a file
writesubtitles : Write the video subtitles to a file
2013-06-26 05:45:16 +08:00
writeautomaticsub : Write the automatic subtitles to a file
2013-06-19 04:14:21 +08:00
allsubtitles : Downloads all the subtitles of the video
2013-09-14 17:14:40 +08:00
( requires writesubtitles or writeautomaticsub )
2013-06-19 04:14:21 +08:00
listsubtitles : Lists all available subtitles for the video
2013-06-26 17:59:29 +08:00
subtitlesformat : Subtitle format [ srt / sbv / vtt ] ( default = srt )
2013-08-24 00:34:57 +08:00
subtitleslangs : List of languages of the subtitles to download
2013-06-19 04:14:21 +08:00
keepvideo : Keep the video file after post - processing
daterange : A DateRange object , download only if the upload_date is in the range .
skip_download : Skip the actual download of the video file
2013-09-22 17:09:25 +08:00
cachedir : Location of the cache files in the filesystem .
2013-09-25 03:04:43 +08:00
None to disable filesystem cache .
2013-10-01 04:26:25 +08:00
noplaylist : Download single video instead of a playlist if in doubt .
2013-10-06 12:06:30 +08:00
age_limit : An integer representing the user ' s age in years.
Unsuitable videos for the given age are skipped .
2013-12-16 10:09:49 +08:00
min_views : An integer representing the minimum view count the video
must have in order to not be skipped .
Videos without view count information are always
downloaded . None for no limit .
max_views : An integer representing the maximum view count .
Videos that are more popular than that are not
downloaded .
Videos without view count information are always
downloaded . None for no limit .
download_archive : File name of a file where all downloads are recorded .
2013-10-06 10:27:09 +08:00
Videos already present in the file are not downloaded
again .
2013-11-23 02:57:52 +08:00
cookiefile : File name where cookies should be read from and dumped to .
2013-11-24 22:03:25 +08:00
nocheckcertificate : Do not verify SSL certificates
proxy : URL of the proxy server to use
2013-12-01 18:42:02 +08:00
socket_timeout : Time to wait for unresponsive hosts , in seconds
2013-12-09 11:08:51 +08:00
bidi_workaround : Work around buggy terminals without bidirectional text
support , using fridibi
2013-12-29 22:28:32 +08:00
debug_printtraffic : Print out sent and received HTTP traffic
2014-01-21 09:09:49 +08:00
include_ads : Download ads as well
2013-10-22 20:49:34 +08:00
2013-06-19 04:14:21 +08:00
The following parameters are not used by YoutubeDL itself , they are used by
the FileDownloader :
nopart , updatetime , buffersize , ratelimit , min_filesize , max_filesize , test ,
noresizebuffer , retries , continuedl , noprogress , consoletitle
2014-01-09 00:53:34 +08:00
The following options are used by the post processors :
prefer_ffmpeg : If True , use ffmpeg instead of avconv if both are available ,
otherwise prefer avconv .
2013-06-19 04:14:21 +08:00
"""
params = None
_ies = [ ]
_pps = [ ]
_download_retcode = None
_num_downloads = None
_screen_file = None
2013-11-29 22:25:09 +08:00
def __init__ ( self , params = None ) :
2013-06-19 04:14:21 +08:00
""" Create a FileDownloader object with the given options. """
2013-12-31 20:34:52 +08:00
if params is None :
params = { }
2013-06-19 04:14:21 +08:00
self . _ies = [ ]
2013-07-08 21:14:27 +08:00
self . _ies_instances = { }
2013-06-19 04:14:21 +08:00
self . _pps = [ ]
2013-12-23 17:37:27 +08:00
self . _progress_hooks = [ ]
2013-06-19 04:14:21 +08:00
self . _download_retcode = 0
self . _num_downloads = 0
self . _screen_file = [ sys . stdout , sys . stderr ] [ params . get ( ' logtostderr ' , False ) ]
2013-12-09 11:08:51 +08:00
self . _err_file = sys . stderr
2013-12-31 20:34:52 +08:00
self . params = params
2013-09-21 17:48:07 +08:00
2013-12-09 11:08:51 +08:00
if params . get ( ' bidi_workaround ' , False ) :
2013-12-10 01:29:07 +08:00
try :
import pty
master , slave = pty . openpty ( )
width = get_term_width ( )
if width is None :
width_args = [ ]
else :
width_args = [ ' -w ' , str ( width ) ]
2013-12-23 11:19:20 +08:00
sp_kwargs = dict (
2013-12-10 01:29:07 +08:00
stdin = subprocess . PIPE ,
stdout = slave ,
stderr = self . _err_file )
2013-12-23 11:19:20 +08:00
try :
self . _output_process = subprocess . Popen (
[ ' bidiv ' ] + width_args , * * sp_kwargs
)
except OSError :
self . _output_process = subprocess . Popen (
[ ' fribidi ' , ' -c ' , ' UTF-8 ' ] + width_args , * * sp_kwargs )
self . _output_channel = os . fdopen ( master , ' rb ' )
2013-12-10 01:29:07 +08:00
except OSError as ose :
if ose . errno == 2 :
2014-01-05 08:52:03 +08:00
self . report_warning ( ' Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH. ' )
2013-12-10 01:29:07 +08:00
else :
raise
2013-12-09 11:08:51 +08:00
2013-09-21 17:48:07 +08:00
if ( sys . version_info > = ( 3 , ) and sys . platform != ' win32 ' and
sys . getfilesystemencoding ( ) in [ ' ascii ' , ' ANSI_X3.4-1968 ' ]
and not params [ ' restrictfilenames ' ] ) :
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self . report_warning (
2014-01-05 08:52:03 +08:00
' Assuming --restrict-filenames since file system encoding '
' cannot encode all charactes. '
' Set the LC_ALL environment variable to fix this. ' )
2013-11-27 01:53:36 +08:00
self . params [ ' restrictfilenames ' ] = True
2013-09-21 17:48:07 +08:00
2013-11-26 04:55:20 +08:00
if ' %(stitle)s ' in self . params . get ( ' outtmpl ' , ' ' ) :
2014-01-05 08:52:03 +08:00
self . report_warning ( ' %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead. ' )
2013-06-19 04:14:21 +08:00
2013-11-23 02:57:52 +08:00
self . _setup_opener ( )
2013-06-19 04:14:21 +08:00
def add_info_extractor ( self , ie ) :
""" Add an InfoExtractor object to the end of the list. """
self . _ies . append ( ie )
2013-07-08 21:14:27 +08:00
self . _ies_instances [ ie . ie_key ( ) ] = ie
2013-06-19 04:14:21 +08:00
ie . set_downloader ( self )
2013-07-08 21:14:27 +08:00
def get_info_extractor ( self , ie_key ) :
"""
Get an instance of an IE with name ie_key , it will try to get one from
the _ies list , if there ' s no instance it will create a new one and add
it to the extractor list .
"""
ie = self . _ies_instances . get ( ie_key )
if ie is None :
ie = get_info_extractor ( ie_key ) ( )
self . add_info_extractor ( ie )
return ie
2013-06-28 05:51:06 +08:00
def add_default_info_extractors ( self ) :
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractors ( ) :
self . add_info_extractor ( ie )
2013-06-19 04:14:21 +08:00
def add_post_processor ( self , pp ) :
""" Add a PostProcessor object to the end of the chain. """
self . _pps . append ( pp )
pp . set_downloader ( self )
2013-12-23 17:37:27 +08:00
def add_progress_hook ( self , ph ) :
""" Add the progress hook (currently only for the file downloader) """
self . _progress_hooks . append ( ph )
2013-09-24 00:09:28 +08:00
2013-12-10 01:29:07 +08:00
def _bidi_workaround ( self , message ) :
2013-12-23 11:19:20 +08:00
if not hasattr ( self , ' _output_channel ' ) :
2013-12-10 01:29:07 +08:00
return message
2013-12-23 11:19:20 +08:00
assert hasattr ( self , ' _output_process ' )
2014-01-05 08:52:03 +08:00
assert type ( message ) == type ( ' ' )
line_count = message . count ( ' \n ' ) + 1
self . _output_process . stdin . write ( ( message + ' \n ' ) . encode ( ' utf-8 ' ) )
2013-12-23 11:19:20 +08:00
self . _output_process . stdin . flush ( )
2014-01-05 08:52:03 +08:00
res = ' ' . join ( self . _output_channel . readline ( ) . decode ( ' utf-8 ' )
2013-12-10 01:29:07 +08:00
for _ in range ( line_count ) )
2014-01-05 08:52:03 +08:00
return res [ : - len ( ' \n ' ) ]
2013-12-10 01:29:07 +08:00
2013-06-19 04:14:21 +08:00
def to_screen ( self , message , skip_eol = False ) :
2013-12-09 11:08:51 +08:00
""" Print message to stdout if not in quiet mode. """
return self . to_stdout ( message , skip_eol , check_quiet = True )
def to_stdout ( self , message , skip_eol = False , check_quiet = False ) :
2013-06-19 04:14:21 +08:00
""" Print message to stdout if not in quiet mode. """
2013-11-24 13:08:11 +08:00
if self . params . get ( ' logger ' ) :
2013-11-23 16:22:18 +08:00
self . params [ ' logger ' ] . debug ( message )
2013-12-09 11:08:51 +08:00
elif not check_quiet or not self . params . get ( ' quiet ' , False ) :
2013-12-10 01:29:07 +08:00
message = self . _bidi_workaround ( message )
2014-01-05 08:52:03 +08:00
terminator = [ ' \n ' , ' ' ] [ skip_eol ]
2013-06-19 04:14:21 +08:00
output = message + terminator
2013-12-10 01:29:07 +08:00
2013-09-16 12:55:33 +08:00
write_string ( output , self . _screen_file )
2013-06-19 04:14:21 +08:00
def to_stderr ( self , message ) :
""" Print message to stderr. """
2014-01-05 08:52:03 +08:00
assert type ( message ) == type ( ' ' )
2013-11-24 13:08:11 +08:00
if self . params . get ( ' logger ' ) :
2013-11-23 16:22:18 +08:00
self . params [ ' logger ' ] . error ( message )
else :
2013-12-10 01:29:07 +08:00
message = self . _bidi_workaround ( message )
2014-01-05 08:52:03 +08:00
output = message + ' \n '
2013-12-09 11:08:51 +08:00
write_string ( output , self . _err_file )
2013-06-19 04:14:21 +08:00
2013-11-17 18:39:52 +08:00
def to_console_title ( self , message ) :
if not self . params . get ( ' consoletitle ' , False ) :
return
if os . name == ' nt ' and ctypes . windll . kernel32 . GetConsoleWindow ( ) :
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes . windll . kernel32 . SetConsoleTitleW ( ctypes . c_wchar_p ( message ) )
elif ' TERM ' in os . environ :
2014-01-05 08:52:03 +08:00
write_string ( ' \033 ]0; %s \007 ' % message , self . _screen_file )
2013-11-17 18:39:52 +08:00
2013-11-18 04:05:14 +08:00
def save_console_title ( self ) :
if not self . params . get ( ' consoletitle ' , False ) :
return
if ' TERM ' in os . environ :
2013-11-18 23:35:41 +08:00
# Save the title on stack
2014-01-05 08:52:03 +08:00
write_string ( ' \033 [22;0t ' , self . _screen_file )
2013-11-18 04:05:14 +08:00
def restore_console_title ( self ) :
if not self . params . get ( ' consoletitle ' , False ) :
return
if ' TERM ' in os . environ :
2013-11-18 23:35:41 +08:00
# Restore the title from stack
2014-01-05 08:52:03 +08:00
write_string ( ' \033 [23;0t ' , self . _screen_file )
2013-11-18 04:05:14 +08:00
def __enter__ ( self ) :
self . save_console_title ( )
return self
def __exit__ ( self , * args ) :
self . restore_console_title ( )
2013-11-23 02:57:52 +08:00
if self . params . get ( ' cookiefile ' ) is not None :
self . cookiejar . save ( )
2013-11-18 04:05:14 +08:00
2013-06-19 04:14:21 +08:00
def trouble ( self , message = None , tb = None ) :
""" Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
download errors or not , this method may throw an exception or
not when errors are found , after printing the message .
tb , if given , is additional traceback information .
"""
if message is not None :
self . to_stderr ( message )
if self . params . get ( ' verbose ' ) :
if tb is None :
if sys . exc_info ( ) [ 0 ] : # if .trouble has been called from an except block
2014-01-05 08:52:03 +08:00
tb = ' '
2013-06-19 04:14:21 +08:00
if hasattr ( sys . exc_info ( ) [ 1 ] , ' exc_info ' ) and sys . exc_info ( ) [ 1 ] . exc_info [ 0 ] :
2014-01-05 08:52:03 +08:00
tb + = ' ' . join ( traceback . format_exception ( * sys . exc_info ( ) [ 1 ] . exc_info ) )
2013-06-19 04:14:21 +08:00
tb + = compat_str ( traceback . format_exc ( ) )
else :
tb_data = traceback . format_list ( traceback . extract_stack ( ) )
2014-01-05 08:52:03 +08:00
tb = ' ' . join ( tb_data )
2013-06-19 04:14:21 +08:00
self . to_stderr ( tb )
if not self . params . get ( ' ignoreerrors ' , False ) :
if sys . exc_info ( ) [ 0 ] and hasattr ( sys . exc_info ( ) [ 1 ] , ' exc_info ' ) and sys . exc_info ( ) [ 1 ] . exc_info [ 0 ] :
exc_info = sys . exc_info ( ) [ 1 ] . exc_info
else :
exc_info = sys . exc_info ( )
raise DownloadError ( message , exc_info )
self . _download_retcode = 1
def report_warning ( self , message ) :
'''
Print the message to stderr , it will be prefixed with ' WARNING: '
If stderr is a tty file the ' WARNING: ' will be colored
'''
2013-12-09 11:08:51 +08:00
if self . _err_file . isatty ( ) and os . name != ' nt ' :
2014-01-05 08:52:03 +08:00
_msg_header = ' \033 [0;33mWARNING: \033 [0m '
2013-06-19 04:14:21 +08:00
else :
2014-01-05 08:52:03 +08:00
_msg_header = ' WARNING: '
warning_message = ' %s %s ' % ( _msg_header , message )
2013-06-19 04:14:21 +08:00
self . to_stderr ( warning_message )
def report_error ( self , message , tb = None ) :
'''
Do the same as trouble , but prefixes the message with ' ERROR: ' , colored
in red if stderr is a tty file .
'''
2013-12-09 11:08:51 +08:00
if self . _err_file . isatty ( ) and os . name != ' nt ' :
2014-01-05 08:52:03 +08:00
_msg_header = ' \033 [0;31mERROR: \033 [0m '
2013-06-19 04:14:21 +08:00
else :
2014-01-05 08:52:03 +08:00
_msg_header = ' ERROR: '
error_message = ' %s %s ' % ( _msg_header , message )
2013-06-19 04:14:21 +08:00
self . trouble ( error_message , tb )
def report_file_already_downloaded ( self , file_name ) :
""" Report file has already been fully downloaded. """
try :
2014-01-05 08:52:03 +08:00
self . to_screen ( ' [download] %s has already been downloaded ' % file_name )
2013-11-17 23:47:52 +08:00
except UnicodeEncodeError :
2014-01-05 08:52:03 +08:00
self . to_screen ( ' [download] The file has already been downloaded ' )
2013-06-19 04:14:21 +08:00
def increment_downloads ( self ) :
""" Increment the ordinal that assigns a number to each file. """
self . _num_downloads + = 1
def prepare_filename ( self , info_dict ) :
""" Generate the output filename. """
try :
template_dict = dict ( info_dict )
template_dict [ ' epoch ' ] = int ( time . time ( ) )
autonumber_size = self . params . get ( ' autonumber_size ' )
if autonumber_size is None :
autonumber_size = 5
2014-01-05 08:52:03 +08:00
autonumber_templ = ' % 0 ' + str ( autonumber_size ) + ' d '
2013-06-19 04:14:21 +08:00
template_dict [ ' autonumber ' ] = autonumber_templ % self . _num_downloads
2013-10-29 05:01:37 +08:00
if template_dict . get ( ' playlist_index ' ) is not None :
2014-01-05 08:52:03 +08:00
template_dict [ ' playlist_index ' ] = ' %05d ' % template_dict [ ' playlist_index ' ]
2013-06-19 04:14:21 +08:00
2013-10-23 04:28:19 +08:00
sanitize = lambda k , v : sanitize_filename (
2013-12-10 18:23:35 +08:00
compat_str ( v ) ,
2013-06-19 04:14:21 +08:00
restricted = self . params . get ( ' restrictfilenames ' ) ,
2014-01-05 08:52:03 +08:00
is_id = ( k == ' id ' ) )
2013-10-23 04:28:19 +08:00
template_dict = dict ( ( k , sanitize ( k , v ) )
2013-12-10 18:23:35 +08:00
for k , v in template_dict . items ( )
if v is not None )
2014-01-05 08:52:03 +08:00
template_dict = collections . defaultdict ( lambda : ' NA ' , template_dict )
2013-06-19 04:14:21 +08:00
2013-10-23 04:28:19 +08:00
tmpl = os . path . expanduser ( self . params [ ' outtmpl ' ] )
filename = tmpl % template_dict
2013-06-19 04:14:21 +08:00
return filename
except ValueError as err :
2014-01-05 08:52:03 +08:00
self . report_error ( ' Error in output template: ' + str ( err ) + ' (encoding: ' + repr ( preferredencoding ( ) ) + ' ) ' )
2013-06-19 04:14:21 +08:00
return None
def _match_entry ( self , info_dict ) :
""" Returns None iff the file should be downloaded """
2014-01-05 08:52:03 +08:00
video_title = info_dict . get ( ' title ' , info_dict . get ( ' id ' , ' video ' ) )
2013-11-23 05:46:46 +08:00
if ' title ' in info_dict :
# This can happen when we're just evaluating the playlist
title = info_dict [ ' title ' ]
matchtitle = self . params . get ( ' matchtitle ' , False )
if matchtitle :
if not re . search ( matchtitle , title , re . IGNORECASE ) :
2014-01-05 08:52:03 +08:00
return ' " ' + title + ' " title did not match pattern " ' + matchtitle + ' " '
2013-11-23 05:46:46 +08:00
rejecttitle = self . params . get ( ' rejecttitle ' , False )
if rejecttitle :
if re . search ( rejecttitle , title , re . IGNORECASE ) :
2014-01-05 08:52:03 +08:00
return ' " ' + title + ' " title matched reject pattern " ' + rejecttitle + ' " '
2013-06-19 04:14:21 +08:00
date = info_dict . get ( ' upload_date ' , None )
if date is not None :
dateRange = self . params . get ( ' daterange ' , DateRange ( ) )
if date not in dateRange :
2014-01-05 08:52:03 +08:00
return ' %s upload date is not in range %s ' % ( date_from_str ( date ) . isoformat ( ) , dateRange )
2013-12-16 10:09:49 +08:00
view_count = info_dict . get ( ' view_count ' , None )
if view_count is not None :
min_views = self . params . get ( ' min_views ' )
if min_views is not None and view_count < min_views :
2014-01-05 08:52:03 +08:00
return ' Skipping %s , because it has not reached minimum view count ( %d / %d ) ' % ( video_title , view_count , min_views )
2013-12-16 10:09:49 +08:00
max_views = self . params . get ( ' max_views ' )
if max_views is not None and view_count > max_views :
2014-01-05 08:52:03 +08:00
return ' Skipping %s , because it has exceeded the maximum view count ( %d / %d ) ' % ( video_title , view_count , max_views )
2013-10-06 12:06:30 +08:00
age_limit = self . params . get ( ' age_limit ' )
if age_limit is not None :
2013-10-06 22:23:06 +08:00
if age_limit < info_dict . get ( ' age_limit ' , 0 ) :
2014-01-05 08:52:03 +08:00
return ' Skipping " ' + title + ' " because it is age restricted '
2013-10-06 10:27:09 +08:00
if self . in_download_archive ( info_dict ) :
2014-01-05 08:52:03 +08:00
return ' %s has already been recorded in archive ' % video_title
2013-06-19 04:14:21 +08:00
return None
2013-10-22 20:49:34 +08:00
2013-11-03 18:56:45 +08:00
@staticmethod
def add_extra_info ( info_dict , extra_info ) :
''' Set the keys from extra_info in info dict if they are missing '''
for key , value in extra_info . items ( ) :
info_dict . setdefault ( key , value )
2013-12-05 21:29:08 +08:00
def extract_info ( self , url , download = True , ie_key = None , extra_info = { } ,
process = True ) :
2013-06-19 04:14:21 +08:00
'''
Returns a list with a dictionary for each video we find .
If ' download ' , also downloads the videos .
extra_info is a dict containing the extra values to add to each result
'''
2013-10-22 20:49:34 +08:00
2013-06-19 04:14:21 +08:00
if ie_key :
2013-07-08 21:14:27 +08:00
ies = [ self . get_info_extractor ( ie_key ) ]
2013-06-19 04:14:21 +08:00
else :
ies = self . _ies
for ie in ies :
if not ie . suitable ( url ) :
continue
if not ie . working ( ) :
2014-01-05 08:52:03 +08:00
self . report_warning ( ' The program functionality for this site has been marked as broken, '
' and will probably not work. ' )
2013-06-19 04:14:21 +08:00
try :
ie_result = ie . extract ( url )
if ie_result is None : # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance ( ie_result , list ) :
# Backwards compatibility: old IE result format
ie_result = {
' _type ' : ' compat_list ' ,
' entries ' : ie_result ,
}
2013-11-03 19:11:13 +08:00
self . add_extra_info ( ie_result ,
{
' extractor ' : ie . IE_NAME ,
2013-11-03 19:14:44 +08:00
' webpage_url ' : url ,
2013-12-17 11:13:36 +08:00
' webpage_url_basename ' : url_basename ( url ) ,
2013-11-03 19:14:44 +08:00
' extractor_key ' : ie . ie_key ( ) ,
2013-11-03 19:11:13 +08:00
} )
2013-12-05 21:29:08 +08:00
if process :
return self . process_ie_result ( ie_result , download , extra_info )
else :
return ie_result
2013-06-19 04:14:21 +08:00
except ExtractorError as de : # An error we somewhat expected
self . report_error ( compat_str ( de ) , de . format_traceback ( ) )
break
except Exception as e :
if self . params . get ( ' ignoreerrors ' , False ) :
self . report_error ( compat_str ( e ) , tb = compat_str ( traceback . format_exc ( ) ) )
break
else :
raise
else :
2014-01-05 08:52:03 +08:00
self . report_error ( ' no suitable InfoExtractor: %s ' % url )
2013-10-22 20:49:34 +08:00
2013-06-19 04:14:21 +08:00
def process_ie_result ( self , ie_result , download = True , extra_info = { } ) :
"""
Take the result of the ie ( may be modified ) and resolve all unresolved
references ( URLs , playlist items ) .
It will also download the videos if ' download ' .
Returns the resolved ie_result .
"""
result_type = ie_result . get ( ' _type ' , ' video ' ) # If not given we suppose it's a video, support the default old system
if result_type == ' video ' :
2013-11-03 18:56:45 +08:00
self . add_extra_info ( ie_result , extra_info )
2013-11-15 18:04:26 +08:00
return self . process_video_result ( ie_result , download = download )
2013-06-19 04:14:21 +08:00
elif result_type == ' url ' :
# We have to add extra_info to the results because it may be
# contained in a playlist
return self . extract_info ( ie_result [ ' url ' ] ,
download ,
ie_key = ie_result . get ( ' ie_key ' ) ,
extra_info = extra_info )
2013-12-05 21:29:08 +08:00
elif result_type == ' url_transparent ' :
# Use the information from the embedding page
info = self . extract_info (
ie_result [ ' url ' ] , ie_key = ie_result . get ( ' ie_key ' ) ,
extra_info = extra_info , download = False , process = False )
def make_result ( embedded_info ) :
new_result = ie_result . copy ( )
for f in ( ' _type ' , ' url ' , ' ext ' , ' player_url ' , ' formats ' ,
2013-12-23 22:48:00 +08:00
' entries ' , ' ie_key ' , ' duration ' ,
2013-12-06 16:15:04 +08:00
' subtitles ' , ' annotations ' , ' format ' ,
' thumbnail ' , ' thumbnails ' ) :
2013-12-05 21:29:08 +08:00
if f in new_result :
del new_result [ f ]
if f in embedded_info :
new_result [ f ] = embedded_info [ f ]
return new_result
new_result = make_result ( info )
assert new_result . get ( ' _type ' ) != ' url_transparent '
if new_result . get ( ' _type ' ) == ' compat_list ' :
new_result [ ' entries ' ] = [
make_result ( e ) for e in new_result [ ' entries ' ] ]
return self . process_ie_result (
new_result , download = download , extra_info = extra_info )
2013-06-19 04:14:21 +08:00
elif result_type == ' playlist ' :
# We process each entry in the playlist
playlist = ie_result . get ( ' title ' , None ) or ie_result . get ( ' id ' , None )
2014-01-05 08:52:03 +08:00
self . to_screen ( ' [download] Downloading playlist: %s ' % playlist )
2013-06-19 04:14:21 +08:00
playlist_results = [ ]
n_all_entries = len ( ie_result [ ' entries ' ] )
playliststart = self . params . get ( ' playliststart ' , 1 ) - 1
2013-12-16 20:16:20 +08:00
playlistend = self . params . get ( ' playlistend ' , None )
# For backwards compatibility, interpret -1 as whole list
2013-06-19 04:14:21 +08:00
if playlistend == - 1 :
2013-12-16 20:16:20 +08:00
playlistend = None
2013-06-19 04:14:21 +08:00
2013-12-16 20:16:20 +08:00
entries = ie_result [ ' entries ' ] [ playliststart : playlistend ]
2013-06-19 04:14:21 +08:00
n_entries = len ( entries )
2013-12-16 20:16:20 +08:00
self . to_screen (
2014-01-05 08:52:03 +08:00
" [ %s ] playlist ' %s ' : Collected %d video ids (downloading %d of them) " %
2013-06-19 04:14:21 +08:00
( ie_result [ ' extractor ' ] , playlist , n_all_entries , n_entries ) )
2013-10-22 20:49:34 +08:00
for i , entry in enumerate ( entries , 1 ) :
2014-01-05 08:52:03 +08:00
self . to_screen ( ' [download] Downloading video # %s of %s ' % ( i , n_entries ) )
2013-06-19 04:14:21 +08:00
extra = {
2013-10-22 20:49:34 +08:00
' playlist ' : playlist ,
' playlist_index ' : i + playliststart ,
2013-11-03 18:56:45 +08:00
' extractor ' : ie_result [ ' extractor ' ] ,
2013-11-03 19:11:13 +08:00
' webpage_url ' : ie_result [ ' webpage_url ' ] ,
2013-12-17 11:13:36 +08:00
' webpage_url_basename ' : url_basename ( ie_result [ ' webpage_url ' ] ) ,
2013-11-03 19:14:44 +08:00
' extractor_key ' : ie_result [ ' extractor_key ' ] ,
2013-10-22 20:49:34 +08:00
}
2013-11-23 05:46:46 +08:00
reason = self . _match_entry ( entry )
if reason is not None :
2014-01-05 08:52:03 +08:00
self . to_screen ( ' [download] ' + reason )
2013-11-23 05:46:46 +08:00
continue
2013-06-19 04:14:21 +08:00
entry_result = self . process_ie_result ( entry ,
download = download ,
extra_info = extra )
playlist_results . append ( entry_result )
ie_result [ ' entries ' ] = playlist_results
return ie_result
elif result_type == ' compat_list ' :
def _fixup ( r ) :
2013-11-03 18:56:45 +08:00
self . add_extra_info ( r ,
2013-11-03 19:11:13 +08:00
{
' extractor ' : ie_result [ ' extractor ' ] ,
' webpage_url ' : ie_result [ ' webpage_url ' ] ,
2013-12-17 11:13:36 +08:00
' webpage_url_basename ' : url_basename ( ie_result [ ' webpage_url ' ] ) ,
2013-11-03 19:14:44 +08:00
' extractor_key ' : ie_result [ ' extractor_key ' ] ,
2013-11-03 19:11:13 +08:00
} )
2013-06-19 04:14:21 +08:00
return r
ie_result [ ' entries ' ] = [
2013-11-03 18:56:45 +08:00
self . process_ie_result ( _fixup ( r ) , download , extra_info )
2013-06-19 04:14:21 +08:00
for r in ie_result [ ' entries ' ]
]
return ie_result
else :
raise Exception ( ' Invalid result type: %s ' % result_type )
2013-10-21 19:19:58 +08:00
def select_format ( self , format_spec , available_formats ) :
if format_spec == ' best ' or format_spec is None :
return available_formats [ - 1 ]
elif format_spec == ' worst ' :
return available_formats [ 0 ]
else :
2014-01-05 08:52:03 +08:00
extensions = [ ' mp4 ' , ' flv ' , ' webm ' , ' 3gp ' ]
2013-10-21 19:31:55 +08:00
if format_spec in extensions :
filter_f = lambda f : f [ ' ext ' ] == format_spec
else :
filter_f = lambda f : f [ ' format_id ' ] == format_spec
2013-10-22 20:49:34 +08:00
matches = list ( filter ( filter_f , available_formats ) )
2013-10-21 19:19:58 +08:00
if matches :
return matches [ - 1 ]
return None
2013-07-02 16:08:58 +08:00
def process_video_result(self, info_dict, download=True):
    """Resolve the formats of a single video result and optionally download it.

    Fills in playlist defaults, normalizes every entry of the 'formats'
    list ('format_id', 'format', 'ext'), applies --format-limit and the
    requested format spec (e.g. '22/best' or '137+139'), then hands each
    selected format to process_info() when *download* is true.

    Returns *info_dict*, updated in place with the best selected format
    (for backwards compatibility), or None when only listing formats.
    """
    assert info_dict.get('_type', 'video') == 'video'
    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    # These extractors handle format selection themselves
    if info_dict['extractor'] in ['Youku']:
        if download:
            self.process_info(info_dict)
        return info_dict

    # We now pick which formats have to be downloaded
    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    # We check that all the formats have the format and format_id fields
    for (i, format) in enumerate(formats):
        if format.get('format_id') is None:
            # Fall back to the list position as a synthetic format id
            format['format_id'] = compat_str(i)
        if format.get('format') is None:
            format['format'] = '{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
            )
        # Automatically determine file extension if missing
        if 'ext' not in format:
            format['ext'] = determine_ext(format['url'])

    format_limit = self.params.get('format_limit', None)
    if format_limit:
        # Keep formats up to and including the one matching --format-limit
        # (the list is ordered worst-to-best)
        formats = list(takewhile_inclusive(
            lambda f: f['format_id'] != format_limit, formats
        ))

    # TODO Central sorting goes here

    if formats[0] is not info_dict:
        # only set the 'formats' field if the original info_dict lists them
        # otherwise we end up with a circular reference, the first (and unique)
        # element in the 'formats' field in info_dict is info_dict itself,
        # which can't be exported to json
        info_dict['formats'] = formats
    if self.params.get('listformats', None):
        self.list_formats(info_dict)
        return

    req_format = self.params.get('format', 'best')
    if req_format is None:
        req_format = 'best'
    formats_to_download = []
    # The -1 is for supporting YoutubeIE
    if req_format in ('-1', 'all'):
        formats_to_download = formats
    else:
        # We can accept formats requested in the format: 34/5/best, we pick
        # the first that is available, starting from left
        req_formats = req_format.split('/')
        for rf in req_formats:
            if re.match(r'.+?\+.+?', rf) is not None:
                # Two formats have been requested like '137+139'
                format_1, format_2 = rf.split('+')
                formats_info = (self.select_format(format_1, formats),
                                self.select_format(format_2, formats))
                if all(formats_info):
                    # Both halves resolved: mark the pair for a later merge
                    # (picked up in process_info via 'requested_formats')
                    selected_format = {
                        'requested_formats': formats_info,
                        'format': rf,
                        'ext': formats_info[0]['ext'],
                    }
                else:
                    selected_format = None
            else:
                selected_format = self.select_format(rf, formats)
            if selected_format is not None:
                formats_to_download = [selected_format]
                break
    if not formats_to_download:
        raise ExtractorError('requested format not available',
                             expected=True)

    if download:
        if len(formats_to_download) > 1:
            self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
        for format in formats_to_download:
            new_info = dict(info_dict)
            new_info.update(format)
            self.process_info(new_info)
    # We update the info dict with the best quality format (backwards compatibility)
    info_dict.update(formats_to_download[-1])
    return info_dict
2013-06-19 04:14:21 +08:00
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Applies the --match-title/--date/... filters, performs the forced
    metadata printings (--get-title etc.), writes the requested side
    files (description, annotations, subtitles, info JSON, thumbnail),
    runs the actual download (including multi-format downloads that are
    merged afterwards), post-processes the file and records it in the
    download archive.

    Raises MaxDownloadsReached when --max-downloads is exceeded.
    """
    assert info_dict.get('_type', 'video') == 'video'
    # We increment the download count here to match the previous behaviour.
    self.increment_downloads()

    info_dict['fulltitle'] = info_dict['title']
    if len(info_dict['title']) > 200:
        # Keep titles (and the filenames derived from them) to a sane length
        info_dict['title'] = info_dict['title'][:197] + '...'

    # Keep for backwards compatibility
    info_dict['stitle'] = info_dict['title']

    if 'format' not in info_dict:
        info_dict['format'] = info_dict['ext']

    reason = self._match_entry(info_dict)
    if reason is not None:
        self.to_screen('[download] ' + reason)
        return

    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None:
        if self._num_downloads > int(max_downloads):
            raise MaxDownloadsReached()

    filename = self.prepare_filename(info_dict)

    # Forced printings
    if self.params.get('forcetitle', False):
        self.to_stdout(info_dict['fulltitle'])
    if self.params.get('forceid', False):
        self.to_stdout(info_dict['id'])
    if self.params.get('forceurl', False):
        # For RTMP URLs, also include the playpath
        self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
    if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
        self.to_stdout(info_dict['thumbnail'])
    if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
        self.to_stdout(info_dict['description'])
    if self.params.get('forcefilename', False) and filename is not None:
        self.to_stdout(filename)
    if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))
    if self.params.get('forceformat', False):
        self.to_stdout(info_dict['format'])
    if self.params.get('forcejson', False):
        info_dict['_filename'] = filename
        self.to_stdout(json.dumps(info_dict))

    # Do nothing else if in simulate mode
    if self.params.get('simulate', False):
        return

    if filename is None:
        return

    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn != '' and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error('unable to create directory ' + compat_str(err))
        return

    if self.params.get('writedescription', False):
        descfn = filename + '.description'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
            self.to_screen('[info] Video description is already present')
        else:
            try:
                self.to_screen('[info] Writing video description to: ' + descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (KeyError, TypeError):
                # 'description' key missing, or its value is None
                self.report_warning('There\'s no description to write.')
            except (OSError, IOError):
                self.report_error('Cannot write description file ' + descfn)
                return

    if self.params.get('writeannotations', False):
        annofn = filename + '.annotations.xml'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
            self.to_screen('[info] Video annotations are already present')
        else:
            try:
                self.to_screen('[info] Writing video annotations to: ' + annofn)
                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                    annofile.write(info_dict['annotations'])
            except (KeyError, TypeError):
                self.report_warning('There are no annotations to write.')
            except (OSError, IOError):
                self.report_error('Cannot write annotations file: ' + annofn)
                return

    subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                   self.params.get('writeautomaticsub')])

    if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict['subtitles']
        sub_format = self.params.get('subtitlesformat', 'srt')
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                    self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                else:
                    self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                        subfile.write(sub)
            except (OSError, IOError):
                # BUGFIX: this previously reported `descfn` (the description
                # filename, which may not even be defined here and would
                # raise NameError) instead of the subtitle filename.
                self.report_error('Cannot write subtitles file ' + sub_filename)
                return

    if self.params.get('writeinfojson', False):
        infofn = os.path.splitext(filename)[0] + '.info.json'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
            self.to_screen('[info] Video description metadata is already present')
        else:
            self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
            try:
                write_json_file(info_dict, encodeFilename(infofn))
            except (OSError, IOError):
                self.report_error('Cannot write metadata to JSON file ' + infofn)
                return

    if self.params.get('writethumbnail', False):
        if info_dict.get('thumbnail') is not None:
            thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
            thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail is already present' %
                               (info_dict['extractor'], info_dict['id']))
            else:
                self.to_screen('[%s] %s: Downloading thumbnail ...' %
                               (info_dict['extractor'], info_dict['id']))
                try:
                    uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                    with open(thumb_filename, 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    # Thumbnail failure is non-fatal: warn and keep going
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (info_dict['thumbnail'], compat_str(err)))

    if not self.params.get('skip_download', False):
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
            success = True
        else:
            try:
                def dl(name, info):
                    # Pick the right file downloader (HTTP/RTMP/HLS/...) for
                    # this format and wire up the progress hooks.
                    fd = get_suitable_downloader(info)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    return fd.download(name, info)
                if info_dict.get('requested_formats') is not None:
                    # A merged download (e.g. '137+139'): fetch each format
                    # into its own 'f<format_id>' file, then let the merger
                    # postprocessor combine them.
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self)
                    if not merger._get_executable():
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                            'formats but ffmpeg or avconv are not installed.'
                            ' The formats won\'t be merged')
                    else:
                        postprocessors = [merger]
                    for f in info_dict['requested_formats']:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

        if success:
            try:
                self.post_process(filename, info_dict)
            except (PostProcessingError) as err:
                self.report_error('postprocessing: %s' % str(err))
                return

    self.record_download_archive(info_dict)
2013-06-19 04:14:21 +08:00
def download(self, url_list):
    """Download a given list of URLs.

    Raises SameFileError when several URLs would be written to a single
    fixed output file; otherwise returns the internal download retcode.
    """
    outtmpl = self.params['outtmpl']
    # A template with no '%' placeholder produces one fixed filename, so
    # more than one download (unless capped at 1) would clobber it.
    template_is_fixed = '%' not in outtmpl
    if (len(url_list) > 1 and template_is_fixed
            and self.params.get('max_downloads') != 1):
        raise SameFileError(outtmpl)

    for url in url_list:
        try:
            # extract_info also performs the actual downloads
            self.extract_info(url)
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached.')
            raise

    return self._download_retcode
2013-11-22 21:57:53 +08:00
def download_with_info_file(self, info_filename):
    """Download using the metadata stored in a previously written .info.json file."""
    with io.open(info_filename, 'r', encoding='utf-8') as f:
        info = json.load(f)

    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            raise
        # The stored info is stale/broken: fall back to re-extracting
        # from the original webpage URL.
        self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
        return self.download([webpage_url])
    return self._download_retcode
2013-11-22 21:57:53 +08:00
2013-06-19 04:14:21 +08:00
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file."""
    info = dict(ie_info)
    info['filepath'] = filename

    # Per-download postprocessors (e.g. the format merger attached via
    # '__postprocessors') run before the globally configured ones.
    chain = []
    if ie_info.get('__postprocessors') is not None:
        chain.extend(ie_info['__postprocessors'])
    chain.extend(self._pps)

    keep_video = None
    for pp in chain:
        try:
            keep_video_wish, new_info = pp.run(info)
        except PostProcessingError as e:
            self.report_error(e.msg)
            continue
        if keep_video_wish is None:
            continue
        # A positive wish always wins; a negative wish only counts while
        # no decision has been made yet (let the IE decide).
        if keep_video_wish or keep_video is None:
            keep_video = keep_video_wish

    if keep_video is False and not self.params.get('keepvideo', False):
        try:
            self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
            os.remove(encodeFilename(filename))
        except (IOError, OSError):
            self.report_warning('Unable to remove downloaded video file')
2013-10-06 10:27:09 +08:00
2013-11-25 22:46:54 +08:00
def _make_archive_id ( self , info_dict ) :
# Future-proof against any change in case
# and backwards compatibility with prior versions
2013-11-26 05:57:15 +08:00
extractor = info_dict . get ( ' extractor_key ' )
2013-11-23 05:46:46 +08:00
if extractor is None :
if ' id ' in info_dict :
extractor = info_dict . get ( ' ie_key ' ) # key in a playlist
if extractor is None :
2013-11-25 22:46:54 +08:00
return None # Incomplete video information
2014-01-05 08:52:03 +08:00
return extractor . lower ( ) + ' ' + info_dict [ ' id ' ]
2013-11-25 22:46:54 +08:00
def in_download_archive(self, info_dict):
    """Return True if this video is already recorded in the download archive."""
    archive_path = self.params.get('download_archive')
    if archive_path is None:
        return False

    archive_id = self._make_archive_id(info_dict)
    if archive_id is None:
        return False  # Incomplete video information

    try:
        with locked_file(archive_path, 'r', encoding='utf-8') as archive_file:
            return any(line.strip() == archive_id for line in archive_file)
    except IOError as ioe:
        # A missing archive file just means nothing has been recorded yet
        if ioe.errno != errno.ENOENT:
            raise
    return False
def record_download_archive(self, info_dict):
    """Append this video's archive id to the download archive file, if configured."""
    archive_path = self.params.get('download_archive')
    if archive_path is None:
        return
    archive_id = self._make_archive_id(info_dict)
    # Callers only record fully-downloaded videos, so the id must exist
    assert archive_id
    with locked_file(archive_path, 'a', encoding='utf-8') as archive_file:
        archive_file.write(archive_id + '\n')
2013-07-02 16:08:58 +08:00
2013-10-21 20:09:38 +08:00
@staticmethod
def format_resolution(format, default='unknown'):
    """Return a short human-readable resolution string for a format dict.

    Prefers an explicit 'resolution' value, then width/height, and falls
    back to *default* when nothing is known.
    """
    if format.get('vcodec') == 'none':
        return 'audio only'
    if format.get('resolution') is not None:
        return format['resolution']

    width = format.get('width')
    height = format.get('height')
    if height is not None:
        # '1280x720' when both dimensions are known, '720p' otherwise
        return '%sx%s' % (width, height) if width is not None else '%sp' % height
    if width is not None:
        return '?x%d' % width
    return default
2013-07-02 16:08:58 +08:00
def list_formats(self, info_dict):
    """Print a table of the available formats for this video to the screen.

    Columns: format code, extension, resolution, and a free-form note
    (codec/bitrate/filesize summary). The worst and best formats are
    flagged when more than one format is available.
    """
    def format_note(fdict):
        # Build the free-form "note" column for one format dict.
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            # f4f/f4m (HDS) downloads are not supported
            res += '(unsupported) '
        if fdict.get('format_note') is not None:
            res += fdict['format_note'] + ' '
        if fdict.get('tbr') is not None:
            # total bitrate in kbit/s
            res += '%4dk ' % fdict['tbr']
        if (fdict.get('vcodec') is not None and
                fdict.get('vcodec') != 'none'):
            res += '%-5s' % fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            # codec unknown but separate video/audio bitrates are
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        return res

    def line(format, idlen=20):
        # One table row; idlen sizes the first (format code) column.
        return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
            format['format_id'],
            format['ext'],
            self.format_resolution(format),
            format_note(format),
        ))

    formats = info_dict.get('formats', [info_dict])
    # Widen the first column to the longest format id (at least the header)
    idlen = max(len('format code'),
                max(len(f['format_id']) for f in formats))
    formats_s = [line(f, idlen) for f in formats]
    if len(formats) > 1:
        # The list is ordered worst-to-best
        formats_s[0] += (' ' if format_note(formats[0]) else '') + '(worst)'
        formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'

    header_line = line({
        'format_id': 'format code', 'ext': 'extension',
        'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
    self.to_screen('[info] Available formats for %s:\n%s\n%s' %
                   (info_dict['id'], header_line, '\n'.join(formats_s)))
2013-11-23 02:57:52 +08:00
def urlopen(self, req):
    """Start an HTTP download: open *req* through this instance's opener chain."""
    opener = self._opener
    return opener.open(req)
def print_debug_header(self):
    """Write debugging information (version, git HEAD, Python/platform,
    proxy map) via write_string when verbose mode is enabled."""
    if not self.params.get('verbose'):
        return
    write_string('[debug] youtube-dl version ' + __version__ + '\n')
    try:
        # Best effort: when running from a git checkout, also report the
        # current commit hash.
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            write_string('[debug] Git HEAD: ' + out + '\n')
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to ordinary errors (git missing,
        # not a checkout, decode failure, ...).
        try:
            sys.exc_clear()  # Python 2 only; does not exist on Python 3
        except AttributeError:
            pass
    write_string('[debug] Python version %s - %s' %
                 (platform.python_version(), platform_name()) + '\n')

    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
2013-11-23 02:57:52 +08:00
2013-12-01 18:42:02 +08:00
def _setup_opener(self):
    """Create and install the urllib opener used for all HTTP(S) requests.

    Configures the cookie jar (optionally backed by --cookies file),
    proxy handling, HTTPS certificate checking and traffic debugging,
    then stores the opener on self._opener and installs it globally.
    Also applies --socket-timeout as the default socket timeout.
    """
    timeout_val = self.params.get('socket_timeout')
    timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    if opts_cookiefile is None:
        # In-memory cookies only
        self.cookiejar = compat_cookiejar.CookieJar()
    else:
        self.cookiejar = compat_cookiejar.MozillaCookieJar(
            opts_cookiefile)
        # Load existing cookies only if the file is readable; a missing
        # file will simply be created on save
        if os.access(opts_cookiefile, os.R_OK):
            self.cookiejar.load()
    cookie_processor = compat_urllib_request.HTTPCookieProcessor(
        self.cookiejar)
    if opts_proxy is not None:
        if opts_proxy == '':
            # --proxy "" explicitly disables all proxies
            proxies = {}
        else:
            proxies = {'http': opts_proxy, 'https': opts_proxy}
    else:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = compat_urllib_request.ProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(
        self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(debuglevel=debuglevel)
    opener = compat_urllib_request.build_opener(
        https_handler, proxy_handler, cookie_processor, ydlh)
    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener

    # TODO remove this global modification
    compat_urllib_request.install_opener(opener)
    socket.setdefaulttimeout(timeout)