
Merge branch 'master' into multipart_videos

Conflicts:
	youtube_dl/extractor/common.py
	youtube_dl/extractor/mtv.py
	youtube_dl/options.py
	youtube_dl/postprocessor/__init__.py
	youtube_dl/postprocessor/ffmpeg.py
Author: Mark Lee, 2014-11-29 19:07:46 -08:00
Commit: 1037943c8f
354 changed files with 9985 additions and 4229 deletions

.gitignore (1 change)

@ -30,3 +30,4 @@ updates_key.pem
*.swp
test/testdata
.tox
youtube-dl.zsh

AUTHORS (new file, 90 lines)

@ -0,0 +1,90 @@
Ricardo Garcia Gonzalez
Danny Colligan
Benjamin Johnson
Vasyl' Vavrychuk
Witold Baryluk
Paweł Paprota
Gergely Imreh
Rogério Brito
Philipp Hagemeister
Sören Schulze
Kevin Ngo
Ori Avtalion
shizeeg
Filippo Valsorda
Christian Albrecht
Dave Vasilevsky
Jaime Marquínez Ferrándiz
Jeff Crouse
Osama Khalid
Michael Walter
M. Yasoob Ullah Khalid
Julien Fraichard
Johny Mo Swag
Axel Noack
Albert Kim
Pierre Rudloff
Huarong Huo
Ismael Mejía
Steffan 'Ruirize' James
Andras Elso
Jelle van der Waa
Marcin Cieślak
Anton Larionov
Takuya Tsuchida
Sergey M.
Michael Orlitzky
Chris Gahan
Saimadhav Heblikar
Mike Col
Oleg Prutz
pulpe
Andreas Schmitz
Michael Kaiser
Niklas Laxström
David Triendl
Anthony Weems
David Wagner
Juan C. Olivares
Mattias Harrysson
phaer
Sainyam Kapoor
Nicolas Évrard
Jason Normore
Hoje Lee
Adam Thalhammer
Georg Jähnig
Ralf Haring
Koki Takahashi
Ariset Llerena
Adam Malcontenti-Wilson
Tobias Bell
Naglis Jonaitis
Charles Chen
Hassaan Ali
Dobrosław Żybort
David Fabijan
Sebastian Haas
Alexander Kirk
Erik Johnson
Keith Beckman
Ole Ernst
Aaron McDaniel (mcd1992)
Magnus Kolstad
Hari Padmanaban
Carlos Ramos
5moufl
lenaten
Dennis Scheiba
Damon Timm
winwon
Xavier Beynon
Gabriel Schubiner
xantares
Jan Matějka
Mauroy Sébastien
William Sewell
Dao Hoang Son
Oskar Jauch
Matthew Rayfield
t0mm0


@ -1,7 +1,7 @@
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.fish
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
clean:
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.fish
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part
cleanall: clean
rm -f youtube-dl youtube-dl.exe
@ -9,6 +9,7 @@ cleanall: clean
PREFIX ?= /usr/local
BINDIR ?= $(PREFIX)/bin
MANDIR ?= $(PREFIX)/man
SHAREDIR ?= $(PREFIX)/share
PYTHON ?= /usr/bin/env python
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
@ -22,13 +23,15 @@ else
endif
endif
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
install -d $(DESTDIR)$(BINDIR)
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
install -d $(DESTDIR)$(MANDIR)/man1
install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
@ -38,7 +41,7 @@ test:
tar: youtube-dl.tar.gz
.PHONY: all clean install test tar bash-completion pypi-files fish-completion
.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
@ -66,12 +69,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-co
bash-completion: youtube-dl.bash-completion
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
python devscripts/zsh-completion.py
zsh-completion: youtube-dl.zsh
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
python devscripts/fish-completion.py
fish-completion: youtube-dl.fish
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.fish
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
--exclude '*.DS_Store' \
--exclude '*.kate-swp' \
@ -86,5 +94,5 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
bin devscripts test youtube_dl docs \
LICENSE README.md README.txt \
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
youtube-dl.fish setup.py \
youtube-dl.zsh youtube-dl.fish setup.py \
youtube-dl

README.md (113 changes)

@ -30,7 +30,7 @@ Alternatively, refer to the developer instructions below for how to check out an
# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
YouTube.com and a few more sites. It requires the Python interpreter, version
2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
your Unix box, on Windows or on Mac OS X. It is released to the public domain,
which means you can modify it, redistribute it or use it however you like.
@ -69,6 +69,8 @@ which means you can modify it, redistribute it or use it however you like.
configuration in ~/.config/youtube-dl.conf
(%APPDATA%/youtube-dl/config.txt on
Windows)
--flat-playlist Do not extract the videos of a playlist,
only list them.
## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
@ -91,7 +93,8 @@ which means you can modify it, redistribute it or use it however you like.
COUNT views
--max-views COUNT Do not download any videos with more than
COUNT views
--no-playlist download only the currently playing video
--no-playlist If the URL refers to a video and a
playlist, download only the video.
--age-limit YEARS download only videos suitable for the given
age
--download-archive FILE Download only videos not listed in the
@ -99,8 +102,6 @@ which means you can modify it, redistribute it or use it however you like.
downloaded videos in it.
--include-ads Download advertisements as well
(experimental)
--youtube-include-dash-manifest Try to download the DASH manifest on
YouTube videos (experimental)
## Download Options:
-r, --rate-limit LIMIT maximum download rate in bytes per second
@ -131,17 +132,19 @@ which means you can modify it, redistribute it or use it however you like.
%(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the
video id, %(playlist)s for the playlist the
video id, %(playlist_title)s,
%(playlist_id)s, or %(playlist)s (=title if
present, ID otherwise) for the playlist the
video is in, %(playlist_index)s for the
position in the playlist and %% for a
literal percent. %(height)s and %(width)s
for the width and height of the video
format. %(resolution)s for a textual
position in the playlist. %(height)s and
%(width)s for the width and height of the
video format. %(resolution)s for a textual
description of the resolution of the video
format. Use - to output to stdout. Can also
be used to download to a different
directory, for example with -o '/my/downloa
ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
format. %% for a literal percent. Use - to
output to stdout. Can also be used to
download to a different directory, for
example with -o '/my/downloads/%(uploader)s
/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in
%(autonumber)s when it is present in output
filename template or --auto-number option
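A minimal sketch of driving the same output template from Python, via the embedding interface described in the EMBEDDING YOUTUBE-DL section further down (it is assumed here that the `outtmpl` parameter mirrors the `-o` option and accepts the same `%(...)s` keys; the URL is the placeholder used elsewhere in this README):

```
import youtube_dl

# Sketch: equivalent of -o '%(playlist)s/%(playlist_index)s - %(title)s-%(id)s.%(ext)s'.
# Assumption: the 'outtmpl' parameter accepts the same template keys as -o.
ydl_opts = {
    'outtmpl': '%(playlist)s/%(playlist_index)s - %(title)s-%(id)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```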
@ -158,7 +161,8 @@ which means you can modify it, redistribute it or use it however you like.
downloads if possible.
--no-continue do not resume partially downloaded files
(restart from beginning)
--no-part do not use .part files
--no-part do not use .part files - write directly
into output file
--no-mtime do not use the Last-modified header to set
the file modification time
--write-description write video description to a .description
@ -198,6 +202,10 @@ which means you can modify it, redistribute it or use it however you like.
-j, --dump-json simulate, quiet but print JSON information.
See --output for a description of available
keys.
-J, --dump-single-json simulate, quiet but print JSON information
for each command-line argument. If the URL
refers to a playlist, dump the whole
playlist information in a single line.
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
@ -216,7 +224,7 @@ which means you can modify it, redistribute it or use it however you like.
information about the video. (Currently
supported only for YouTube)
--user-agent UA specify a custom user agent
--referer REF specify a custom referer, use if the video
--referer URL specify a custom referer, use if the video
access is restricted to one domain
--add-header FIELD:VALUE specify a custom HTTP header and its value,
separated by a colon ':'. You can use this
@ -227,17 +235,27 @@ which means you can modify it, redistribute it or use it however you like.
## Video Format Options:
-f, --format FORMAT video format code, specify the order of
preference using slashes: "-f 22/17/18".
"-f mp4" and "-f flv" are also supported.
You can also use the special names "best",
"bestvideo", "bestaudio", "worst",
"worstvideo" and "worstaudio". By default,
youtube-dl will pick the best quality.
preference using slashes: -f 22/17/18 . -f
mp4 , -f m4a and -f flv are also
supported. You can also use the special
names "best", "bestvideo", "bestaudio",
"worst", "worstvideo" and "worstaudio". By
default, youtube-dl will pick the best
quality. Use commas to download multiple
audio formats, such as -f
136/137/mp4/bestvideo,140/m4a/bestaudio.
You can merge the video and audio of two
formats into a single file using -f <video-
format>+<audio-format> (requires ffmpeg or
avconv), for example -f
bestvideo+bestaudio.
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific
one is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats
--youtube-skip-dash-manifest Do not download the DASH manifest on
YouTube videos
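The format-selection syntax above can also be exercised from an embedding script; a minimal sketch, assuming the `format` parameter takes the same FORMAT strings as `-f` (merging the two formats requires ffmpeg or avconv, as noted above):

```
import youtube_dl

# Sketch: equivalent of -f bestvideo+bestaudio (merging needs ffmpeg or avconv).
ydl_opts = {
    'format': 'bestvideo+bestaudio',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```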
## Subtitle Options:
--write-sub write subtitle file
@ -253,7 +271,7 @@ which means you can modify it, redistribute it or use it however you like.
language tags like 'en,pt'
## Authentication Options:
-u, --username USERNAME account username
-u, --username USERNAME login with this account ID
-p, --password PASSWORD account password
-2, --twofactor TWOFACTOR two-factor auth code
-n, --netrc use .netrc authentication data
@ -264,7 +282,7 @@ which means you can modify it, redistribute it or use it however you like.
(requires ffmpeg or avconv and ffprobe or
avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
"opus", or "wav"; best by default
"opus", or "wav"; "best" by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
insert a value between 0 (better) and 9
(worse) for VBR or a specific bitrate like
@ -345,21 +363,34 @@ $ youtube-dl --dateafter 20000101 --datebefore 20091231
# FAQ
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
### How do I update youtube-dl?
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version.
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
Alternatively, uninstall the youtube-dl package and follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html). In a pinch, this should do if you used `apt-get` before to install youtube-dl:
If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
sudo apt-get remove -y youtube-dl
Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
```
sudo apt-get remove -y youtube-dl
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
sudo chmod a+x /usr/local/bin/youtube-dl
hash -r
```
Again, from then on you'll be able to update with `sudo youtube-dl -U`.
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
@ -439,8 +470,6 @@ If you want to add support for a new site, you can follow this quick list (assum
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -448,7 +477,7 @@ If you want to add support for a new site, you can follow this quick list (assum
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://yourextractor.com/watch/42',
'md5': 'TODO: md5 sum of the first 10KiB of the video file',
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
'info_dict': {
'id': '42',
'ext': 'mp4',
@ -463,16 +492,16 @@ If you want to add support for a new site, you can follow this quick list (assum
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# TODO more code goes here, for example ...
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
return {
'id': video_id,
'title': title,
'description': self._og_search_description(webpage),
# TODO more properties (see youtube_dl/extractor/common.py)
}
```
@ -480,7 +509,7 @@ If you want to add support for a new site, you can follow this quick list (assum
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
$ git add youtube_dl/extractor/__init__.py
$ git add youtube_dl/extractor/yourextractor.py
@ -491,6 +520,20 @@ If you want to add support for a new site, you can follow this quick list (assum
In any case, thank you very much for your contributions!
# EMBEDDING YOUTUBE-DL
youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).
From a Python program, you can embed youtube-dl in a more powerful fashion, like this:
import youtube_dl
ydl_opts = {}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
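For instance, a minimal sketch of intercepting youtube-dl's output through the `logger` option mentioned above (the `debug`/`warning`/`error` method names are an assumption about the interface the logger object must provide):

```
import youtube_dl

class MyLogger(object):
    # Assumption: youtube-dl calls debug(), warning() and error() on the logger.
    def debug(self, msg):
        pass  # swallow routine progress messages

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)

ydl_opts = {'logger': MyLogger()}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```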
# BUGS
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.


@ -1,4 +1,6 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import os
from os.path import dirname as dirn
import sys
@ -9,16 +11,17 @@ import youtube_dl
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
def build_completion(opt_parser):
opts_flag = []
for group in opt_parser.option_groups:
for option in group.option_list:
#for every long flag
# for every long flag
opts_flag.append(option.get_opt_string())
with open(BASH_COMPLETION_TEMPLATE) as f:
template = f.read()
with open(BASH_COMPLETION_FILE, "w") as f:
#just using the special char
# just using the special char
filled_template = template.replace("{{flags}}", " ".join(opts_flag))
f.write(filled_template)


@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code):
def win_service_main(service_name, real_main, argc, argv_raw):
try:
#args = [argv_raw[i].value for i in range(argc)]
# args = [argv_raw[i].value for i in range(argc)]
stop_event = threading.Event()
handler = HandlerEx(functools.partial(stop_event, win_service_handler))
h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
@ -233,6 +233,7 @@ def rmtree(path):
#==============================================================================
class BuildError(Exception):
def __init__(self, output, code=500):
self.output = output
@ -369,7 +370,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
def do_GET(self):
path = urlparse.urlparse(self.path)


@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals
"""
This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check


@ -23,13 +23,13 @@ EXTRA_ARGS = {
'batch-file': ['--require-parameter'],
}
def build_completion(opt_parser):
commands = []
for group in opt_parser.option_groups:
for option in group.option_list:
long_option = option.get_opt_string().strip('-')
help_msg = shell_quote([option.help])
complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
if option._short_opts:
complete_cmd += ['--short-option', option._short_opts[0].strip('-')]


@ -1,4 +1,5 @@
#!/usr/bin/env python3
from __future__ import unicode_literals
import json
import sys


@ -1,8 +1,7 @@
#!/usr/bin/env python3
from __future__ import unicode_literals
import hashlib
import shutil
import subprocess
import tempfile
import urllib.request
import json


@ -1,4 +1,5 @@
#!/usr/bin/env python3
from __future__ import unicode_literals, with_statement
import rsa
import json
@ -11,22 +12,23 @@ except NameError:
versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
del versions_info['signature']
del versions_info['signature']
print('Enter the PKCS1 private key, followed by a blank line:')
privkey = b''
while True:
try:
line = input()
except EOFError:
break
if line == '':
break
privkey += line.encode('ascii') + b'\n'
try:
line = input()
except EOFError:
break
if line == '':
break
privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)
signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)
versions_info['signature'] = signature
json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
with open('update/versions.json', 'w') as versionsf:
json.dump(versions_info, versionsf, indent=4, sort_keys=True)


@ -1,11 +1,11 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import with_statement
from __future__ import with_statement, unicode_literals
import datetime
import glob
import io # For Python 2 compatibilty
import io # For Python 2 compatibilty
import os
import re
@ -13,7 +13,7 @@ year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
with io.open(fn, encoding='utf-8') as f:
content = f.read()
newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
if content != newc:
tmpFn = fn + '.part'
with io.open(tmpFn, 'wt', encoding='utf-8') as outf:


@ -1,4 +1,5 @@
#!/usr/bin/env python3
from __future__ import unicode_literals
import datetime
import io
@ -73,4 +74,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
atom_file.write(atom_template)


@ -1,4 +1,5 @@
#!/usr/bin/env python3
from __future__ import unicode_literals
import sys
import os
@ -9,6 +10,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
import youtube_dl
def main():
with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
template = tmplf.read()
@ -21,7 +23,7 @@ def main():
continue
elif ie_desc is not None:
ie_html += ': {}'.format(ie.IE_DESC)
if ie.working() == False:
if not ie.working():
ie_html += ' (Currently broken)'
ie_htmls.append('<li>{}</li>'.format(ie_html))


@ -1,3 +1,5 @@
from __future__ import unicode_literals
import io
import sys
import re


@ -1,3 +1,4 @@
from __future__ import unicode_literals
import io
import os.path


@ -1,40 +0,0 @@
#!/usr/bin/env python
import sys, os
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
try:
raw_input()
except NameError: # Python 3
input()
filename = sys.argv[0]
API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
try:
urlh = compat_urllib_request.urlopen(BIN_URL)
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')


@ -1,12 +0,0 @@
from distutils.core import setup
import py2exe
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)


@ -1,102 +0,0 @@
#!/usr/bin/env python
import sys, os
import urllib2
import json, hashlib
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
def b(x):
if version_info[0] == 2: return x
else: return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
while n:
block_size += 1
n >>= 8
signature = pow(int(signature, 16), key[1], key[0])
raw_bytes = []
while signature:
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
if signature[0:2] != b('\x00\x01'): return False
signature = signature[2:]
if not b('\x00') in signature: return False
signature = signature[signature.index(b('\x00'))+1:]
if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
signature = signature[19:]
if signature != sha256(message).digest(): return False
return True
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
raw_input()
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % directory)
try:
versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
version = versions_info['versions'][versions_info['latest']]
try:
urlh = urllib2.urlopen(version['exe'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
try:
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit(u'ERROR: unable to write the new version')
try:
bat = os.path.join(directory, 'youtube-dl-updater.bat')
b = open(bat, 'w')
b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" %(exe, exe, bat))
b.close()
os.startfile(bat)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')


@ -0,0 +1,28 @@
#compdef youtube-dl
__youtube_dl() {
local curcontext="$curcontext" fileopts diropts cur prev
typeset -A opt_args
fileopts="{{fileopts}}"
diropts="{{diropts}}"
cur=$words[CURRENT]
case $cur in
:)
_arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
;;
*)
prev=$words[CURRENT-1]
if [[ ${prev} =~ ${fileopts} ]]; then
_path_files
elif [[ ${prev} =~ ${diropts} ]]; then
_path_files -/
elif [[ ${prev} == "--recode-video" ]]; then
_arguments '*: :(mp4 flv ogg webm mkv)'
else
_arguments '*: :({{flags}})'
fi
;;
esac
}
__youtube_dl

devscripts/zsh-completion.py (new executable file, 48 lines)

@ -0,0 +1,48 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import os
from os.path import dirname as dirn
import sys
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
def build_completion(opt_parser):
opts = [opt for group in opt_parser.option_groups
for opt in group.option_list]
opts_file = [opt for opt in opts if opt.metavar == "FILE"]
opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
fileopts = []
for opt in opts_file:
if opt._short_opts:
fileopts.extend(opt._short_opts)
if opt._long_opts:
fileopts.extend(opt._long_opts)
diropts = []
for opt in opts_dir:
if opt._short_opts:
diropts.extend(opt._short_opts)
if opt._long_opts:
diropts.extend(opt._long_opts)
flags = [opt.get_opt_string() for opt in opts]
with open(ZSH_COMPLETION_TEMPLATE) as f:
template = f.read()
template = template.replace("{{fileopts}}", "|".join(fileopts))
template = template.replace("{{diropts}}", "|".join(diropts))
template = template.replace("{{flags}}", " ".join(flags))
with open(ZSH_COMPLETION_FILE, "w") as f:
f.write(template)
parser = youtube_dl.parseOpts()[0]
build_completion(parser)


@ -44,8 +44,8 @@ copyright = u'2014, Ricardo Garcia Gonzalez'
# built documents.
#
# The short X.Y version.
import youtube_dl
version = youtube_dl.__version__
from youtube_dl.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = version


@ -4,7 +4,6 @@
from __future__ import print_function
import os.path
import pkg_resources
import warnings
import sys
@ -103,7 +102,9 @@ setup(
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3"
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
**params


@ -1,3 +1,5 @@
from __future__ import unicode_literals
import errno
import io
import hashlib
@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
from youtube_dl.utils import (
compat_str,
preferredencoding,
write_string,
)
@ -40,10 +43,10 @@ def report_warning(message):
If stderr is a tty file the 'WARNING:' will be colored
'''
if sys.stderr.isatty() and os.name != 'nt':
_msg_header = u'\033[0;33mWARNING:\033[0m'
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = u'WARNING:'
output = u'%s %s\n' % (_msg_header, message)
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
@ -54,9 +57,9 @@ class FakeYDL(YoutubeDL):
# Different instances of the downloader can't share the same dictionary
# some test set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
print(s)
@ -69,8 +72,10 @@ class FakeYDL(YoutubeDL):
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
def report_warning(self, message):
if re.match(regex, message): return
if re.match(regex, message):
return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
@ -103,22 +108,22 @@ def expect_info_dict(self, expected_dict, got_dict):
self.assertTrue(
isinstance(got, compat_str),
u'Expected a %s object, but got %s for field %s' % (
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
match_rex.match(got),
u'field %s (value: %r) should match %r' % (info_field, got, match_str))
'field %s (value: %r) should match %r' % (info_field, got, match_str))
elif isinstance(expected, type):
got = got_dict.get(info_field)
self.assertTrue(isinstance(got, expected),
u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got_dict.get(info_field))
else:
got = got_dict.get(info_field)
self.assertEqual(expected, got,
u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
# Check for the presence of mandatory fields
if got_dict.get('_type') != 'playlist':
@ -127,15 +132,24 @@ def expect_info_dict(self, expected_dict, got_dict):
self.assertTrue(any(key in info_dict.keys() and info_dict[key] for key in ('url', 'parts')))
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
for key, value in got_dict.items()
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
else:
return repr(v)
info_dict_str = ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items())
write_string(
'\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
@ -161,3 +175,13 @@ def assertGreaterEqual(self, got, expected, msg=None):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(w):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
ydl.report_warning = _report_warning


@ -0,0 +1,18 @@
// input: []
// output: 4
package {
public class ConstArrayAccess {
private static const x:int = 2;
private static const ar:Array = ["42", "3411"];
public static function main():int{
var c:ConstArrayAccess = new ConstArrayAccess();
return c.f();
}
public function f(): int {
return ar[1].length;
}
}
}


@ -0,0 +1,12 @@
// input: []
// output: 2
package {
public class ConstantInt {
private static const x:int = 2;
public static function main():int{
return x;
}
}
}

test/swftests/DictCall.as (new file, 10 lines)

@ -0,0 +1,10 @@
// input: [{"x": 1, "y": 2}]
// output: 3
package {
public class DictCall {
public static function main(d:Object):int{
return d.x + d.y;
}
}
}


@ -0,0 +1,10 @@
// input: []
// output: false
package {
public class EqualsOperator {
public static function main():Boolean{
return 1 == 2;
}
}
}


@ -0,0 +1,22 @@
// input: [1]
// output: 2
package {
public class MemberAssignment {
public var v:int;
public function g():int {
return this.v;
}
public function f(a:int):int{
this.v = a;
return this.v + this.g();
}
public static function main(a:int): int {
var v:MemberAssignment = new MemberAssignment();
return v.f(a);
}
}
}


@ -0,0 +1,24 @@
// input: []
// output: 123
package {
public class NeOperator {
public static function main(): int {
var res:int = 0;
if (1 != 2) {
res += 3;
} else {
res += 4;
}
if (2 != 2) {
res += 10;
} else {
res += 20;
}
if (9 == 9) {
res += 100;
}
return res;
}
}
}


@ -0,0 +1,22 @@
// input: []
// output: 9
package {
public class PrivateVoidCall {
public static function main():int{
var f:OtherClass = new OtherClass();
f.func();
return 9;
}
}
}
class OtherClass {
private function pf():void {
;
}
public function func():void {
this.pf();
}
}


@ -0,0 +1,11 @@
// input: []
// output: 3
package {
public class StringBasics {
public static function main():int{
var s:String = "abc";
return s.length;
}
}
}


@ -0,0 +1,11 @@
// input: []
// output: 9897
package {
public class StringCharCodeAt {
public static function main():int{
var s:String = "abc";
return s.charCodeAt(1) * 100 + s.charCodeAt();
}
}
}


@ -0,0 +1,11 @@
// input: []
// output: 2
package {
public class StringConversion {
public static function main():int{
var s:String = String(99);
return s.length;
}
}
}


@ -266,6 +266,7 @@ class TestFormatSelection(unittest.TestCase):
'ext': 'mp4',
'width': None,
}
def fname(templ):
ydl = YoutubeDL({'outtmpl': templ})
return ydl.prepare_filename(info)


@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
@ -19,7 +20,7 @@ def _download_restricted(url, filename, age):
'age_limit': age,
'skip_download': True,
'writeinfojson': True,
"outtmpl": "%(id)s.%(ext)s",
'outtmpl': '%(id)s.%(ext)s',
}
ydl = YoutubeDL(params)
ydl.add_default_info_extractors()


@ -14,7 +14,7 @@ from test.helper import gettestcases
from youtube_dl.extractor import (
FacebookIE,
gen_extractors,
JustinTVIE,
TwitchIE,
YoutubeIE,
)
@ -32,19 +32,19 @@ class TestAllURLsMatching(unittest.TestCase):
def test_youtube_playlist_matching(self):
assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') # 585
assertPlaylist('PL63F0C78739B09958')
assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') # 668
self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
# Top tracks
assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
def test_youtube_matching(self):
self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) # 668
self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@ -72,21 +72,17 @@ class TestAllURLsMatching(unittest.TestCase):
self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
def test_justin_tv_channelid_matching(self):
self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))
def test_twitch_channelid_matching(self):
self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv'))
self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv'))
self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv'))
self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/'))
def test_justintv_videoid_matching(self):
self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
def test_twitch_videoid_matching(self):
self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
def test_justin_tv_chapterid_matching(self):
self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
def test_twitch_chapterid_matching(self):
self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
def test_youtube_extract(self):
assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)

test/test_compat.py (new file, 46 lines)

@ -0,0 +1,46 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.utils import get_filesystem_encoding
from youtube_dl.compat import (
compat_getenv,
compat_expanduser,
)
class TestCompat(unittest.TestCase):
def test_compat_getenv(self):
test_str = 'тест'
os.environ['YOUTUBE-DL-TEST'] = (
test_str if sys.version_info >= (3, 0)
else test_str.encode(get_filesystem_encoding()))
self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
def test_compat_expanduser(self):
old_home = os.environ.get('HOME')
test_str = 'C:\Documents and Settings\тест\Application Data'
os.environ['HOME'] = (
test_str if sys.version_info >= (3, 0)
else test_str.encode(get_filesystem_encoding()))
self.assertEqual(compat_expanduser('~'), test_str)
os.environ['HOME'] = old_home
def test_all_present(self):
import youtube_dl.compat
all_names = youtube_dl.compat.__all__
present_names = set(filter(
lambda c: '_' in c and not c.startswith('_'),
dir(youtube_dl.compat))) - set(['unicode_literals'])
self.assertEqual(all_names, sorted(present_names))
if __name__ == '__main__':
unittest.main()


@ -1,5 +1,7 @@
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
@ -8,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
expect_warnings,
get_params,
gettestcases,
expect_info_dict,
@ -22,10 +25,12 @@ import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.utils import (
from youtube_dl.compat import (
compat_http_client,
compat_urllib_error,
compat_HTTPError,
)
from youtube_dl.utils import (
DownloadError,
ExtractorError,
format_bytes,
@ -35,18 +40,22 @@ from youtube_dl.extractor import get_info_extractor
RETRIES = 3
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
self.to_stderr = self.to_screen
self.processed_info_dicts = []
super(YoutubeDL, self).__init__(*args, **kwargs)
def report_warning(self, message):
# Don't accept warnings during tests
raise ExtractorError(message)
def process_info(self, info_dict):
self.processed_info_dicts.append(info_dict)
return super(YoutubeDL, self).process_info(info_dict)
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
@ -56,10 +65,13 @@ defs = gettestcases()
class TestDownload(unittest.TestCase):
maxDiff = None
def setUp(self):
self.defs = defs
### Dynamically generate tests
# Dynamically generate tests
def generator(test_case):
def test_template(self):
@ -85,7 +97,7 @@ def generator(test_case):
return
for other_ie in other_ies:
if not other_ie.working():
print_skipping(u'test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
return
params = get_params(test_case.get('params', {}))
@ -93,18 +105,21 @@ def generator(test_case):
params.setdefault('extract_flat', True)
params.setdefault('skip_download', True)
ydl = YoutubeDL(params)
ydl = YoutubeDL(params, auto_init=False)
ydl.add_default_info_extractors()
finished_hook_called = set()
def _hook(status):
if status['status'] == 'finished':
finished_hook_called.add(status['filename'])
ydl.add_progress_hook(_hook)
expect_warnings(ydl, test_case.get('expected_warnings', []))
def get_tc_filename(tc):
return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
res_dict = None
def try_rm_tcs_files(tcs=None):
if tcs is None:
tcs = test_cases
@ -128,7 +143,7 @@ def generator(test_case):
raise
if try_num == RETRIES:
report_warning(u'Failed due to network errors, skipping...')
report_warning('Failed due to network errors, skipping...')
return
print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))
@ -139,7 +154,9 @@ def generator(test_case):
if is_playlist:
self.assertEqual(res_dict['_type'], 'playlist')
self.assertTrue('entries' in res_dict)
expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
if 'playlist_mincount' in test_case:
assertGreaterEqual(
self,
@ -181,14 +198,16 @@ def generator(test_case):
md5_for_file = _file_md5(tc_filename)
self.assertEqual(md5_for_file, tc['md5'])
info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
self.assertTrue(os.path.exists(info_json_fn))
self.assertTrue(
os.path.exists(info_json_fn),
'Missing info file %s' % info_json_fn)
with io.open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof)
expect_info_dict(self, tc.get('info_dict', {}), info_dict)
finally:
try_rm_tcs_files()
if is_playlist and res_dict is not None:
if is_playlist and res_dict is not None and res_dict.get('entries'):
# Remove all other files that may have been extracted if the
# extractor returns full results even with extract_flat
res_tcs = [{'info_dict': e} for e in res_dict['entries']]
@ -196,15 +215,15 @@ def generator(test_case):
return test_template
### And add them to TestDownload
# And add them to TestDownload
for n, test_case in enumerate(defs):
test_method = generator(test_case)
tname = 'test_' + str(test_case['name'])
i = 1
while hasattr(TestDownload, tname):
tname = 'test_' + str(test_case['name']) + '_' + str(i)
tname = 'test_%s_%d' % (test_case['name'], i)
i += 1
test_method.__name__ = tname
test_method.__name__ = str(tname)
setattr(TestDownload, test_method.__name__, test_method)
del test_method


@ -1,3 +1,6 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import unittest
import sys
@ -6,17 +9,19 @@ import subprocess
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
_DEV_NULL = subprocess.DEVNULL
except AttributeError:
_DEV_NULL = open(os.devnull, 'wb')
class TestExecution(unittest.TestCase):
def test_import(self):
subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
def test_module_exec(self):
if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
if sys.version_info >= (2, 7): # Python 2.6 doesn't support package execution
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
def test_main_exec(self):


@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
@ -15,12 +16,14 @@ from youtube_dl.extractor import (
DailymotionIE,
TEDIE,
VimeoIE,
WallaIE,
)
class BaseTestSubtitles(unittest.TestCase):
url = None
IE = None
def setUp(self):
self.DL = FakeYDL()
self.ie = self.IE(self.DL)
@ -73,7 +76,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
def test_youtube_list_subtitles(self):
self.DL.expect_warning(u'Video doesn\'t have automatic captions')
self.DL.expect_warning('Video doesn\'t have automatic captions')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
@ -86,7 +89,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
self.assertTrue(subtitles['it'] is not None)
def test_youtube_nosubtitles(self):
self.DL.expect_warning(u'video doesn\'t have subtitles')
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'n5BB19UTcdA'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
@ -100,7 +103,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
self.DL.params['subtitleslangs'] = langs
subtitles = self.getSubtitles()
for lang in langs:
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
class TestDailymotionSubtitles(BaseTestSubtitles):
@ -129,20 +132,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
self.assertEqual(len(subtitles.keys()), 5)
def test_list_subtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
def test_automatic_captions(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslang'] = ['en']
subtitles = self.getSubtitles()
self.assertTrue(len(subtitles.keys()) == 0)
def test_nosubtitles(self):
self.DL.expect_warning(u'video doesn\'t have subtitles')
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
@ -155,7 +158,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
self.DL.params['subtitleslangs'] = langs
subtitles = self.getSubtitles()
for lang in langs:
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
class TestTedSubtitles(BaseTestSubtitles):
@ -184,13 +187,13 @@ class TestTedSubtitles(BaseTestSubtitles):
self.assertTrue(len(subtitles.keys()) >= 28)
def test_list_subtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
def test_automatic_captions(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslang'] = ['en']
subtitles = self.getSubtitles()
@ -202,7 +205,7 @@ class TestTedSubtitles(BaseTestSubtitles):
self.DL.params['subtitleslangs'] = langs
subtitles = self.getSubtitles()
for lang in langs:
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
class TestBlipTVSubtitles(BaseTestSubtitles):
@ -210,13 +213,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles):
IE = BlipTVIE
def test_list_subtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
def test_allsubtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
@ -250,20 +253,20 @@ class TestVimeoSubtitles(BaseTestSubtitles):
self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
def test_list_subtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
def test_automatic_captions(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslang'] = ['en']
subtitles = self.getSubtitles()
self.assertTrue(len(subtitles.keys()) == 0)
def test_nosubtitles(self):
self.DL.expect_warning(u'video doesn\'t have subtitles')
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://vimeo.com/56015672'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
@ -276,7 +279,34 @@ class TestVimeoSubtitles(BaseTestSubtitles):
self.DL.params['subtitleslangs'] = langs
subtitles = self.getSubtitles()
for lang in langs:
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
class TestWallaSubtitles(BaseTestSubtitles):
url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
IE = WallaIE
def test_list_subtitles(self):
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
def test_allsubtitles(self):
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['heb']))
self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(len(subtitles), 0)
if __name__ == '__main__':


@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
@ -37,7 +38,9 @@ def _make_testfunc(testfile):
or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
# Recompile
try:
subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
subprocess.check_call([
'mxmlc', '-output', swf_file,
'-static-link-runtime-shared-libraries', as_file])
except OSError as ose:
if ose.errno == errno.ENOENT:
print('mxmlc not found! Skipping test.')

View File

@ -9,14 +9,13 @@ rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IGNORED_FILES = [
'setup.py', # http://bugs.python.org/issue13943
'conf.py',
'buildserver.py',
]
class TestUnicodeLiterals(unittest.TestCase):
def test_all_files(self):
print('Skipping this test (not yet fully implemented)')
return
for dirpath, _, filenames in os.walk(rootDir):
for basename in filenames:
if not basename.endswith('.py'):
@ -30,10 +29,10 @@ class TestUnicodeLiterals(unittest.TestCase):
if "'" not in code and '"' not in code:
continue
imps = 'from __future__ import unicode_literals'
self.assertTrue(
imps in code,
' %s missing in %s' % (imps, fn))
self.assertRegexpMatches(
code,
r'(?:#.*\n*)?from __future__ import (?:[a-z_]+,\s*)*unicode_literals',
'unicode_literals import missing in %s' % fn)
m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code)
if m is not None:

View File

@ -16,13 +16,14 @@ import json
import xml.etree.ElementTree
from youtube_dl.utils import (
clean_html,
DateRange,
encodeFilename,
find_xpath_attr,
fix_xml_ampersands,
get_meta_content,
orderedSet,
PagedList,
OnDemandPagedList,
InAdvancePagedList,
parse_duration,
read_batch_urls,
sanitize_filename,
@ -43,6 +44,10 @@ from youtube_dl.utils import (
limit_length,
escape_rfc3986,
escape_url,
js_to_json,
intlist_to_bytes,
args_to_str,
parse_filesize,
)
@ -115,16 +120,16 @@ class TestUtil(unittest.TestCase):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
#keep the list ordered
# keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(
unescapeHTML('&eacute;'), 'é')
def test_daterange(self):
_20century = DateRange("19000101","20000101")
_20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
@ -137,6 +142,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
def test_find_xpath_attr(self):
testxml = '''<root>
@ -151,17 +157,6 @@ class TestUtil(unittest.TestCase):
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
def test_meta_parser(self):
testhtml = '''
<head>
<meta name="description" content="foo &amp; bar">
<meta content='Plato' name='author'/>
</head>
'''
get_meta = lambda name: get_meta_content(name, testhtml)
self.assertEqual(get_meta('description'), 'foo & bar')
self.assertEqual(get_meta('author'), 'Plato')
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
@ -176,7 +171,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_smuggle_url(self):
data = {u"ö": u"ö", u"abc": [3]}
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@ -224,6 +219,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
def test_fix_xml_ampersands(self):
self.assertEqual(
@ -246,10 +242,14 @@ class TestUtil(unittest.TestCase):
for i in range(firstid, upto):
yield i
pl = PagedList(get_page, pagesize)
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
@ -279,12 +279,17 @@ class TestUtil(unittest.TestCase):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape(''), '')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
@ -325,5 +330,52 @@ class TestUtil(unittest.TestCase):
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\''
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
if __name__ == '__main__':
unittest.main()
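For context on the helpers these new assertions cover, here is a small illustrative sketch (separate from the diff) of js_to_json and parse_filesize behaving exactly as the tests above describe:

    # Illustrative sketch only; the expected values are taken from the
    # assertions above.
    import json
    from youtube_dl.utils import js_to_json, parse_filesize

    # Single-quoted, JavaScript-style object literal -> valid JSON text.
    js_blob = "{'clip':{'provider':'pseudo'}}"
    print(js_to_json(js_blob))              # {"clip":{"provider":"pseudo"}}
    print(json.loads(js_to_json(js_blob)))  # {'clip': {'provider': 'pseudo'}}

    # Human-readable sizes -> bytes; binary vs. decimal prefixes; None on junk.
    print(parse_filesize('2 MiB'))   # 2097152
    print(parse_filesize('5 GB'))    # 5000000000
    print(parse_filesize('foobar'))  # None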

View File

@ -1,5 +1,6 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
@ -31,19 +32,18 @@ params = get_params({
})
TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
class TestAnnotations(unittest.TestCase):
def setUp(self):
# Clear old files
self.tearDown()
def test_info_json(self):
expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
ie = youtube_dl.extractor.YoutubeIE()
ydl = YoutubeDL(params)
ydl.add_info_extractor(ie)
@ -51,7 +51,7 @@ class TestAnnotations(unittest.TestCase):
self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
annoxml = None
with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
annoxml = xml.etree.ElementTree.parse(annof)
annoxml = xml.etree.ElementTree.parse(annof)
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
root = annoxml.getroot()
self.assertEqual(root.tag, 'document')
@ -59,18 +59,17 @@ class TestAnnotations(unittest.TestCase):
self.assertEqual(annotationsTag.tag, 'annotations')
annotations = annotationsTag.findall('annotation')
#Not all the annotations have TEXT children and the annotations are returned unsorted.
# Not all the annotations have TEXT children and the annotations are returned unsorted.
for a in annotations:
self.assertEqual(a.tag, 'annotation')
if a.get('type') == 'text':
textTag = a.find('TEXT')
text = textTag.text
self.assertTrue(text in expected) #assertIn only added in python 2.7
#remove the first occurance, there could be more than one annotation with the same text
expected.remove(text)
#We should have seen (and removed) all the expected annotation texts.
self.assertEqual(a.tag, 'annotation')
if a.get('type') == 'text':
textTag = a.find('TEXT')
text = textTag.text
self.assertTrue(text in expected) # assertIn only added in python 2.7
# remove the first occurance, there could be more than one annotation with the same text
expected.remove(text)
# We should have seen (and removed) all the expected annotation texts.
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
def tearDown(self):
try_rm(ANNOTATIONS_FILE)

View File

@ -1,5 +1,6 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
@ -32,7 +33,7 @@ params = get_params({
TEST_ID = 'BaW_jenozKc'
INFO_JSON_FILE = TEST_ID + '.info.json'
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐
EXPECTED_DESCRIPTION = '''test chars: "'/\ä↭𝕐
test URL: https://github.com/rg3/youtube-dl/issues/1892
This is a test video for youtube-dl.
@ -53,11 +54,11 @@ class TestInfoJSON(unittest.TestCase):
self.assertTrue(os.path.exists(INFO_JSON_FILE))
with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
jd = json.load(jsonf)
self.assertEqual(jd['upload_date'], u'20121002')
self.assertEqual(jd['upload_date'], '20121002')
self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
self.assertEqual(jd['id'], TEST_ID)
self.assertEqual(jd['extractor'], 'youtube')
self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
self.assertEqual(jd['title'], '''youtube-dl test video "'/\ä↭𝕐''')
self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
self.assertTrue(os.path.exists(DESCRIPTION_FILE))

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
@ -10,13 +11,8 @@ from test.helper import FakeYDL
from youtube_dl.extractor import (
YoutubeUserIE,
YoutubePlaylistIE,
YoutubeIE,
YoutubeChannelIE,
YoutubeShowIE,
YoutubeTopListIE,
YoutubeSearchURLIE,
)
@ -32,7 +28,7 @@ class TestYoutubeLists(unittest.TestCase):
result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(result['_type'], 'url')
self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
def test_youtube_course(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
@ -43,28 +39,6 @@ class TestYoutubeLists(unittest.TestCase):
self.assertEqual(len(entries), 25)
self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
def test_youtube_channel(self):
dl = FakeYDL()
ie = YoutubeChannelIE(dl)
#test paginated channel
result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
self.assertTrue(len(result['entries']) > 90)
#test autogenerated channel
result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
self.assertTrue(len(result['entries']) >= 18)
def test_youtube_user(self):
dl = FakeYDL()
ie = YoutubeUserIE(dl)
result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
self.assertTrue(len(result['entries']) >= 320)
def test_youtube_show(self):
dl = FakeYDL()
ie = YoutubeShowIE(dl)
result = ie.extract('http://www.youtube.com/show/airdisasters')
self.assertTrue(len(result) >= 3)
def test_youtube_mix(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
@ -83,21 +57,5 @@ class TestYoutubeLists(unittest.TestCase):
entries = result['entries']
self.assertEqual(len(entries), 100)
def test_youtube_toplist(self):
dl = FakeYDL()
ie = YoutubeTopListIE(dl)
result = ie.extract('yttoplist:music:Trending')
entries = result['entries']
self.assertTrue(len(entries) >= 5)
def test_youtube_search_url(self):
dl = FakeYDL()
ie = YoutubeSearchURLIE(dl)
result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
entries = result['entries']
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'youtube-dl test video')
self.assertTrue(len(entries) >= 5)
if __name__ == '__main__':
unittest.main()

View File

@ -14,7 +14,7 @@ import re
import string
from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import compat_str, compat_urlretrieve
from youtube_dl.compat import compat_str, compat_urlretrieve
_TESTS = [
(
@ -47,18 +47,6 @@ _TESTS = [
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
'swf',
86,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
),
(
'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
'swf',
'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
'9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',

View File

@ -22,12 +22,15 @@ import traceback
if os.name == 'nt':
import ctypes
from .utils import (
from .compat import (
compat_cookiejar,
compat_expanduser,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_request,
)
from .utils import (
escape_url,
ContentTooShortError,
date_from_str,
@ -58,11 +61,13 @@ from .utils import (
write_string,
YoutubeDLHandler,
prepend_extension,
args_to_str,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .postprocessor import FFmpegMergerPP
from .downloader.rtmp import rtmpdump_version
from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
from .version import __version__
@ -108,6 +113,8 @@ class YoutubeDL(object):
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
@ -166,6 +173,8 @@ class YoutubeDL(object):
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
The following parameters are not used by YoutubeDL itself, they are used by
the FileDownloader:
@ -185,7 +194,7 @@ class YoutubeDL(object):
_num_downloads = None
_screen_file = None
def __init__(self, params=None):
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
@ -229,11 +238,11 @@ class YoutubeDL(object):
if (sys.version_info >= (3,) and sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not params['restrictfilenames']):
and not params.get('restrictfilenames', False)):
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all charactes. '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
@ -242,6 +251,26 @@ class YoutubeDL(object):
self._setup_opener()
if auto_init:
self.print_debug_header()
self.add_default_info_extractors()
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['youtube-dl'] +
[a for i, a in enumerate(argv) if i not in idxs] +
['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
@ -286,7 +315,7 @@ class YoutubeDL(object):
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
for _ in range(line_count))
return res[:-len('\n')]
def to_screen(self, message, skip_eol=False):
@ -448,7 +477,7 @@ class YoutubeDL(object):
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
tmpl = os.path.expanduser(outtmpl)
tmpl = compat_expanduser(outtmpl)
filename = tmpl % template_dict
return filename
except ValueError as err:
@ -523,7 +552,7 @@ class YoutubeDL(object):
try:
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
@ -536,7 +565,7 @@ class YoutubeDL(object):
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
except ExtractorError as de: # An error we somewhat expected
except ExtractorError as de: # An error we somewhat expected
self.report_error(compat_str(de), de.format_traceback())
break
except MaxDownloadsReached:
@ -569,8 +598,12 @@ class YoutubeDL(object):
result_type = ie_result.get('_type', 'video')
if self.params.get('extract_flat', False):
if result_type in ('url', 'url_transparent'):
if result_type in ('url', 'url_transparent'):
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
extract_flat is True):
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(ie_result))
return ie_result
if result_type == 'video':
@ -609,7 +642,7 @@ class YoutubeDL(object):
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type == 'playlist':
elif result_type == 'playlist' or result_type == 'multi_video':
# We process each entry in the playlist
playlist = ie_result.get('title', None) or ie_result.get('id', None)
self.to_screen('[download] Downloading playlist: %s' % playlist)
@ -643,6 +676,8 @@ class YoutubeDL(object):
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
@ -662,14 +697,20 @@ class YoutubeDL(object):
ie_result['entries'] = playlist_results
return ie_result
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(r,
self.add_extra_info(
r,
{
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
})
}
)
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
@ -709,7 +750,7 @@ class YoutubeDL(object):
if video_formats:
return video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp']
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
@ -815,28 +856,36 @@ class YoutubeDL(object):
if req_format in ('-1', 'all'):
formats_to_download = formats
else:
# We can accept formats requested in the format: 34/5/best, we pick
# the first that is available, starting from left
req_formats = req_format.split('/')
for rf in req_formats:
if re.match(r'.+?\+.+?', rf) is not None:
# Two formats have been requested like '137+139'
format_1, format_2 = rf.split('+')
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
selected_format = {
'requested_formats': formats_info,
'format': rf,
'ext': formats_info[0]['ext'],
}
for rfstr in req_format.split(','):
# We can accept formats requested in the format: 34/5/best, we pick
# the first that is available, starting from left
req_formats = rfstr.split('/')
for rf in req_formats:
if re.match(r'.+?\+.+?', rf) is not None:
# Two formats have been requested like '137+139'
format_1, format_2 = rf.split('+')
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
selected_format = {
'requested_formats': formats_info,
'format': rf,
'ext': formats_info[0]['ext'],
}
else:
selected_format = None
else:
selected_format = None
else:
selected_format = self.select_format(rf, formats)
if selected_format is not None:
formats_to_download = [selected_format]
break
selected_format = self.select_format(rf, formats)
if selected_format is not None:
formats_to_download.append(selected_format)
break
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
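A rough sketch of the format strings the rewritten loop above accepts: a slash lists fallbacks, a plus requests a video+audio merge (with the video format first), and a comma, new in this change, separates independent selections that are each downloaded. The snippet below is illustrative only and the URL is a placeholder.

    # Sketch only: '137+139' asks ffmpeg/avconv to merge video and audio,
    # '/best' is the fallback if that pair is unavailable.
    import youtube_dl

    with youtube_dl.YoutubeDL({'format': '137+139/best'}) as ydl:
        ydl.download(['PLACEHOLDER_VIDEO_URL'])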
@ -902,6 +951,8 @@ class YoutubeDL(object):
if self.params.get('forcejson', False):
info_dict['_filename'] = filename
self.to_stdout(json.dumps(info_dict))
if self.params.get('dump_single_json', False):
info_dict['_filename'] = filename
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
@ -967,7 +1018,7 @@ class YoutubeDL(object):
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
subfile.write(sub)
subfile.write(sub)
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
@ -979,7 +1030,7 @@ class YoutubeDL(object):
else:
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
try:
write_json_file(info_dict, encodeFilename(infofn))
write_json_file(info_dict, infofn)
except (OSError, IOError):
self.report_error('Cannot write metadata to JSON file ' + infofn)
return
@ -999,10 +1050,10 @@ class YoutubeDL(object):
with open(thumb_filename, 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail to: %s' %
(info_dict['extractor'], info_dict['id'], thumb_filename))
(info_dict['extractor'], info_dict['id'], thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
(info_dict['thumbnail'], compat_str(err)))
(info_dict['thumbnail'], compat_str(err)))
if not self.params.get('skip_download', False):
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@ -1020,11 +1071,11 @@ class YoutubeDL(object):
downloaded = []
success = True
merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
if not merger._get_executable():
if not merger._executable:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged')
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged')
else:
postprocessors = [merger]
for f in info_dict['requested_formats']:
@ -1086,13 +1137,16 @@ class YoutubeDL(object):
for url in url_list:
try:
#It also downloads the videos
self.extract_info(url)
# It also downloads the videos
res = self.extract_info(url)
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))
return self._download_retcode
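As a hedged sketch of how the new dump_single_json and extract_flat options look from an embedding script: with these settings download() prints one JSON document for the whole playlist, as the dump at the end of the method above shows, without fetching any media. The URL is a placeholder.

    # Sketch only, mirroring the option names handled above.
    import youtube_dl

    ydl_opts = {
        'simulate': True,               # do not download media
        'dump_single_json': True,       # print a single JSON blob
        'extract_flat': 'in_playlist',  # do not resolve playlist entries
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['PLACEHOLDER_PLAYLIST_URL'])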
@ -1216,6 +1270,8 @@ class YoutubeDL(object):
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
res += ', %sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
@ -1273,12 +1329,13 @@ class YoutubeDL(object):
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
url = req if isinstance(req, compat_str) else req.get_full_url()
req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
url = req if req_is_string else req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
if isinstance(req, compat_str):
if req_is_string:
req = url_escaped
else:
req = compat_urllib_request.Request(
@ -1296,11 +1353,13 @@ class YoutubeDL(object):
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
sys.stdout.encoding,
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
@ -1319,8 +1378,19 @@ class YoutubeDL(object):
sys.exc_clear()
except:
pass
self._write_string('[debug] Python version %s - %s' %
(platform.python_version(), platform_name()) + '\n')
self._write_string('[debug] Python version %s - %s\n' % (
platform.python_version(), platform_name()))
exe_versions = FFmpegPostProcessor.get_versions()
exe_versions['rtmpdump'] = rtmpdump_version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:

View File

@ -1,82 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = (
'Ricardo Garcia Gonzalez',
'Danny Colligan',
'Benjamin Johnson',
'Vasyl\' Vavrychuk',
'Witold Baryluk',
'Paweł Paprota',
'Gergely Imreh',
'Rogério Brito',
'Philipp Hagemeister',
'Sören Schulze',
'Kevin Ngo',
'Ori Avtalion',
'shizeeg',
'Filippo Valsorda',
'Christian Albrecht',
'Dave Vasilevsky',
'Jaime Marquínez Ferrándiz',
'Jeff Crouse',
'Osama Khalid',
'Michael Walter',
'M. Yasoob Ullah Khalid',
'Julien Fraichard',
'Johny Mo Swag',
'Axel Noack',
'Albert Kim',
'Pierre Rudloff',
'Huarong Huo',
'Ismael Mejía',
'Steffan \'Ruirize\' James',
'Andras Elso',
'Jelle van der Waa',
'Marcin Cieślak',
'Anton Larionov',
'Takuya Tsuchida',
'Sergey M.',
'Michael Orlitzky',
'Chris Gahan',
'Saimadhav Heblikar',
'Mike Col',
'Oleg Prutz',
'pulpe',
'Andreas Schmitz',
'Michael Kaiser',
'Niklas Laxström',
'David Triendl',
'Anthony Weems',
'David Wagner',
'Juan C. Olivares',
'Mattias Harrysson',
'phaer',
'Sainyam Kapoor',
'Nicolas Évrard',
'Jason Normore',
'Hoje Lee',
'Adam Thalhammer',
'Georg Jähnig',
'Ralf Haring',
'Koki Takahashi',
'Ariset Llerena',
'Adam Malcontenti-Wilson',
'Tobias Bell',
'Naglis Jonaitis',
'Charles Chen',
'Hassaan Ali',
'Dobrosław Żybort',
'David Fabijan',
'Sebastian Haas',
'Alexander Kirk',
'Erik Johnson',
'Keith Beckman',
'Ole Ernst',
'Aaron McDaniel (mcd1992)',
'Magnus Kolstad',
'Hari Padmanaban',
)
from __future__ import unicode_literals
__license__ = 'Public Domain'
@ -90,9 +15,13 @@ import sys
from .options import (
parseOpts,
)
from .utils import (
from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
workaround_optparse_bug9161,
)
from .utils import (
DateRange,
DEFAULT_OUTTMPL,
decodeOption,
@ -130,7 +59,9 @@ def _real_main(argv=None):
# https://github.com/rg3/youtube-dl/issues/820
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
setproctitle(u'youtube-dl')
workaround_optparse_bug9161()
setproctitle('youtube-dl')
parser, opts, args = parseOpts(argv)
@ -146,10 +77,10 @@ def _real_main(argv=None):
if opts.headers is not None:
for h in opts.headers:
if h.find(':', 1) < 0:
parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
key, value = h.split(':', 2)
if opts.verbose:
write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
std_headers[key] = value
# Dump user agent
@ -167,9 +98,9 @@ def _real_main(argv=None):
batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
batch_urls = read_batch_urls(batchfd)
if opts.verbose:
write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
except IOError:
sys.exit(u'ERROR: batch file could not be read')
sys.exit('ERROR: batch file could not be read')
all_urls = batch_urls + args
all_urls = [url.strip() for url in all_urls]
_enc = preferredencoding()
@ -182,7 +113,7 @@ def _real_main(argv=None):
compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
matchedUrls = [url for url in all_urls if ie.suitable(url)]
for mu in matchedUrls:
compat_print(u' ' + mu)
compat_print(' ' + mu)
sys.exit(0)
if opts.list_extractor_descriptions:
for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
@ -192,69 +123,66 @@ def _real_main(argv=None):
if desc is False:
continue
if hasattr(ie, 'SEARCH_KEY'):
_SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
_COUNTS = (u'', u'5', u'10', u'all')
desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny')
_COUNTS = ('', '5', '10', 'all')
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
compat_print(desc)
sys.exit(0)
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error(u'using .netrc conflicts with giving username/password')
parser.error('using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
parser.error(u'account username missing\n')
parser.error('account username missing\n')
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
parser.error(u'using output template conflicts with using title, video ID or auto number')
parser.error('using output template conflicts with using title, video ID or auto number')
if opts.usetitle and opts.useid:
parser.error(u'using title conflicts with using video ID')
parser.error('using title conflicts with using video ID')
if opts.username is not None and opts.password is None:
opts.password = compat_getpass(u'Type account password and press [Return]: ')
opts.password = compat_getpass('Type account password and press [Return]: ')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
parser.error(u'invalid rate limit specified')
parser.error('invalid rate limit specified')
opts.ratelimit = numeric_limit
if opts.min_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
if numeric_limit is None:
parser.error(u'invalid min_filesize specified')
parser.error('invalid min_filesize specified')
opts.min_filesize = numeric_limit
if opts.max_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
if numeric_limit is None:
parser.error(u'invalid max_filesize specified')
parser.error('invalid max_filesize specified')
opts.max_filesize = numeric_limit
if opts.retries is not None:
try:
opts.retries = int(opts.retries)
except (TypeError, ValueError):
parser.error(u'invalid retry count specified')
parser.error('invalid retry count specified')
if opts.buffersize is not None:
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
if numeric_buffersize is None:
parser.error(u'invalid buffer size specified')
parser.error('invalid buffer size specified')
opts.buffersize = numeric_buffersize
if opts.playliststart <= 0:
raise ValueError(u'Playlist start must be positive')
raise ValueError('Playlist start must be positive')
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
raise ValueError(u'Playlist end must be greater than playlist start')
raise ValueError('Playlist end must be greater than playlist start')
if opts.extractaudio:
if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
parser.error(u'invalid audio format specified')
parser.error('invalid audio format specified')
if opts.audioquality:
opts.audioquality = opts.audioquality.strip('k').strip('K')
if not opts.audioquality.isdigit():
parser.error(u'invalid audio quality specified')
parser.error('invalid audio quality specified')
if opts.recodevideo is not None:
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
parser.error(u'invalid video recode format specified')
parser.error('invalid video recode format specified')
if opts.date is not None:
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
@ -262,28 +190,28 @@ def _real_main(argv=None):
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
# this was the old behaviour if only --all-sub was given.
if opts.allsubtitles and (opts.writeautomaticsub == False):
if opts.allsubtitles and not opts.writeautomaticsub:
opts.writesubtitles = True
if sys.version_info < (3,):
# In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
if opts.outtmpl is not None:
opts.outtmpl = opts.outtmpl.decode(preferredencoding())
outtmpl =((opts.outtmpl is not None and opts.outtmpl)
or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
or (opts.useid and u'%(id)s.%(ext)s')
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
or DEFAULT_OUTTMPL)
outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
or (opts.useid and '%(id)s.%(ext)s')
or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
or DEFAULT_OUTTMPL)
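The templates in the chain above are plain %-style patterns, so an explicit one can be supplied the same way. This sketch is not part of the change and the URL is a placeholder:

    # Sketch: an explicit output template built from the same fields the
    # defaults above use (autonumber, title, id, ext).
    import youtube_dl

    ydl_opts = {'outtmpl': '%(autonumber)s-%(title)s-%(id)s.%(ext)s'}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['PLACEHOLDER_VIDEO_URL'])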
if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
parser.error(u'Cannot download a video and extract audio into the same'
u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
u' template'.format(outtmpl))
parser.error('Cannot download a video and extract audio into the same'
' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
' template'.format(outtmpl))
any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
ydl_opts = {
'usenetrc': opts.usenetrc,
@ -302,8 +230,9 @@ def _real_main(argv=None):
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson,
'simulate': opts.simulate,
'skip_download': (opts.skip_download or opts.simulate or any_printing),
'dump_single_json': opts.dump_single_json,
'simulate': opts.simulate or any_printing,
'skip_download': opts.skip_download,
'format': opts.format,
'format_limit': opts.format_limit,
'listformats': opts.listformats,
@ -368,12 +297,10 @@ def _real_main(argv=None):
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'exec_cmd': opts.exec_cmd,
'extract_flat': opts.extract_flat,
}
with YoutubeDL(ydl_opts) as ydl:
ydl.print_debug_header()
ydl.add_default_info_extractors()
# PostProcessors
if opts.joinparts:
ydl.add_post_processor(FFmpegJoinVideosPP())
@ -393,7 +320,6 @@ def _real_main(argv=None):
ydl.add_post_processor(FFmpegAudioFixPP())
ydl.add_post_processor(AtomicParsleyPP())
# Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
# So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
if opts.exec_cmd:
@ -410,18 +336,19 @@ def _real_main(argv=None):
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):
if not (opts.update_self or opts.rm_cachedir):
parser.error(u'you must provide at least one URL')
else:
if opts.update_self or opts.rm_cachedir:
sys.exit()
ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
parser.error('you must provide at least one URL')
try:
if opts.load_info_filename is not None:
retcode = ydl.download_with_info_file(opts.load_info_filename)
else:
retcode = ydl.download(all_urls)
except MaxDownloadsReached:
ydl.to_screen(u'--max-download limit reached, aborting.')
ydl.to_screen('--max-download limit reached, aborting.')
retcode = 101
sys.exit(retcode)
@ -433,6 +360,6 @@ def main(argv=None):
except DownloadError:
sys.exit(1)
except SameFileError:
sys.exit(u'ERROR: fixed output name but more than one file to download')
sys.exit('ERROR: fixed output name but more than one file to download')
except KeyboardInterrupt:
sys.exit(u'\nERROR: Interrupted by user')
sys.exit('\nERROR: Interrupted by user')

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
from __future__ import unicode_literals
# Execute with
# $ python youtube_dl/__main__.py (2.6+)

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
import base64
@ -7,10 +9,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes
BLOCK_SIZE_BYTES = 16
def aes_ctr_decrypt(data, key, counter):
"""
Decrypt with aes in counter mode
@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
@ -19,23 +22,24 @@ def aes_ctr_decrypt(data, key, counter):
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
decrypted_data=[]
decrypted_data = []
for i in range(block_count):
counter_block = counter.next_value()
block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
block += [0]*(BLOCK_SIZE_BYTES - len(block))
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
block += [0] * (BLOCK_SIZE_BYTES - len(block))
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
decrypted_data += xor(block, cipher_counter_block)
decrypted_data = decrypted_data[:len(data)]
return decrypted_data
def aes_cbc_decrypt(data, key, iv):
"""
Decrypt with aes in CBC mode
@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte IV
@ -43,94 +47,98 @@ def aes_cbc_decrypt(data, key, iv):
"""
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
decrypted_data=[]
decrypted_data = []
previous_cipher_block = iv
for i in range(block_count):
block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
block += [0]*(BLOCK_SIZE_BYTES - len(block))
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
block += [0] * (BLOCK_SIZE_BYTES - len(block))
decrypted_block = aes_decrypt(block, expanded_key)
decrypted_data += xor(decrypted_block, previous_cipher_block)
previous_cipher_block = block
decrypted_data = decrypted_data[:len(data)]
return decrypted_data
def key_expansion(data):
"""
Generate key schedule
@param {int[]} data 16/24/32-Byte cipher key
@returns {int[]} 176/208/240-Byte expanded key
@returns {int[]} 176/208/240-Byte expanded key
"""
data = data[:] # copy
data = data[:] # copy
rcon_iteration = 1
key_size_bytes = len(data)
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
while len(data) < expanded_key_size_bytes:
temp = data[-4:]
temp = key_schedule_core(temp, rcon_iteration)
rcon_iteration += 1
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
for _ in range(3):
temp = data[-4:]
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
if key_size_bytes == 32:
temp = data[-4:]
temp = sub_bytes(temp)
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
temp = data[-4:]
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
data = data[:expanded_key_size_bytes]
return data
def aes_encrypt(data, expanded_key):
"""
Encrypt one block with aes
@param {int[]} data 16-Byte state
@param {int[]} expanded_key 176/208/240-Byte expanded key
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte cipher
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
for i in range(1, rounds+1):
for i in range(1, rounds + 1):
data = sub_bytes(data)
data = shift_rows(data)
if i != rounds:
data = mix_columns(data)
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
return data
def aes_decrypt(data, expanded_key):
"""
Decrypt one block with aes
@param {int[]} data 16-Byte cipher
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte state
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
for i in range(rounds, 0, -1):
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
if i != rounds:
data = mix_columns_inv(data)
data = shift_rows_inv(data)
data = sub_bytes_inv(data)
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
return data
def aes_decrypt_text(data, password, key_size_bytes):
"""
Decrypt text
@ -138,33 +146,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
- The cipher key is retrieved by encrypting the first 16 Byte of 'password'
with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
- Mode of operation is 'counter'
@param {str} data Base64 encoded string
@param {str,unicode} password Password (will be encoded with utf-8)
@param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
@returns {str} Decrypted data
"""
NONCE_LENGTH_BYTES = 8
data = bytes_to_intlist(base64.b64decode(data))
password = bytes_to_intlist(password.encode('utf-8'))
key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
nonce = data[:NONCE_LENGTH_BYTES]
cipher = data[NONCE_LENGTH_BYTES:]
class Counter:
__value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
def next_value(self):
temp = self.__value
self.__value = inc(self.__value)
return temp
decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
plaintext = intlist_to_bytes(decrypted_data)
return plaintext
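To make the calling convention of aes_decrypt_text concrete, here is a small sketch separate from the diff; the payload is a dummy built only so that the call is well-formed (an 8-byte nonce plus some cipher bytes), not real encrypted data.

    # Sketch only: dummy inputs, meaningless output.
    import base64
    from youtube_dl.aes import aes_decrypt_text

    dummy = base64.b64encode(b'\x00' * 8 + b'not really encrypted')
    plaintext = aes_decrypt_text(dummy.decode('ascii'),
                                 'some password', 16)  # 16 bytes -> AES-128
    print(plaintext)  # bytes; garbage here because the input is a dummy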
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@ -200,14 +209,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
(0x1,0x2,0x3,0x1),
(0x1,0x1,0x2,0x3),
(0x3,0x1,0x1,0x2))
MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
(0x9,0xE,0xB,0xD),
(0xD,0x9,0xE,0xB),
(0xB,0xD,0x9,0xE))
MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
(0x1, 0x2, 0x3, 0x1),
(0x1, 0x1, 0x2, 0x3),
(0x3, 0x1, 0x1, 0x2))
MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
(0x9, 0xE, 0xB, 0xD),
(0xD, 0x9, 0xE, 0xB),
(0xB, 0xD, 0x9, 0xE))
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@ -241,30 +250,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
def sub_bytes(data):
return [SBOX[x] for x in data]
def sub_bytes_inv(data):
return [SBOX_INV[x] for x in data]
def rotate(data):
return data[1:] + [data[0]]
def key_schedule_core(data, rcon_iteration):
data = rotate(data)
data = sub_bytes(data)
data[0] = data[0] ^ RCON[rcon_iteration]
return data
def xor(data1, data2):
return [x^y for x, y in zip(data1, data2)]
return [x ^ y for x, y in zip(data1, data2)]
def rijndael_mul(a, b):
if(a==0 or b==0):
if(a == 0 or b == 0):
return 0
return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
def mix_column(data, matrix):
data_mixed = []
for row in range(4):
@ -275,33 +291,38 @@ def mix_column(data, matrix):
data_mixed.append(mixed)
return data_mixed
def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
data_mixed = []
for i in range(4):
column = data[i*4 : (i+1)*4]
column = data[i * 4: (i + 1) * 4]
data_mixed += mix_column(column, matrix)
return data_mixed
def mix_columns_inv(data):
return mix_columns(data, MIX_COLUMN_MATRIX_INV)
def shift_rows(data):
data_shifted = []
for column in range(4):
for row in range(4):
data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
data_shifted.append(data[((column + row) & 0b11) * 4 + row])
return data_shifted
def shift_rows_inv(data):
data_shifted = []
for column in range(4):
for row in range(4):
data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
data_shifted.append(data[((column - row) & 0b11) * 4 + row])
return data_shifted
def inc(data):
data = data[:] # copy
for i in range(len(data)-1,-1,-1):
data = data[:] # copy
for i in range(len(data) - 1, -1, -1):
if data[i] == 255:
data[i] = 0
else:

View File

@ -8,9 +8,8 @@ import re
import shutil
import traceback
from .utils import (
write_json_file,
)
from .compat import compat_expanduser, compat_getenv
from .utils import write_json_file
class Cache(object):
@ -20,9 +19,9 @@ class Cache(object):
def _get_root_dir(self):
res = self._ydl.params.get('cachedir')
if res is None:
cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, 'youtube-dl')
return os.path.expanduser(res)
return compat_expanduser(res)
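In other words, the cache root defaults to $XDG_CACHE_HOME/youtube-dl (usually ~/.cache/youtube-dl) unless the embedding code overrides it; a tiny hedged sketch of that override:

    # Sketch: point the extractor cache at an explicit directory instead of
    # the XDG default resolved above.
    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'cachedir': '/tmp/ydl-cache'})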
def _get_cache_fn(self, section, key, dtype):
assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \

356
youtube_dl/compat.py Normal file
View File

@ -0,0 +1,356 @@
from __future__ import unicode_literals
import getpass
import optparse
import os
import re
import subprocess
import sys
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
if re.match(r'^[-_\w./]+$', s):
return s
else:
return "'" + s.replace("'", "'\"'\"'") + "'"
def compat_ord(c):
if type(c) is int:
return c
else:
return ord(c)
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if os.name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif os.name == 'nt' or os.name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: # ~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
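Per the comments above, the point of these wrappers is that on Python 2 they decode environment variables with the filesystem encoding, while on Python 3 they are simply the stdlib functions; a short sketch of preferring them over the bare stdlib calls:

    # Sketch: non-ASCII values in HOME/XDG_CACHE_HOME survive on Python 2.
    from youtube_dl.compat import compat_expanduser, compat_getenv

    cache_home = compat_getenv('XDG_CACHE_HOME', '~/.cache')
    print(compat_expanduser(cache_home))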
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert isinstance(s, compat_str)
print(s)
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
# Old 2.6 and 2.7 releases require kwargs to be bytes
try:
(lambda x: x)(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
__all__ = [
'compat_HTTPError',
'compat_chr',
'compat_cookiejar',
'compat_expanduser',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_parser',
'compat_http_client',
'compat_kwargs',
'compat_ord',
'compat_parse_qs',
'compat_print',
'compat_str',
'compat_subprocess_get_DEVNULL',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'shlex_quote',
'subprocess_check_output',
'workaround_optparse_bug9161',
]
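
A standalone sketch of the pre-3.3 quoting fallback defined above, useful for checking the behaviour outside of youtube_dl (the function name and sample strings here are made up for illustration):

import re

def fallback_shlex_quote(s):
    # Same rule as the compat fallback above: conservative character classes
    # pass through untouched, anything else is single-quoted with embedded
    # single quotes escaped via '"'"'.
    if re.match(r'^[-_\w./]+$', s):
        return s
    return "'" + s.replace("'", "'\"'\"'") + "'"

print(fallback_shlex_quote('video.mp4'))      # video.mp4
print(fallback_shlex_quote("it's a test"))    # 'it'"'"'s a test'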

View File

@ -2,6 +2,7 @@ from __future__ import unicode_literals
from .common import FileDownloader
from .hls import HlsFD
from .hls import NativeHlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
@ -19,6 +20,8 @@ def get_suitable_downloader(info_dict):
if url.startswith('rtmp'):
return RtmpFD
if protocol == 'm3u8_native':
return NativeHlsFD
if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
return HlsFD
if url.startswith('mms') or url.startswith('rtsp'):
@ -27,3 +30,8 @@ def get_suitable_downloader(info_dict):
return F4mFD
else:
return HttpFD
__all__ = [
'get_suitable_downloader',
'FileDownloader',
]
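
A hedged usage sketch for get_suitable_downloader(): it assumes the youtube_dl package is importable and that the info dict only needs a 'url' plus an optional 'protocol' key, mirroring the dispatch logic above (the URLs below are invented):

from youtube_dl.downloader import get_suitable_downloader

# Each dict mimics the relevant keys of an extractor's info_dict.
print(get_suitable_downloader({'url': 'rtmp://example.com/live'}).__name__)    # RtmpFD
print(get_suitable_downloader({'url': 'http://example.com/a.m3u8'}).__name__)  # HlsFD
print(get_suitable_downloader({'url': 'http://example.com/a.m3u8',
                               'protocol': 'm3u8_native'}).__name__)           # NativeHlsFD
print(get_suitable_downloader({'url': 'http://example.com/a.mp4'}).__name__)   # HttpFD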

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
import os
import re
import sys
@ -42,6 +44,7 @@ class FileDownloader(object):
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
@ -78,7 +81,7 @@ class FileDownloader(object):
if total is None:
return None
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@ -92,7 +95,7 @@ class FileDownloader(object):
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@ -105,7 +108,7 @@ class FileDownloader(object):
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
@ -158,14 +161,14 @@ class FileDownloader(object):
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == u'-' or \
if self.params.get('nopart', False) or filename == '-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + u'.part'
return filename + '.part'
def undo_temp_name(self, filename):
if filename.endswith(u'.part'):
return filename[:-len(u'.part')]
if filename.endswith('.part'):
return filename[:-len('.part')]
return filename
def try_rename(self, old_filename, new_filename):
@ -174,7 +177,7 @@ class FileDownloader(object):
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.report_error(u'unable to rename file: %s' % compat_str(err))
self.report_error('unable to rename file: %s' % compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
@ -199,10 +202,10 @@ class FileDownloader(object):
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen(u'[download] Destination: ' + filename)
self.to_screen('[download] Destination: ' + filename)
def _report_progress_status(self, msg, is_last_line=False):
fullmsg = u'[download] ' + msg
fullmsg = '[download] ' + msg
if self.params.get('progress_with_newline', False):
self.to_screen(fullmsg)
else:
@ -210,13 +213,13 @@ class FileDownloader(object):
prev_len = getattr(self, '_report_progress_prev_line_length',
0)
if prev_len > len(fullmsg):
fullmsg += u' ' * (prev_len - len(fullmsg))
fullmsg += ' ' * (prev_len - len(fullmsg))
self._report_progress_prev_line_length = len(fullmsg)
clear_line = u'\r'
clear_line = '\r'
else:
clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
self.to_console_title(u'youtube-dl ' + msg)
self.to_console_title('youtube-dl ' + msg)
def report_progress(self, percent, data_len_str, speed, eta):
"""Report download progress."""
@ -232,7 +235,7 @@ class FileDownloader(object):
percent_str = 'Unknown %'
speed_str = self.format_speed(speed)
msg = (u'%s of %s at %s ETA %s' %
msg = ('%s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str))
self._report_progress_status(msg)
@ -242,37 +245,37 @@ class FileDownloader(object):
downloaded_str = format_bytes(downloaded_data_len)
speed_str = self.format_speed(speed)
elapsed_str = FileDownloader.format_seconds(elapsed)
msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
self._report_progress_status(msg)
def report_finish(self, data_len_str, tot_time):
"""Report download finished."""
if self.params.get('noprogress', False):
self.to_screen(u'[download] Download completed')
self.to_screen('[download] Download completed')
else:
self._report_progress_status(
(u'100%% of %s in %s' %
('100%% of %s in %s' %
(data_len_str, self.format_seconds(tot_time))),
is_last_line=True)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
self.to_screen('[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen(u'[download] %s has already been downloaded' % file_name)
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen(u'[download] The file has already been downloaded')
self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen(u'[download] Unable to resume')
self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
@ -292,7 +295,7 @@ class FileDownloader(object):
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
raise NotImplementedError(u'This method must be implemented by subclasses')
raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:

View File

@ -16,6 +16,7 @@ from ..utils import (
format_bytes,
encodeFilename,
sanitize_open,
xpath_text,
)
@ -54,7 +55,7 @@ class FlvReader(io.BytesIO):
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
return real_size, box_type, self.read(real_size - header_end)
def read_asrt(self):
# version
@ -179,7 +180,7 @@ def build_fragments_list(boot_info):
n_frags = segment_run_entry[1]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
res.append((1, frag_number))
return res
@ -224,13 +225,15 @@ class F4mFD(FileDownloader):
self.to_screen('[download] Downloading f4m manifest')
manifest = self.ydl.urlopen(man_url).read()
self.report_destination(filename)
http_dl = HttpQuietDownloader(self.ydl,
http_dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'test': self.params.get('test', False),
})
}
)
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
@ -243,14 +246,23 @@ class F4mFD(FileDownloader):
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
if bootstrap_node.text is None:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_node.attrib['url'])
bootstrap = self.ydl.urlopen(bootstrap_url).read()
else:
bootstrap = base64.b64decode(bootstrap_node.text)
metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
boot_info = read_bootstrap_info(bootstrap)
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
@ -267,7 +279,7 @@ class F4mFD(FileDownloader):
def frag_progress_hook(status):
frag_total_bytes = status.get('total_bytes', 0)
estimated_size = (state['downloaded_bytes'] +
(total_frags - state['frag_counter']) * frag_total_bytes)
(total_frags - state['frag_counter']) * frag_total_bytes)
if status['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_counter'] += 1
@ -277,19 +289,21 @@ class F4mFD(FileDownloader):
frag_downloaded_bytes = status['downloaded_bytes']
byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
frag_total_bytes)
progress = self.calc_percent(state['frag_counter'], total_frags)
progress += frag_progress / float(total_frags)
eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
self.report_progress(progress, format_bytes(estimated_size),
status.get('speed'), eta)
status.get('speed'), eta)
http_dl.add_progress_hook(frag_progress_hook)
frags_filenames = []
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:

View File

@ -1,8 +1,13 @@
from __future__ import unicode_literals
import os
import re
import subprocess
from .common import FileDownloader
from ..utils import (
compat_urlparse,
compat_urllib_request,
check_executable,
encodeFilename,
)
@ -23,14 +28,14 @@ class HlsFD(FileDownloader):
if check_executable(program, ['-version']):
break
else:
self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
return False
cmd = [program] + args
retval = subprocess.call(cmd)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize))
self.to_screen('\r[%s] %s bytes' % (cmd[0], fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
@ -40,6 +45,59 @@ class HlsFD(FileDownloader):
})
return True
else:
self.to_stderr(u"\n")
self.report_error(u'%s exited with code %d' % (program, retval))
self.to_stderr('\n')
self.report_error('%s exited with code %d' % (program, retval))
return False
class NativeHlsFD(FileDownloader):
""" A more limited implementation that does not require ffmpeg """
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
self.to_screen(
'[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
data = self.ydl.urlopen(url).read()
s = data.decode('utf-8', 'ignore')
segment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(url, line))
segment_urls.append(segment_url)
is_test = self.params.get('test', False)
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
byte_counter = 0
with open(tmpfilename, 'wb') as outf:
for i, segurl in enumerate(segment_urls):
self.to_screen(
'[hlsnative] %s: Downloading segment %d / %d' %
(info_dict['id'], i + 1, len(segment_urls)))
seg_req = compat_urllib_request.Request(segurl)
if remaining_bytes is not None:
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
segment = self.ydl.urlopen(seg_req).read()
if remaining_bytes is not None:
segment = segment[:remaining_bytes]
remaining_bytes -= len(segment)
outf.write(segment)
byte_counter += len(segment)
if remaining_bytes is not None and remaining_bytes <= 0:
break
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
})
self.try_rename(tmpfilename, filename)
return True
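
An illustration of the segment-URL resolution performed by NativeHlsFD above, rewritten with plain stdlib calls instead of the compat wrappers (the manifest contents and URLs are hypothetical):

import re
from urllib.parse import urljoin

manifest_url = 'http://example.com/stream/index.m3u8'
manifest = '#EXTM3U\n#EXTINF:10,\nseg0.ts\n#EXTINF:10,\nhttp://cdn.example.com/seg1.ts\n'

segment_urls = []
for line in manifest.splitlines():
    line = line.strip()
    if line and not line.startswith('#'):
        # Absolute URLs are kept as-is, relative ones are joined to the manifest URL.
        segment_urls.append(line if re.match(r'^https?://', line)
                            else urljoin(manifest_url, line))

print(segment_urls)
# ['http://example.com/stream/seg0.ts', 'http://cdn.example.com/seg1.ts']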

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
import os
import time
@ -14,8 +16,6 @@ from ..utils import (
class HttpFD(FileDownloader):
_TEST_FILE_SIZE = 10241
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
@ -108,7 +108,7 @@ class HttpFD(FileDownloader):
self.report_retry(count, retries)
if count > retries:
self.report_error(u'giving up after %s retries' % retries)
self.report_error('giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
@ -126,10 +126,10 @@ class HttpFD(FileDownloader):
min_data_len = self.params.get("min_filesize", None)
max_data_len = self.params.get("max_filesize", None)
if min_data_len is not None and data_len < min_data_len:
self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
data_len_str = format_bytes(data_len)
@ -153,13 +153,13 @@ class HttpFD(FileDownloader):
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.report_error(u'unable to open for writing: %s' % str(err))
self.report_error('unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr(u"\n")
self.report_error(u'unable to write data: %s' % str(err))
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
@ -190,10 +190,10 @@ class HttpFD(FileDownloader):
self.slow_down(start, byte_counter - resume_len)
if stream is None:
self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
self.to_stderr('\n')
self.report_error('Did not get any data blocks')
return False
if tmpfilename != u'-':
if tmpfilename != '-':
stream.close()
self.report_finish(data_len_str, (time.time() - start))
if data_len is not None and byte_counter != data_len:

View File

@ -1,7 +1,10 @@
from __future__ import unicode_literals
import os
import subprocess
from .common import FileDownloader
from ..compat import compat_subprocess_get_DEVNULL
from ..utils import (
encodeFilename,
)
@ -13,19 +16,23 @@ class MplayerFD(FileDownloader):
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
args = [
'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
'-dumpstream', '-dumpfile', tmpfilename, url]
# Check for mplayer first
try:
subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
subprocess.call(
['mplayer', '-h'],
stdout=compat_subprocess_get_DEVNULL(), stderr=subprocess.STDOUT)
except (OSError, IOError):
self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
self.report_error('MMS or RTSP download detected but "%s" could not be run' % args[0])
return False
# Download using mplayer.
retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
@ -35,6 +42,6 @@ class MplayerFD(FileDownloader):
})
return True
else:
self.to_stderr(u"\n")
self.report_error(u'mplayer exited with code %d' % retval)
self.to_stderr('\n')
self.report_error('mplayer exited with code %d' % retval)
return False

View File

@ -12,9 +12,15 @@ from ..utils import (
compat_str,
encodeFilename,
format_bytes,
get_exe_version,
)
def rtmpdump_version():
return get_exe_version(
'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
class RtmpFD(FileDownloader):
def real_download(self, filename, info_dict):
def run_rtmpdump(args):
@ -40,13 +46,13 @@ class RtmpFD(FileDownloader):
continue
mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
if mobj:
downloaded_data_len = int(float(mobj.group(1))*1024)
downloaded_data_len = int(float(mobj.group(1)) * 1024)
percent = float(mobj.group(2))
if not resume_percent:
resume_percent = percent
resume_downloaded_data_len = downloaded_data_len
eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
data_len = None
if percent > 0:
data_len = int(downloaded_data_len * 100 / percent)
@ -66,7 +72,7 @@ class RtmpFD(FileDownloader):
# no percent for live streams
mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
if mobj:
downloaded_data_len = int(float(mobj.group(1))*1024)
downloaded_data_len = int(float(mobj.group(1)) * 1024)
time_now = time.time()
speed = self.calc_speed(start, time_now, downloaded_data_len)
self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@ -82,7 +88,7 @@ class RtmpFD(FileDownloader):
if not cursor_in_new_line:
self.to_screen('')
cursor_in_new_line = True
self.to_screen('[rtmpdump] '+line)
self.to_screen('[rtmpdump] ' + line)
proc.wait()
if not cursor_in_new_line:
self.to_screen('')
@ -174,7 +180,7 @@ class RtmpFD(FileDownloader):
while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
prevsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] %s bytes' % prevsize)
time.sleep(5.0) # This seems to be needed
time.sleep(5.0) # This seems to be needed
retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
cursize = os.path.getsize(encodeFilename(tmpfilename))
if prevsize == cursize and retval == RD_FAILED:

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
from .abc import ABCIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
@ -20,19 +22,23 @@ from .arte import (
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .audiomack import AudiomackIE
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
@ -60,9 +66,11 @@ from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crunchyroll import CrunchyrollIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
@ -111,6 +119,7 @@ from .fktv import (
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .fourtube import FourTubeIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
@ -123,6 +132,7 @@ from .francetv import (
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
@ -134,13 +144,18 @@ from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .hark import HarkIE
from .heise import HeiseIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hornbunny import HornBunnyIE
@ -170,7 +185,6 @@ from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
@ -181,6 +195,7 @@ from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
@ -188,6 +203,7 @@ from .livestream import (
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
@ -199,6 +215,7 @@ from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
@ -208,6 +225,7 @@ from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
@ -220,6 +238,7 @@ from .mtv import (
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
@ -237,8 +256,9 @@ from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
@ -246,7 +266,10 @@ from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import NPOIE
from .npo import (
NPOIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKTVIE,
@ -254,6 +277,7 @@ from .nrk import (
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .orf import (
ORFTVthekIE,
@ -263,7 +287,10 @@ from .orf import (
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
@ -274,6 +301,7 @@ from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .quickvid import QuickVidIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
@ -302,6 +330,8 @@ from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
@ -332,7 +362,10 @@ from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
@ -342,6 +375,8 @@ from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
@ -350,15 +385,24 @@ from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import TMZIE
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
@ -367,11 +411,13 @@ from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .tvplay import TVPlayIE
from .twitch import TwitchIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
@ -387,22 +433,26 @@ from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
@ -411,12 +461,17 @@ from .vine import (
VineUserIE,
)
from .viki import VikiIE
from .vk import VKIE
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
@ -433,17 +488,19 @@ from .wrzuta import WrzutaIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
@ -461,9 +518,11 @@ from .youtube import (
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
@ -482,4 +541,4 @@ def gen_extractors():
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name+'IE']
return globals()[ie_name + 'IE']
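
A small, non-authoritative sketch of how the two helpers above are meant to be used (assuming youtube_dl is on the import path):

from youtube_dl.extractor import gen_extractors, get_info_extractor

# get_info_extractor() appends 'IE' to the given name and looks the class up
# in this module's globals, so 'Audiomack' resolves to AudiomackIE.
print(get_info_extractor('Audiomack'))
# gen_extractors() returns one instance per registered extractor class.
print(len(list(gen_extractors())))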

View File

@ -11,19 +11,18 @@ class ABCIE(InfoExtractor):
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
'id': '5624716',
'id': '5868334',
'ext': 'mp4',
'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
urls_info_json = self._search_regex(

View File

@ -1,4 +1,5 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -18,15 +19,14 @@ class AcademicEarthCourseIE(InfoExtractor):
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
playlist_id = m.group('id')
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
title = self._html_search_regex(
r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r'<p class="excerpt"[^>]*?>(.*?)</p>',
webpage, u'description', fatal=False)
webpage, 'description', fatal=False)
urls = re.findall(
r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
webpage)

View File

@ -3,19 +3,19 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)
class AddAnimeIE(InfoExtractor):
_VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
_VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<id>[\w_]+)(?:.*)'
_TEST = {
'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
'md5': '72954ea10bc979ab5e2eb288b21425a0',
@ -28,9 +28,9 @@ class AddAnimeIE(InfoExtractor):
}
def _real_extract(self, url):
video_id = self._match_id(url)
try:
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
webpage = self._download_webpage(url, video_id)
except ExtractorError as ee:
if not isinstance(ee.cause, compat_HTTPError) or \
@ -48,7 +48,7 @@ class AddAnimeIE(InfoExtractor):
r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
redir_webpage)
if av is None:
raise ExtractorError(u'Cannot find redirect math task')
raise ExtractorError('Cannot find redirect math task')
av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))
parsed_url = compat_urllib_parse_urlparse(url)

View File

@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
class AdultSwimIE(InfoExtractor):
_VALID_URL = r'https?://video\.adultswim\.com/(?P<path>.+?)(?:\.html)?(?:\?.*)?(?:#.*)?$'
_TEST = {

View File

@ -22,7 +22,7 @@ class AllocineIE(InfoExtractor):
'id': '19546517',
'ext': 'mp4',
'title': 'Astérix - Le Domaine des Dieux Teaser VF',
'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
'thumbnail': 're:http://.*\.jpg',
},
}, {

View File

@ -35,7 +35,7 @@ class AnySexIE(InfoExtractor):
title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
@ -43,7 +43,7 @@ class AnySexIE(InfoExtractor):
r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
duration = parse_duration(self._search_regex(
r'<b>Duration:</b> (\d+:\d+)', webpage, 'duration', fatal=False))
r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))

View File

@ -1,5 +1,4 @@
#coding: utf-8
# coding: utf-8
from __future__ import unicode_literals
import re
@ -26,8 +25,7 @@ class AparatIE(InfoExtractor):
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
video_id = self._match_id(url)
# Note: There is an easier-to-parse configuration at
# http://www.aparat.com/video/video/config/videohash/%video_id
@ -40,15 +38,15 @@ class AparatIE(InfoExtractor):
for i, video_url in enumerate(video_urls):
req = HEADRequest(video_url)
res = self._request_webpage(
req, video_id, note=u'Testing video URL %d' % i, errnote=False)
req, video_id, note='Testing video URL %d' % i, errnote=False)
if res:
break
else:
raise ExtractorError(u'No working video URLs found')
raise ExtractorError('No working video URLs found')
title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, 'title')
thumbnail = self._search_regex(
r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)
r'\s+image:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
return {
'id': video_id,

View File

@ -70,15 +70,17 @@ class AppleTrailersIE(InfoExtractor):
uploader_id = mobj.group('company')
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
# The ' in the onClick attributes are not escaped, it couldn't be parsed
# like: http://trailers.apple.com/trailers/wb/gravity/
def _clean_json(m):
return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
s = re.sub(self._JSON_RE, _clean_json, s)
s = '<html>' + s + u'</html>'
s = '<html>%s</html>' % s
return s
doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
@ -86,7 +88,7 @@ class AppleTrailersIE(InfoExtractor):
for li in doc.findall('./div/ul/li'):
on_click = li.find('.//a').attrib['onClick']
trailer_info_json = self._search_regex(self._JSON_RE,
on_click, 'trailer info')
on_click, 'trailer info')
trailer_info = json.loads(trailer_info_json)
title = trailer_info['title']
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()

View File

@ -4,16 +4,16 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
qualities,
compat_urllib_parse_urlparse,
compat_urllib_parse,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
parse_xml,
)
@ -56,6 +56,11 @@ class ARDMediathekIE(InfoExtractor):
if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
if re.search(r'[\?&]rss($|[=&])', url):
doc = parse_xml(webpage)
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)
title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
@ -187,4 +192,3 @@ class ARDIE(InfoExtractor):
'upload_date': upload_date,
'thumbnail': thumbnail,
}

View File

@ -5,16 +5,15 @@ import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
find_xpath_attr,
unified_strdate,
determine_ext,
get_element_by_id,
compat_str,
get_element_by_attribute,
int_or_none,
qualities,
)
# There are different sources of video in arte.tv, the extraction process
# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.
@ -86,92 +85,70 @@ class ArteTVPlus7IE(InfoExtractor):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = player_info.get('VDA', '').split(' ')[0]
title = player_info['VTI'].strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle
info_dict = {
'id': player_info['VID'],
'title': player_info['VTI'],
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
all_formats = player_info['VSR'].values()
# Some formats use the m3u8 protocol
all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
def _match_lang(f):
if f.get('versionCode') is None:
return True
# Return true if that format is in the language of the url
if lang == 'fr':
l = 'F'
elif lang == 'de':
l = 'A'
else:
l = lang
regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
return any(re.match(r, f['versionCode']) for r in regexes)
# Some formats may not be in the same language as the url
# TODO: Might want not to drop videos that does not match requested language
# but to process those formats with lower precedence
formats = filter(_match_lang, all_formats)
formats = list(formats) # in python3 filter returns an iterator
if not formats:
# Some videos are only available in the 'Originalversion'
# they aren't tagged as being in French or German
# Sometimes there are neither videos of requested lang code
# nor original version videos available
# For such cases we just take all_formats as is
formats = all_formats
if not formats:
raise ExtractorError('The formats list is empty')
formats = []
for format_id, format_dict in player_info['VSR'].items():
f = dict(format_dict)
versionCode = f.get('versionCode')
if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
def sort_key(f):
return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
else:
def sort_key(f):
versionCode = f.get('versionCode')
if versionCode is None:
versionCode = ''
return (
# Sort first by quality
int(f.get('height', -1)),
int(f.get('bitrate', -1)),
# The original version with subtitles has lower relevance
re.match(r'VO-ST(F|A)', versionCode) is None,
# The version with sourds/mal subtitles has also lower relevance
re.match(r'VO?(F|A)-STM\1', versionCode) is None,
# Prefer http downloads over m3u8
0 if f['url'].endswith('m3u8') else 1,
)
formats = sorted(formats, key=sort_key)
def _format(format_info):
quality = ''
height = format_info.get('height')
if height is not None:
quality = compat_str(height)
bitrate = format_info.get('bitrate')
if bitrate is not None:
quality += '-%d' % bitrate
if format_info.get('versionCode') is not None:
format_id = '%s-%s' % (quality, format_info['versionCode'])
else:
format_id = quality
info = {
langcode = {
'fr': 'F',
'de': 'A',
}.get(lang, lang)
lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
lang_pref = (
None if versionCode is None else (
10 if any(re.match(r, versionCode) for r in lang_rexs)
else -10))
source_pref = 0
if versionCode is not None:
# The original version with subtitles has lower relevance
if re.match(r'VO-ST(F|A)', versionCode):
source_pref -= 10
# The version with sourds/mal subtitles has also lower relevance
elif re.match(r'VO?(F|A)-STM\1', versionCode):
source_pref -= 9
format = {
'format_id': format_id,
'format_note': format_info.get('versionLibelle'),
'width': format_info.get('width'),
'height': height,
'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
'language_preference': lang_pref,
'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'tbr': int_or_none(f.get('bitrate')),
'quality': qfunc(f['quality']),
'source_preference': source_pref,
}
if format_info['mediaType'] == 'rtmp':
info['url'] = format_info['streamer']
info['play_path'] = 'mp4:' + format_info['url']
info['ext'] = 'flv'
else:
info['url'] = format_info['url']
info['ext'] = determine_ext(info['url'])
return info
info_dict['formats'] = [_format(f) for f in formats]
if f.get('mediaType') == 'rtmp':
format['url'] = f['streamer']
format['play_path'] = 'mp4:' + f['url']
format['ext'] = 'flv'
else:
format['url'] = f['url']
formats.append(format)
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict

View File

@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .soundcloud import SoundcloudIE
from ..utils import ExtractorError
import time
class AudiomackIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
IE_NAME = 'audiomack'
_TESTS = [
# hosted on audiomack
{
'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
'info_dict':
{
'id': 'roosh-williams/extraordinary',
'ext': 'mp3',
'title': 'Roosh Williams - Extraordinary'
}
},
# hosted on soundcloud via audiomack
{
'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
'file': '172419696.mp3',
'info_dict':
{
'ext': 'mp3',
'title': 'Young Thug ft Lil Wayne - Take Kare',
"upload_date": "20141016",
"description": "New track produced by London On Da Track called “Take Kare\"\n\nhttp://instagram.com/theyoungthugworld\nhttps://www.facebook.com/ThuggerThuggerCashMoney\n",
"uploader": "Young Thug World"
}
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
api_response = self._download_json(
"http://www.audiomack.com/api/music/url/song/%s?_=%d" % (
video_id, time.time()),
video_id)
if "url" not in api_response:
raise ExtractorError("Unable to deduce api url of song")
realurl = api_response["url"]
# Audiomack wraps a lot of soundcloud tracks in their branded wrapper
# - if so, pass the work off to the soundcloud extractor
if SoundcloudIE.suitable(realurl):
return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}
webpage = self._download_webpage(url, video_id)
artist = self._html_search_regex(
r'<span class="artist">(.*?)</span>', webpage, "artist")
songtitle = self._html_search_regex(
r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>',
webpage, "title")
title = artist + " - " + songtitle
return {
'id': video_id,
'title': title,
'url': realurl,
}

View File

@ -24,8 +24,7 @@ class AUEngineIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')

View File

@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
_TEST = {
'url': 'http://bambuser.com/v/4050584',
# MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
#u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
# 'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
'info_dict': {
'id': '4050584',
'ext': 'flv',
@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
'&api_key=%s&vid=%s' % (self._API_KEY, video_id))
'&api_key=%s&vid=%s' % (self._API_KEY, video_id))
info_json = self._download_webpage(info_url, video_id)
info = json.loads(info_json)['result']
@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor):
urls = []
last_id = ''
for i in itertools.count(1):
req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
req_url = (
'http://bambuser.com/xhr-api/index.php?username={user}'
'&sort=created&access_mode=0%2C1%2C2&limit={count}'
'&method=broadcast&format=json&vid_older_than={last}'
).format(user=user, count=self._STEP, last=last_id)
).format(user=user, count=self._STEP, last=last_id)
req = compat_urllib_request.Request(req_url)
# Without setting this header, we wouldn't get any result
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)

View File

@ -15,13 +15,23 @@ class BandcampIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
'file': '1812978515.mp3',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
"title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
"duration": 9.8485,
'id': '1812978515',
'ext': 'mp3',
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
'md5': '2b68e5851514c20efdff2afc5603b8b4',
'info_dict': {
'id': '2650410135',
'ext': 'mp3',
'title': 'Lanius (Battle)',
'uploader': 'Ben Prunty Music',
},
}]
def _real_extract(self, url):
@ -59,9 +69,9 @@ class BandcampIE(InfoExtractor):
raise ExtractorError('No free songs found')
download_link = m_download.group(1)
video_id = re.search(
r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
webpage, re.MULTILINE | re.DOTALL).group('id')
video_id = self._search_regex(
r'var TralbumData = {.*?id: (?P<id>\d+),?$',
webpage, 'video id', flags=re.MULTILINE | re.DOTALL)
download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
# We get the dictionary of the track from some javascript code
@ -73,12 +83,12 @@ class BandcampIE(InfoExtractor):
initial_url = mp3_info['url']
re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
m_url = re.match(re_url, initial_url)
#We build the url we will use to get the final track url
# We build the url we will use to get the final track url
# This url is build in Bandcamp in the script download_bunde_*.js
request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
# If we could correctly generate the .rand field the url would be
#in the "download_url" key
# in the "download_url" key
final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
return {
@ -100,20 +110,25 @@ class BandcampAlbumIE(InfoExtractor):
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
'file': '1353101989.mp3',
'md5': '39bc1eded3476e927c724321ddf116cf',
'info_dict': {
'id': '1353101989',
'ext': 'mp3',
'title': 'Intro',
}
},
{
'file': '38097443.mp3',
'md5': '1a2c32e2691474643e912cc6cd4bffaa',
'info_dict': {
'id': '38097443',
'ext': 'mp3',
'title': 'Kero One - Keep It Alive (Blazo remix)',
}
},
],
'info_dict': {
'title': 'Jazz Format Mixtape vol.1',
},
'params': {
'playlistend': 2
},

View File

@ -165,10 +165,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
webpage = self._download_webpage(url, group_id, 'Downloading video page')
if re.search(r'id="emp-error" class="notinuk">', webpage):
raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only',
expected=True)
expected=True)
playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
'Downloading playlist XML')
'Downloading playlist XML')
no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
if no_items is not None:
@ -195,7 +195,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
duration = int(item.get('duration'))
media_selection = self._download_xml(
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
programme_id, 'Downloading media selection XML')
for media in self._extract_medias(media_selection):
@ -220,4 +220,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
}

View File

@ -40,7 +40,7 @@ class BeegIE(InfoExtractor):
title = self._html_search_regex(
r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
description = self._html_search_regex(
r'<meta name="description" content="([^"]*)"',
webpage, 'description', fatal=False)

View File

@ -0,0 +1,39 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class BildIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
IE_DESC = 'Bild.de'
_TEST = {
'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
'md5': 'dd495cbd99f2413502a1713a1156ac8a',
'info_dict': {
'id': '38184146',
'ext': 'mp4',
'title': 'BILD hat sie getestet',
'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
'duration': 196,
'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
doc = self._download_xml(xml_url, video_id)
duration = int_or_none(doc.attrib.get('duration'), scale=1000)
return {
'id': video_id,
'title': doc.attrib['ueberschrift'],
'description': doc.attrib.get('text'),
'url': doc.attrib['src'],
'thumbnail': doc.attrib.get('img'),
'duration': duration,
}

View File

@ -64,6 +64,20 @@ class BlipTVIE(SubtitlesInfoExtractor):
'uploader': 'redvsblue',
'uploader_id': '792887',
}
},
{
'url': 'http://blip.tv/play/gbk766dkj4Yn',
'md5': 'fe0a33f022d49399a241e84a8ea8b8e3',
'info_dict': {
'id': '1749452',
'ext': 'mp4',
'upload_date': '20090208',
'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.',
'title': 'Nostalgia Critic: Transformers',
'timestamp': 1234068723,
'uploader': 'NostalgiaCritic',
'uploader_id': '246467',
}
}
]
@ -71,13 +85,16 @@ class BlipTVIE(SubtitlesInfoExtractor):
mobj = re.match(self._VALID_URL, url)
lookup_id = mobj.group('lookup_id')
# See https://github.com/rg3/youtube-dl/issues/857
# See https://github.com/rg3/youtube-dl/issues/857 and
# https://github.com/rg3/youtube-dl/issues/4197
if lookup_id:
info_page = self._download_webpage(
'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
else:
video_id = mobj.group('id')
urlh = self._request_webpage(
'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id')
url = compat_urlparse.urlparse(urlh.geturl())
qs = compat_urlparse.parse_qs(url.query)
mobj = re.match(self._VALID_URL, qs['file'][0])
video_id = mobj.group('id')
rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')
@ -113,7 +130,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
msg = self._download_webpage(
url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
video_id, 'Resolving URL for %s' % role)
real_url = compat_urlparse.parse_qs(msg)['message'][0]
real_url = compat_urlparse.parse_qs(msg.strip())['message'][0]
media_type = media_content.get('type')
if media_type == 'text/srt' or url.endswith('.srt'):
@ -165,9 +182,17 @@ class BlipTVIE(SubtitlesInfoExtractor):
class BlipTVUserIE(InfoExtractor):
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
_VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
_PAGE_SIZE = 12
IE_NAME = 'blip.tv:user'
_TEST = {
'url': 'http://blip.tv/actone',
'info_dict': {
'id': 'actone',
'title': 'Act One: The Series',
},
'playlist_count': 5,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@ -178,6 +203,7 @@ class BlipTVUserIE(InfoExtractor):
page = self._download_webpage(url, username, 'Downloading user page')
mobj = re.search(r'data-users-id="([^"]+)"', page)
page_base = page_base % mobj.group(1)
title = self._og_search_title(page)
# Download video ids using BlipTV Ajax calls. Result size per
# query is limited (currently to 12 videos) so we need to query
@ -214,4 +240,5 @@ class BlipTVUserIE(InfoExtractor):
urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
return [self.playlist_result(url_entries, playlist_title=username)]
return self.playlist_result(
url_entries, playlist_title=title, playlist_id=username)

View File

@ -0,0 +1,37 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class BpbIE(InfoExtractor):
IE_DESC = 'Bundeszentrale für politische Bildung'
_VALID_URL = r'http://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'
_TEST = {
'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
'md5': '0792086e8e2bfbac9cdf27835d5f2093',
'info_dict': {
'id': '297',
'ext': 'mp4',
'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h2 class="white">(.*?)</h2>', webpage, 'title')
video_url = self._html_search_regex(
r'(http://film\.bpb\.de/player/dokument_[0-9]+\.mp4)',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
}

View File

@ -1,8 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
@ -26,6 +24,8 @@ class BRIE(InfoExtractor):
'title': 'Wenn das Traditions-Theater wackelt',
'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
'duration': 34,
'uploader': 'BR',
'upload_date': '20140802',
}
},
{
@ -66,8 +66,7 @@ class BRIE(InfoExtractor):
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
display_id = self._match_id(url)
page = self._download_webpage(url, display_id)
xml_url = self._search_regex(
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')

View File

@ -4,37 +4,61 @@ import re
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
)
class BreakIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
_TEST = {
_VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
'md5': '33aa4ff477ecd124d18d7b5d23b87ce5',
'info_dict': {
'id': '2468056',
'ext': 'mp4',
'title': 'When Girls Act Like D-Bags',
}
}
}, {
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1).split("-")[-1]
embed_url = 'http://www.break.com/embed/%s' % video_id
webpage = self._download_webpage(embed_url, video_id)
info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
webpage, 'info json', flags=re.DOTALL)
info = json.loads(info_json)
video_url = info['videoUri']
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://www.break.com/embed/%s' % video_id, video_id)
info = json.loads(self._search_regex(
r'var embedVars = ({.*})\s*?</script>',
webpage, 'info json', flags=re.DOTALL))
youtube_id = info.get('youtubeId')
if youtube_id:
return self.url_result(youtube_id, 'Youtube')
final_url = video_url + '?' + info['AuthToken']
formats = [{
'url': media['uri'] + '?' + info['AuthToken'],
'tbr': media['bitRate'],
'width': media['width'],
'height': media['height'],
} for media in info['media']]
if not formats:
formats.append({
'url': info['videoUri']
})
self._sort_formats(formats)
duration = int_or_none(info.get('videoLengthInSeconds'))
age_limit = parse_age_limit(info.get('audienceRating'))
return {
'id': video_id,
'url': final_url,
'title': info['contentName'],
'thumbnail': info['thumbUri'],
'duration': duration,
'age_limit': age_limit,
'formats': formats,
}

View File

@ -14,6 +14,7 @@ from ..utils import (
compat_str,
compat_urllib_request,
compat_parse_qs,
compat_urllib_parse_urlparse,
determine_ext,
ExtractorError,
@ -23,7 +24,7 @@ from ..utils import (
class BrightcoveIE(InfoExtractor):
_VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
_VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*?\?(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
@ -87,6 +88,15 @@ class BrightcoveIE(InfoExtractor):
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
},
},
{
# playlist test
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
},
'playlist_mincount': 7,
},
]
@classmethod
@ -101,6 +111,8 @@ class BrightcoveIE(InfoExtractor):
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
@ -209,7 +221,7 @@ class BrightcoveIE(InfoExtractor):
webpage = self._download_webpage(req, video_id)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage,
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
@ -251,11 +263,19 @@ class BrightcoveIE(InfoExtractor):
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
if rend['remote']:
# These renditions are served through akamaihd.net,
# but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(url, info['id'], 'mp4'))
continue
elif 'akamaihd.net' in url_comp.netloc:
# These renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
else:
ext = determine_ext(url)
size = rend.get('size')

View File

@ -0,0 +1,74 @@
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
class BuzzFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
'info_dict': {
'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
'description': 'Rambro!',
},
'playlist': [{
'info_dict': {
'id': 'aVCR29aE_OQ',
'ext': 'mp4',
'upload_date': '20141024',
'uploader_id': 'Buddhanz1',
'description': 'He likes to stay in shape with his heavy bag, he wont stop until its on the ground\n\nFollow Angry Ram on Facebook for regular updates -\nhttps://www.facebook.com/pages/Angry-Ram/1436897249899558?ref=hl',
'uploader': 'Buddhanz',
'title': 'Angry Ram destroys a punching bag',
}
}]
}, {
'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
'params': {
'skip_download': True, # Got enough YouTube download tests
},
'info_dict': {
'description': 'Munchkin the Teddy Bear is back !',
'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
},
'playlist': [{
'info_dict': {
'id': 'mVmBL8B-In0',
'ext': 'mp4',
'upload_date': '20141124',
'uploader_id': 'CindysMunchkin',
'description': '© 2014 Munchkin the Shih Tzu\nAll rights reserved\nFacebook: http://facebook.com/MunchkintheShihTzu',
'uploader': 'Munchkin the Shih Tzu',
'title': 'Munchkin the Teddy Bear gets her exercise',
},
}]
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
all_buckets = re.findall(
r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
webpage)
entries = []
for bd_json in all_buckets:
bd = json.loads(bd_json)
video = bd.get('video') or bd.get('progload_video')
if not video:
continue
entries.append(self.url_result(video['url']))
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'entries': entries,
}

View File

@ -10,12 +10,12 @@ from ..utils import ExtractorError
class BYUtvIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
_TEST = {
'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
'info_dict': {
'id': 'granite-flats-talking',
'id': 'studio-c-season-5-episode-5',
'ext': 'mp4',
'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
'title': 'Talking',
'description': 'md5:5438d33774b6bdc662f9485a340401cc',
'title': 'Season 5 Episode 5',
'thumbnail': 're:^https?://.*promo.*'
},
'params': {

View File

@ -7,15 +7,21 @@ from .common import InfoExtractor
from ..utils import (
unified_strdate,
url_basename,
qualities,
)
class CanalplusIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
_VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
IE_NAME = 'canalplus.fr'
IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
_VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
_VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
_SITE_ID_MAP = {
'canalplus.fr': 'cplus',
'piwiplus.fr': 'teletoon',
'd8.tv': 'd8',
}
_TEST = {
_TESTS = [{
'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
'md5': '3db39fb48b9685438ecf33a1078023e4',
'info_dict': {
@ -25,36 +31,73 @@ class CanalplusIE(InfoExtractor):
'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
'upload_date': '20130826',
},
}
}, {
'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
'info_dict': {
'id': '1108190',
'ext': 'flv',
'title': 'Le labyrinthe - Boing super ranger',
'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
'upload_date': '20140724',
},
'skip': 'Only works from France',
}, {
'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
'info_dict': {
'id': '966289',
'ext': 'flv',
'title': 'Campagne intime - Documentaire exceptionnel',
'description': 'md5:d2643b799fb190846ae09c61e59a859f',
'upload_date': '20131108',
},
'skip': 'videos get deleted after a while',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.groupdict().get('id')
site_id = self._SITE_ID_MAP[mobj.group('site') or 'canal']
# Beware, some subclasses do not define an id group
display_id = url_basename(mobj.group('path'))
if video_id is None:
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')
video_id = self._search_regex(
r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
info_url = self._VIDEO_INFO_TEMPLATE % video_id
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
doc = self._download_xml(info_url, video_id, 'Downloading video XML')
video_info = [video for video in doc if video.find('ID').text == video_id][0]
media = video_info.find('MEDIA')
infos = video_info.find('INFOS')
preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
formats = [
{
'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
'format_id': fmt.tag,
'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
} for fmt in media.find('VIDEOS') if fmt.text
]
formats = []
for fmt in media.find('VIDEOS'):
format_url = fmt.text
if not format_url:
continue
format_id = fmt.tag
if format_id == 'HLS':
hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
for fmt in hls_formats:
fmt['preference'] = preference(format_id)
formats.extend(hls_formats)
elif format_id == 'HDS':
hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
for fmt in hds_formats:
fmt['preference'] = preference(format_id)
formats.extend(hds_formats)
else:
formats.append({
'url': format_url,
'format_id': format_id,
'preference': preference(format_id),
})
self._sort_formats(formats)
return {
@ -69,4 +112,4 @@ class CanalplusIE(InfoExtractor):
'like_count': int(infos.find('NB_LIKES').text),
'comment_count': int(infos.find('NB_COMMENTS').text),
'formats': formats,
}
}

View File

@ -45,4 +45,4 @@ class CBSIE(InfoExtractor):
real_id = self._search_regex(
r"video\.settings\.pid\s*=\s*'([^']+)';",
webpage, 'real video ID')
return self.url_result(u'theplatform:%s' % real_id)
return self.url_result('theplatform:%s' % real_id)

View File

@ -84,4 +84,4 @@ class CBSNewsIE(InfoExtractor):
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
}

View File

@ -92,7 +92,7 @@ class CeskaTelevizeIE(InfoExtractor):
req.add_header('Referer', url)
playlist = self._download_xml(req, video_id)
formats = []
for i in playlist.find('smilRoot/body'):
if 'AD' not in i.attrib['id']:

View File

@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError
class Channel9IE(InfoExtractor):
'''
Common extractor for channel9.msdn.com.
@ -27,11 +28,11 @@ class Channel9IE(InfoExtractor):
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
},
},
{
@ -43,8 +44,8 @@ class Channel9IE(InfoExtractor):
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
'authors': [ 'Mike Wilmot' ],
'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
'authors': ['Mike Wilmot'],
},
}
]
@ -83,7 +84,7 @@ class Channel9IE(InfoExtractor):
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
'preference': self._known_formats.index(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
} for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@ -94,7 +95,7 @@ class Channel9IE(InfoExtractor):
def _extract_title(self, html):
title = self._html_search_meta('title', html, 'title')
if title is None:
if title is None:
title = self._og_search_title(html)
TITLE_SUFFIX = ' (Channel 9)'
if title is not None and title.endswith(TITLE_SUFFIX):
@ -115,7 +116,7 @@ class Channel9IE(InfoExtractor):
return self._html_search_meta('description', html, 'description')
def _extract_duration(self, html):
m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
def _extract_slides(self, html):
@ -167,7 +168,7 @@ class Channel9IE(InfoExtractor):
return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
def _extract_content(self, html, content_path):
# Look for downloadable content
# Look for downloadable content
formats = self._formats_from_html(html)
slides = self._extract_slides(html)
zip_ = self._extract_zip(html)
@ -187,32 +188,33 @@ class Channel9IE(InfoExtractor):
view_count = self._extract_view_count(html)
comment_count = self._extract_comment_count(html)
common = {'_type': 'video',
'id': content_path,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'avg_rating': avg_rating,
'rating_count': rating_count,
'view_count': view_count,
'comment_count': comment_count,
}
common = {
'_type': 'video',
'id': content_path,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'avg_rating': avg_rating,
'rating_count': rating_count,
'view_count': view_count,
'comment_count': comment_count,
}
result = []
if slides is not None:
d = common.copy()
d.update({ 'title': title + '-Slides', 'url': slides })
d.update({'title': title + '-Slides', 'url': slides})
result.append(d)
if zip_ is not None:
d = common.copy()
d.update({ 'title': title + '-Zip', 'url': zip_ })
d.update({'title': title + '-Zip', 'url': zip_})
result.append(d)
if len(formats) > 0:
d = common.copy()
d.update({ 'title': title, 'formats': formats })
d.update({'title': title, 'formats': formats})
result.append(d)
return result
@ -258,16 +260,17 @@ class Channel9IE(InfoExtractor):
webpage = self._download_webpage(url, content_path, 'Downloading web page')
page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
if page_type_m is None:
raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
if page_type_m is not None:
page_type = page_type_m.group('pagetype')
if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
elif page_type == 'Event':
return self._extract_list(content_path)
else:
raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
page_type = page_type_m.group('pagetype')
if page_type == 'List': # List page, may contain list of 'item'-like objects
else: # Assuming list
return self._extract_list(content_path)
elif page_type == 'Entry.Item': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
else:
raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)

View File

@ -42,53 +42,71 @@ class CinemassacreIE(InfoExtractor):
webpage = self._download_webpage(url, display_id)
video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P<full_video_id>(?:Cinemassacre-)?(?P<video_id>.+?)))"', webpage)
if not mobj:
raise ExtractorError('Can\'t extract embed url and video id')
playerdata_url = mobj.group('embed_url')
video_id = mobj.group('video_id')
full_video_id = mobj.group('full_video_id')
video_title = self._html_search_regex(
r'<title>(?P<title>.+?)\|', webpage, 'title')
video_description = self._html_search_regex(
r'<div class="entry-content">(?P<description>.+?)</div>',
webpage, 'description', flags=re.DOTALL, fatal=False)
video_thumbnail = self._og_search_thumbnail(webpage)
playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
video_thumbnail = self._search_regex(
r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')
videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
vidurl = self._search_regex(
r'\'vidurl\'\s*:\s*"([^\']+)"', playerdata, 'vidurl').replace('\\/', '/')
formats = []
baseurl = sd_url[:sd_url.rfind('/')+1]
for video in videolist.findall('.//video'):
src = video.get('src')
if not src:
continue
file_ = src.partition(':')[-1]
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
bitrate = int_or_none(video.get('system-bitrate'))
format = {
'url': baseurl + file_,
'format_id': src.rpartition('.')[0].rpartition('_')[-1],
}
if width or height:
format.update({
'tbr': bitrate // 1000 if bitrate else None,
'width': width,
'height': height,
})
else:
format.update({
'abr': bitrate // 1000 if bitrate else None,
'vcodec': 'none',
})
formats.append(format)
self._sort_formats(formats)
videolist_url = None
mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
if mobj:
videoserver = mobj.group('videoserver')
mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
vidid = mobj.group('vidid') if mobj else full_video_id
videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
else:
mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
if mobj:
videolist_url = mobj.group('smil')
if videolist_url:
videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
formats = []
baseurl = vidurl[:vidurl.rfind('/') + 1]
for video in videolist.findall('.//video'):
src = video.get('src')
if not src:
continue
file_ = src.partition(':')[-1]
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
bitrate = int_or_none(video.get('system-bitrate'))
format = {
'url': baseurl + file_,
'format_id': src.rpartition('.')[0].rpartition('_')[-1],
}
if width or height:
format.update({
'tbr': bitrate // 1000 if bitrate else None,
'width': width,
'height': height,
})
else:
format.update({
'abr': bitrate // 1000 if bitrate else None,
'vcodec': 'none',
})
formats.append(format)
self._sort_formats(formats)
else:
formats = [{
'url': vidurl,
}]
return {
'id': video_id,

View File

@ -24,7 +24,7 @@ class ClipfishIE(InfoExtractor):
'title': 'FIFA 14 - E3 2013 Trailer',
'duration': 82,
},
u'skip': 'Blocked in the US'
'skip': 'Blocked in the US'
}
def _real_extract(self, url):
@ -34,7 +34,7 @@ class ClipfishIE(InfoExtractor):
info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
(video_id, int(time.time())))
doc = self._download_xml(
info_url, video_id, note=u'Downloading info page')
info_url, video_id, note='Downloading info page')
title = doc.find('title').text
video_url = doc.find('filename').text
if video_url is None:

View File

@ -4,7 +4,6 @@ import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
_translation_table = {
@ -35,14 +34,11 @@ class CliphunterIE(InfoExtractor):
'title': 'Fun Jynx Maze solo',
'thumbnail': 're:^https?://.*\.jpg$',
'age_limit': 18,
'duration': 1317,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._search_regex(
@ -86,14 +82,11 @@ class CliphunterIE(InfoExtractor):
thumbnail = self._search_regex(
r"var\s+mov_thumb\s*=\s*'([^']+)';",
webpage, 'thumbnail', fatal=False)
duration = int_or_none(self._search_regex(
r'pl_dur\s*=\s*([0-9]+)', webpage, 'duration', fatal=False))
return {
'id': video_id,
'title': video_title,
'formats': formats,
'duration': duration,
'age_limit': self._rta_search(webpage),
'thumbnail': thumbnail,
}

View File

@ -39,6 +39,7 @@ class ClipsyndicateIE(InfoExtractor):
transform_source=fix_xml_ampersands)
track_doc = pdoc.find('trackList/track')
def find_param(name):
node = find_xpath_attr(track_doc, './/param', 'name', name)
if node is not None:

View File

@ -4,14 +4,16 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
from ..compat import (
compat_parse_qs,
compat_urllib_parse,
remove_end,
HEADRequest,
compat_HTTPError,
)
from ..utils import (
ExtractorError,
HEADRequest,
remove_end,
)
class CloudyIE(InfoExtractor):

View File

@ -12,24 +12,25 @@ from ..utils import (
class CNNIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
(?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
(?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn(-ap)?|(?=&)))'''
_TESTS = [{
'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
'md5': '3e6121ea48df7e2259fe73a0628605c4',
'info_dict': {
'id': 'sports_2013_06_09_nadal-1-on-1.cnn',
'ext': 'mp4',
'title': 'Nadal wins 8th French Open title',
'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
'duration': 135,
'upload_date': '20130609',
},
},
{
}, {
"url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
"file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
"md5": "b5cc60c60a3477d185af8f19a2a26f4e",
"info_dict": {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
"title": "Student's epic speech stuns new freshmen",
"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
"upload_date": "20130821",

View File

@ -10,47 +10,46 @@ from ..utils import int_or_none
class CollegeHumorIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
_TESTS = [{
'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
'info_dict': {
'id': '6902724',
'ext': 'mp4',
'title': 'Comic-Con Cosplay Catastrophe',
'description': "Fans get creative this year at San Diego. Too creative. And yes, that's really Joss Whedon.",
'age_limit': 13,
'duration': 187,
_TESTS = [
{
'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
'info_dict': {
'id': '6902724',
'ext': 'mp4',
'title': 'Comic-Con Cosplay Catastrophe',
'description': "Fans get creative this year at San Diego. Too creative. And yes, that's really Joss Whedon.",
'age_limit': 13,
'duration': 187,
},
}, {
'url': 'http://www.collegehumor.com/video/3505939/font-conference',
'md5': '72fa701d8ef38664a4dbb9e2ab721816',
'info_dict': {
'id': '3505939',
'ext': 'mp4',
'title': 'Font Conference',
'description': "This video wasn't long enough, so we made it double-spaced.",
'age_limit': 10,
'duration': 179,
},
}, {
# embedded youtube video
'url': 'http://www.collegehumor.com/embed/6950306',
'info_dict': {
'id': 'Z-bao9fg6Yc',
'ext': 'mp4',
'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
'uploader': 'Mark Dice',
'uploader_id': 'MarkDice',
'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
'upload_date': '20140127',
},
'params': {
'skip_download': True,
},
'add_ie': ['Youtube'],
},
},
{
'url': 'http://www.collegehumor.com/video/3505939/font-conference',
'md5': '72fa701d8ef38664a4dbb9e2ab721816',
'info_dict': {
'id': '3505939',
'ext': 'mp4',
'title': 'Font Conference',
'description': "This video wasn't long enough, so we made it double-spaced.",
'age_limit': 10,
'duration': 179,
},
},
# embedded youtube video
{
'url': 'http://www.collegehumor.com/embed/6950306',
'info_dict': {
'id': 'Z-bao9fg6Yc',
'ext': 'mp4',
'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
'uploader': 'Mark Dice',
'uploader_id': 'MarkDice',
'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
'upload_date': '20140127',
},
'params': {
'skip_download': True,
},
'add_ie': ['Youtube'],
},
]
def _real_extract(self, url):

View File

@ -2,7 +2,6 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .mtv import MTVServicesInfoExtractor
from ..utils import (
compat_str,
@ -31,7 +30,7 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
}
class ComedyCentralShowsIE(InfoExtractor):
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
IE_DESC = 'The Daily Show / The Colbert Report'
# urls can be abbreviations like :thedailyshow or :colbert
# urls for episodes like:
@ -109,18 +108,8 @@ class ComedyCentralShowsIE(InfoExtractor):
'400': (384, 216),
}
@staticmethod
def _transform_rtmp_url(rtmp_video_url):
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
if not m:
raise ExtractorError('Cannot transform RTMP url')
base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
return base + m.group('finalid')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
if mobj.group('shortname') in ('tds', 'thedailyshow'):
@ -212,9 +201,6 @@ class ComedyCentralShowsIE(InfoExtractor):
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
'format_note': 'HTTP 400 at the moment (patches welcome!)',
'preference': -100,
})
formats.append({
'format_id': 'rtmp-%s' % format,

View File

@ -2,6 +2,7 @@ from __future__ import unicode_literals
import base64
from collections import defaultdict
import datetime
import hashlib
import json
import netrc
@ -12,15 +13,19 @@ import sys
import time
import xml.etree.ElementTree
from ..utils import (
from ..compat import (
compat_cookiejar,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
compiled_regex_type,
ExtractorError,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
@ -40,7 +45,11 @@ class InfoExtractor(object):
information possibly downloading the video to the file system, among
other possible outcomes.
The dictionaries must include the following fields:
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
@ -70,6 +79,7 @@ class InfoExtractor(object):
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
@ -83,10 +93,19 @@ class InfoExtractor(object):
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_referer HTTP Referer header value to set.
* http_method HTTP method to use for the download.
* http_headers A dictionary of additional HTTP headers
@ -134,9 +153,45 @@ class InfoExtractor(object):
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list or a PagedList object, each
element of which is a valid dictionary under this specification.
Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example, multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
Additionally, it may have properties believed to be identical to the
resolved entity, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
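# Illustrative sketch only -- not part of this commit. It shows how the result
# conventions documented above fit together for the common "video" case; the
# URL, regex and format values below are invented for illustration, and the
# class is assumed to live in its own module under youtube_dl/extractor/.
from .common import InfoExtractor


class ExampleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Two made-up progressive formats; _sort_formats() orders them using
        # the documented "quality" and "height" fields.
        formats = [{
            'url': 'http://cdn.example.com/%s_sd.mp4' % video_id,
            'format_id': 'sd',
            'height': 360,
            'quality': 0,
        }, {
            'url': 'http://cdn.example.com/%s_hd.mp4' % video_id,
            'format_id': 'hd',
            'height': 720,
            'quality': 1,
        }]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
        }
# A playlist-type result, by contrast, only needs "entries" (for example a list
# of self.url_result(...) dicts) plus the optional "id" and "title", which is
# what self.playlist_result(entries, playlist_id, playlist_title) builds.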
@ -165,6 +220,14 @@ class InfoExtractor(object):
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
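# Note (illustrative, not part of this commit): _match_id() assumes _VALID_URL
# declares a named "id" group, e.g. a hypothetical
#     _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
# so that self._match_id('http://www.example.com/watch/123') returns '123',
# replacing the re.match(self._VALID_URL, url).group('id') boilerplate that
# this commit drops from several extractors.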
@classmethod
def working(cls):
"""Getter method for _WORKING."""
@ -227,7 +290,6 @@ class InfoExtractor(object):
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
@ -236,8 +298,14 @@ class InfoExtractor(object):
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
return (content, urlh)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
@ -270,6 +338,12 @@ class InfoExtractor(object):
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
@ -288,7 +362,7 @@ class InfoExtractor(object):
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
return (content, urlh)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the data of the page as a string """
@ -325,7 +399,11 @@ class InfoExtractor(object):
try:
return json.loads(json_string)
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
@ -352,17 +430,18 @@ class InfoExtractor(object):
"""Report attempt to log in."""
self.to_screen('Logging in')
#Methods for following #608
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None):
"""Returns a url that points to a page that should be processed"""
#TODO: ie should be the class used for getting the info
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None):
"""Returns a playlist"""
@ -374,7 +453,7 @@ class InfoExtractor(object):
video_info['title'] = playlist_title
return video_info
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
@ -395,22 +474,25 @@ class InfoExtractor(object):
_name = name
if mobj:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not _NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s; '
'please report this issue on http://yt-dl.org/bug' % _name)
'please report this issue on http://yt-dl.org/bug' % _name)
return None
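# Usage sketch for the new "group" parameter (hypothetical pattern and webpage,
# not part of this commit): instead of relying on the first matching group, a
# caller can name the group to return, e.g.
#     title = self._search_regex(
#         r'<h1[^>]*>(?P<title>[^<]+)</h1>', webpage, 'title', group='title')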
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags)
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
@ -443,7 +525,7 @@ class InfoExtractor(object):
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self):
@ -504,9 +586,9 @@ class InfoExtractor(object):
display_name = name
return self._html_search_regex(
r'''(?ix)<meta
(?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
html, display_name, fatal=fatal, **kwargs)
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+content=(["\'])(?P<content>.*?)\1''' % re.escape(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
@ -537,7 +619,7 @@ class InfoExtractor(object):
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
'twitter card player')
def _sort_formats(self, formats):
if not formats:
@ -582,6 +664,7 @@ class InfoExtractor(object):
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
@ -590,14 +673,16 @@ class InfoExtractor(object):
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id'),
)
formats.sort(key=_formats_key)
def http_scheme(self):
""" Either "https:" or "https:", depending on the user's preferences """
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
@ -665,7 +750,9 @@ class InfoExtractor(object):
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None):
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None):
formats = [{
'format_id': 'm3u8-meta',
'url': m3u8_url,
@ -676,7 +763,15 @@ class InfoExtractor(object):
'format_note': 'Quality selection URL',
}]
m3u8_doc = self._download_webpage(m3u8_url, video_id)
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note='Downloading m3u8 information',
errnote='Failed to download m3u8 information')
last_info = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
@ -692,15 +787,17 @@ class InfoExtractor(object):
continue
else:
if last_info is None:
formats.append({'url': line})
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
f = {
'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
'url': line.strip(),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
@ -720,6 +817,39 @@ class InfoExtractor(object):
self._sort_formats(formats)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
class SearchInfoExtractor(InfoExtractor):
"""

Some files were not shown because too many files have changed in this diff