diff --git a/.gitignore b/.gitignore index 7dd0ad09b..37b2fa8d3 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,8 @@ updates_key.pem *.vtt *.flv *.mp4 +*.m4a +*.m4v *.part test/testdata .tox diff --git a/README.md b/README.md index 91e18e372..cf0bb7b65 100644 --- a/README.md +++ b/README.md @@ -34,10 +34,13 @@ which means you can modify it, redistribute it or use it however you like. empty string (--proxy "") for direct connection --no-check-certificate Suppress HTTPS certificate validation. --cache-dir DIR Location in the filesystem where youtube-dl can - store downloaded information permanently. By + store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache - /youtube-dl . + /youtube-dl . At the moment, only YouTube player + files (for videos with obfuscated signatures) are + cached, but that may change. --no-cache-dir Disable filesystem caching + --socket-timeout None Time to wait before giving up, in seconds --bidi-workaround Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH @@ -55,8 +58,10 @@ which means you can modify it, redistribute it or use it however you like. --max-filesize SIZE Do not download any videos larger than SIZE (e.g. 50k or 44.6m) --date DATE download only videos uploaded in this date - --datebefore DATE download only videos uploaded before this date - --dateafter DATE download only videos uploaded after this date + --datebefore DATE download only videos uploaded on or before this + date (i.e. inclusive) + --dateafter DATE download only videos uploaded on or after this + date (i.e. inclusive) --min-views COUNT Do not download any videos with less than COUNT views --max-views COUNT Do not download any videos with more than COUNT @@ -88,13 +93,13 @@ which means you can modify it, redistribute it or use it however you like. 
different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for the format description - (like "22 - 1280x720" or "HD"),%(format_id)s for + (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like Youtube's - itags: "137"),%(upload_date)s for the upload date - (YYYYMMDD), %(extractor)s for the provider - (youtube, metacafe, etc), %(id)s for the video id - , %(playlist)s for the playlist the video is in, - %(playlist_index)s for the position in the + itags: "137"), %(upload_date)s for the upload + date (YYYYMMDD), %(extractor)s for the provider + (youtube, metacafe, etc), %(id)s for the video + id, %(playlist)s for the playlist the video is + in, %(playlist_index)s for the position in the playlist and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o '/my/d @@ -106,7 +111,7 @@ which means you can modify it, redistribute it or use it however you like. avoid "&" and spaces in filenames -a, --batch-file FILE file containing URLs to download ('-' for stdin) --load-info FILE json file containing the video information - (created with the "--write-json" option + (created with the "--write-json" option) -w, --no-overwrites do not overwrite files -c, --continue force resume of partially downloaded files. By default, youtube-dl will resume downloads if @@ -140,7 +145,7 @@ which means you can modify it, redistribute it or use it however you like. 
--no-progress do not print progress bar --console-title display progress in console titlebar -v, --verbose print various debugging information - --dump-intermediate-pages print downloaded pages to debug problems(very + --dump-intermediate-pages print downloaded pages to debug problems (very verbose) --write-pages Write downloaded intermediary pages to files in the current directory to debug problems @@ -153,8 +158,7 @@ which means you can modify it, redistribute it or use it however you like. --prefer-free-formats prefer free video formats unless a specific one is requested --max-quality FORMAT highest quality format to download - -F, --list-formats list all available formats (currently youtube - only) + -F, --list-formats list all available formats ## Subtitle Options: --write-sub write subtitle file @@ -172,7 +176,7 @@ which means you can modify it, redistribute it or use it however you like. -u, --username USERNAME account username -p, --password PASSWORD account password -n, --netrc use .netrc authentication data - --video-password PASSWORD video password (vimeo only) + --video-password PASSWORD video password (vimeo, smotri) ## Post-processing Options: -x, --extract-audio convert video files to audio-only files (requires @@ -190,7 +194,13 @@ which means you can modify it, redistribute it or use it however you like. 
processed files are overwritten by default --embed-subs embed subtitles in the video (only for mp4 videos) - --add-metadata add metadata to the files + --add-metadata write metadata to the video file + --xattrs write metadata to the video file's xattrs (using + dublin core and xdg standards) + --prefer-avconv Prefer avconv over ffmpeg for running the + postprocessors (default) + --prefer-ffmpeg Prefer ffmpeg over avconv for running the + postprocessors # CONFIGURATION @@ -229,9 +239,12 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb Examples: - $ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months - $ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970 - $ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010 + $ # Download only the videos uploaded in the last 6 months + $ youtube-dl --dateafter now-6months + $ # Download only the videos uploaded on January 1, 1970 + $ youtube-dl --date 19700101 + $ # will only download the videos uploaded in the 200x decade + $ youtube-dl --dateafter 20000101 --datebefore 20091231 # FAQ @@ -310,7 +323,7 @@ Site support requests must contain an example URL. An example URL is a URL you m ### Are you using the latest version? -Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. Ábout 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well. +Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well. ### Is the issue already documented? @@ -335,3 +348,7 @@ In particular, every site support request issue should only pertain to services ### Is anyone going to need the feature? 
Only post features that you (or an incapicated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them. + +### Is your question about youtube-dl? + +It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug. diff --git a/devscripts/bash-completion.in b/devscripts/bash-completion.in index 3af87a378..28bd23727 100644 --- a/devscripts/bash-completion.in +++ b/devscripts/bash-completion.in @@ -6,7 +6,7 @@ __youtube_dl() prev="${COMP_WORDS[COMP_CWORD-1]}" opts="{{flags}}" keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory" - fileopts="-a|--batch-file|--download-archive|--cookies" + fileopts="-a|--batch-file|--download-archive|--cookies|--load-info" diropts="--cache-dir" if [[ ${prev} =~ ${fileopts} ]]; then diff --git a/devscripts/check-porn.py b/devscripts/check-porn.py index 63401fe18..86aa37b5f 100644 --- a/devscripts/check-porn.py +++ b/devscripts/check-porn.py @@ -3,6 +3,9 @@ """ This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check if we are not 'age_limit' tagging some porn site + +A second approach implemented relies on a list of porn domains, to activate it +pass the list filename as the only argument """ # Allow direct execution @@ -11,25 +14,42 @@ import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from test.helper import get_testcases +from youtube_dl.utils import compat_urllib_parse_urlparse from youtube_dl.utils import 
compat_urllib_request +if len(sys.argv) > 1: + METHOD = 'LIST' + LIST = open(sys.argv[1]).read().decode('utf8').strip() +else: + METHOD = 'EURISTIC' + for test in get_testcases(): - try: - webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read() - except: - print('\nFail: {0}'.format(test['name'])) - continue + if METHOD == 'EURISTIC': + try: + webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read() + except: + print('\nFail: {0}'.format(test['name'])) + continue - webpage = webpage.decode('utf8', 'replace') + webpage = webpage.decode('utf8', 'replace') - if 'porn' in webpage.lower() and ('info_dict' not in test - or 'age_limit' not in test['info_dict'] - or test['info_dict']['age_limit'] != 18): + RESULT = 'porn' in webpage.lower() + + elif METHOD == 'LIST': + domain = compat_urllib_parse_urlparse(test['url']).netloc + if not domain: + print('\nFail: {0}'.format(test['name'])) + continue + domain = '.'.join(domain.split('.')[-2:]) + + RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST) + + if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] + or test['info_dict']['age_limit'] != 18): print('\nPotential missing age_limit check: {0}'.format(test['name'])) - elif 'porn' not in webpage.lower() and ('info_dict' in test and - 'age_limit' in test['info_dict'] and - test['info_dict']['age_limit'] == 18): + elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] + and test['info_dict']['age_limit'] == 18): print('\nPotential false negative: {0}'.format(test['name'])) else: diff --git a/devscripts/gh-pages/update-feed.py b/devscripts/gh-pages/update-feed.py index 16571a924..0ba15ae0f 100755 --- a/devscripts/gh-pages/update-feed.py +++ b/devscripts/gh-pages/update-feed.py @@ -1,56 +1,76 @@ #!/usr/bin/env python3 import datetime - +import io +import json import textwrap -import json -atom_template=textwrap.dedent("""\ - - - youtube-dl releases - youtube-dl-updates-feed - 
@TIMESTAMP@ - @ENTRIES@ - """) +atom_template = textwrap.dedent("""\ + + + + youtube-dl releases + https://yt-dl.org/feed/youtube-dl-updates-feed + @TIMESTAMP@ + @ENTRIES@ + """) -entry_template=textwrap.dedent(""" - - youtube-dl-@VERSION@ - New version @VERSION@ - - -
- Downloads available at https://yt-dl.org/downloads/@VERSION@/ -
-
- - The youtube-dl maintainers - - @TIMESTAMP@ -
- """) +entry_template = textwrap.dedent(""" + + https://yt-dl.org/feed/youtube-dl-updates-feed/youtube-dl-@VERSION@ + New version @VERSION@ + + +
+ Downloads available at https://yt-dl.org/downloads/@VERSION@/ +
+
+ + The youtube-dl maintainers + + @TIMESTAMP@ +
+ """) now = datetime.datetime.now() -now_iso = now.isoformat() +now_iso = now.isoformat() + 'Z' -atom_template = atom_template.replace('@TIMESTAMP@',now_iso) - -entries=[] +atom_template = atom_template.replace('@TIMESTAMP@', now_iso) versions_info = json.load(open('update/versions.json')) versions = list(versions_info['versions'].keys()) versions.sort() +entries = [] for v in versions: - entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-')) - entry = entry.replace('@VERSION@',v) - entries.append(entry) + fields = v.split('.') + year, month, day = map(int, fields[:3]) + faked = 0 + patchlevel = 0 + while True: + try: + datetime.date(year, month, day) + except ValueError: + day -= 1 + faked += 1 + assert day > 0 + continue + break + if len(fields) >= 4: + try: + patchlevel = int(fields[3]) + except ValueError: + patchlevel = 1 + timestamp = '%04d-%02d-%02dT00:%02d:%02dZ' % (year, month, day, faked, patchlevel) + + entry = entry_template.replace('@TIMESTAMP@', timestamp) + entry = entry.replace('@VERSION@', v) + entries.append(entry) entries_str = textwrap.indent(''.join(entries), '\t') atom_template = atom_template.replace('@ENTRIES@', entries_str) -with open('update/releases.atom','w',encoding='utf-8') as atom_file: - atom_file.write(atom_template) +with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file: + atom_file.write(atom_template) diff --git a/devscripts/make_readme.py b/devscripts/make_readme.py index 7f2ea319c..cae1fa4f2 100755 --- a/devscripts/make_readme.py +++ b/devscripts/make_readme.py @@ -1,20 +1,24 @@ +import io import sys import re README_FILE = 'README.md' helptext = sys.stdin.read() -with open(README_FILE) as f: +if isinstance(helptext, bytes): + helptext = helptext.decode('utf-8') + +with io.open(README_FILE, encoding='utf-8') as f: oldreadme = f.read() header = oldreadme[:oldreadme.index('# OPTIONS')] footer = oldreadme[oldreadme.index('# CONFIGURATION'):] -options = helptext[helptext.index(' General Options:')+19:] 
+options = helptext[helptext.index(' General Options:') + 19:] options = re.sub(r'^ (\w.+)$', r'## \1', options, flags=re.M) options = '# OPTIONS\n' + options + '\n' -with open(README_FILE, 'w') as f: +with io.open(README_FILE, 'w', encoding='utf-8') as f: f.write(header) f.write(options) f.write(footer) diff --git a/devscripts/release.sh b/devscripts/release.sh index 2766174c1..323acf8cf 100755 --- a/devscripts/release.sh +++ b/devscripts/release.sh @@ -24,6 +24,8 @@ if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.0 version="$1" if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi +useless_files=$(find youtube_dl -type f -not -name '*.py') +if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi /bin/echo -e "\n### First of all, testing..." diff --git a/setup.py b/setup.py index 653ca9a73..1f45159cd 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,10 @@ setup( author_email='ytdl@yt-dl.org', maintainer='Philipp Hagemeister', maintainer_email='phihag@phihag.de', - packages=['youtube_dl', 'youtube_dl.extractor', 'youtube_dl.downloader'], + packages=[ + 'youtube_dl', + 'youtube_dl.extractor', 'youtube_dl.downloader', + 'youtube_dl.postprocessor'], # Provokes warning on most systems (why?!) 
# test_suite = 'nose.collector', diff --git a/test/test_all_urls.py b/test/test_all_urls.py index bd77b7c30..75547f42a 100644 --- a/test/test_all_urls.py +++ b/test/test_all_urls.py @@ -113,6 +113,8 @@ class TestAllURLsMatching(unittest.TestCase): def test_vimeo_matching(self): self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel']) self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user']) + self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user']) + self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review']) # https://github.com/rg3/youtube-dl/issues/1930 def test_soundcloud_not_matching_sets(self): diff --git a/test/test_download.py b/test/test_download.py index d0be8d27c..0d925ae69 100644 --- a/test/test_download.py +++ b/test/test_download.py @@ -148,7 +148,7 @@ def generator(test_case): for key, value in info_dict.items() if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location')) if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()): - sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n') + sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n') # Check for the presence of mandatory fields for key in ('id', 'url', 'title', 'ext'): diff --git a/test/test_playlists.py b/test/test_playlists.py index 1b7b4e3d8..5eeba091e 100644 --- a/test/test_playlists.py +++ b/test/test_playlists.py @@ -1,6 +1,7 @@ #!/usr/bin/env python # encoding: utf-8 +from __future__ import unicode_literals # Allow direct execution import os @@ -28,7 +29,10 @@ from youtube_dl.extractor import ( BandcampAlbumIE, SmotriCommunityIE, SmotriUserIE, - IviCompilationIE + IviCompilationIE, + ImdbListIE, + KhanAcademyIE, + EveryonesMixtapeIE, ) @@ -42,7 +46,7 @@ class TestPlaylists(unittest.TestCase): ie = DailymotionPlaylistIE(dl) result = 
ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'SPORT') + self.assertEqual(result['title'], 'SPORT') self.assertTrue(len(result['entries']) > 20) def test_dailymotion_user(self): @@ -50,7 +54,7 @@ class TestPlaylists(unittest.TestCase): ie = DailymotionUserIE(dl) result = ie.extract('http://www.dailymotion.com/user/generation-quoi/') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'Génération Quoi') + self.assertEqual(result['title'], 'Génération Quoi') self.assertTrue(len(result['entries']) >= 26) def test_vimeo_channel(self): @@ -58,7 +62,7 @@ class TestPlaylists(unittest.TestCase): ie = VimeoChannelIE(dl) result = ie.extract('http://vimeo.com/channels/tributes') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'Vimeo Tributes') + self.assertEqual(result['title'], 'Vimeo Tributes') self.assertTrue(len(result['entries']) > 24) def test_vimeo_user(self): @@ -66,7 +70,7 @@ class TestPlaylists(unittest.TestCase): ie = VimeoUserIE(dl) result = ie.extract('http://vimeo.com/nkistudio/videos') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'Nki') + self.assertEqual(result['title'], 'Nki') self.assertTrue(len(result['entries']) > 65) def test_vimeo_album(self): @@ -74,7 +78,7 @@ class TestPlaylists(unittest.TestCase): ie = VimeoAlbumIE(dl) result = ie.extract('http://vimeo.com/album/2632481') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'Staff Favorites: November 2013') + self.assertEqual(result['title'], 'Staff Favorites: November 2013') self.assertTrue(len(result['entries']) > 12) def test_vimeo_groups(self): @@ -82,7 +86,7 @@ class TestPlaylists(unittest.TestCase): ie = VimeoGroupsIE(dl) result = ie.extract('http://vimeo.com/groups/rolexawards') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'Rolex Awards for Enterprise') + self.assertEqual(result['title'], 'Rolex Awards 
for Enterprise') self.assertTrue(len(result['entries']) > 72) def test_ustream_channel(self): @@ -90,7 +94,7 @@ class TestPlaylists(unittest.TestCase): ie = UstreamChannelIE(dl) result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'5124905') + self.assertEqual(result['id'], '5124905') self.assertTrue(len(result['entries']) >= 11) def test_soundcloud_set(self): @@ -98,7 +102,7 @@ class TestPlaylists(unittest.TestCase): ie = SoundcloudSetIE(dl) result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'The Royal Concept EP') + self.assertEqual(result['title'], 'The Royal Concept EP') self.assertTrue(len(result['entries']) >= 6) def test_soundcloud_user(self): @@ -106,7 +110,7 @@ class TestPlaylists(unittest.TestCase): ie = SoundcloudUserIE(dl) result = ie.extract('https://soundcloud.com/the-concept-band') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'9615865') + self.assertEqual(result['id'], '9615865') self.assertTrue(len(result['entries']) >= 12) def test_livestream_event(self): @@ -114,7 +118,7 @@ class TestPlaylists(unittest.TestCase): ie = LivestreamIE(dl) result = ie.extract('http://new.livestream.com/tedx/cityenglish') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'TEDCity2.0 (English)') + self.assertEqual(result['title'], 'TEDCity2.0 (English)') self.assertTrue(len(result['entries']) >= 4) def test_nhl_videocenter(self): @@ -122,8 +126,8 @@ class TestPlaylists(unittest.TestCase): ie = NHLVideocenterIE(dl) result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'999') - self.assertEqual(result['title'], u'Highlights') + self.assertEqual(result['id'], '999') + self.assertEqual(result['title'], 'Highlights') self.assertEqual(len(result['entries']), 
12) def test_bambuser_channel(self): @@ -131,7 +135,7 @@ class TestPlaylists(unittest.TestCase): ie = BambuserChannelIE(dl) result = ie.extract('http://bambuser.com/channel/pixelversity') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'pixelversity') + self.assertEqual(result['title'], 'pixelversity') self.assertTrue(len(result['entries']) >= 60) def test_bandcamp_album(self): @@ -139,7 +143,7 @@ class TestPlaylists(unittest.TestCase): ie = BandcampAlbumIE(dl) result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep') self.assertIsPlaylist(result) - self.assertEqual(result['title'], u'Nightmare Night EP') + self.assertEqual(result['title'], 'Nightmare Night EP') self.assertTrue(len(result['entries']) >= 4) def test_smotri_community(self): @@ -147,8 +151,8 @@ class TestPlaylists(unittest.TestCase): ie = SmotriCommunityIE(dl) result = ie.extract('http://smotri.com/community/video/kommuna') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'kommuna') - self.assertEqual(result['title'], u'КПРФ') + self.assertEqual(result['id'], 'kommuna') + self.assertEqual(result['title'], 'КПРФ') self.assertTrue(len(result['entries']) >= 4) def test_smotri_user(self): @@ -156,17 +160,17 @@ class TestPlaylists(unittest.TestCase): ie = SmotriUserIE(dl) result = ie.extract('http://smotri.com/user/inspector') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'inspector') - self.assertEqual(result['title'], u'Inspector') + self.assertEqual(result['id'], 'inspector') + self.assertEqual(result['title'], 'Inspector') self.assertTrue(len(result['entries']) >= 9) def test_AcademicEarthCourse(self): dl = FakeYDL() ie = AcademicEarthCourseIE(dl) - result = ie.extract(u'http://academicearth.org/courses/building-dynamic-websites/') + result = ie.extract('http://academicearth.org/courses/building-dynamic-websites/') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'building-dynamic-websites') - 
self.assertEqual(result['title'], u'Building Dynamic Websites') + self.assertEqual(result['id'], 'building-dynamic-websites') + self.assertEqual(result['title'], 'Building Dynamic Websites') self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.") self.assertEqual(len(result['entries']), 10) @@ -175,8 +179,8 @@ class TestPlaylists(unittest.TestCase): ie = IviCompilationIE(dl) result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'dezhurnyi_angel') - self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012)') + self.assertEqual(result['id'], 'dezhurnyi_angel') + self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)') self.assertTrue(len(result['entries']) >= 36) def test_ivi_compilation_season(self): @@ -184,9 +188,37 @@ class TestPlaylists(unittest.TestCase): ie = IviCompilationIE(dl) result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2') self.assertIsPlaylist(result) - self.assertEqual(result['id'], u'dezhurnyi_angel/season2') - self.assertEqual(result['title'], u'Дежурный 
ангел (2010 - 2012) 2 сезон') + self.assertEqual(result['id'], 'dezhurnyi_angel/season2') + self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон') self.assertTrue(len(result['entries']) >= 20) + + def test_imdb_list(self): + dl = FakeYDL() + ie = ImdbListIE(dl) + result = ie.extract('http://www.imdb.com/list/sMjedvGDd8U') + self.assertIsPlaylist(result) + self.assertEqual(result['id'], 'sMjedvGDd8U') + self.assertEqual(result['title'], 'Animated and Family Films') + self.assertTrue(len(result['entries']) >= 48) + + def test_khanacademy_topic(self): + dl = FakeYDL() + ie = KhanAcademyIE(dl) + result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography') + self.assertIsPlaylist(result) + self.assertEqual(result['id'], 'cryptography') + self.assertEqual(result['title'], 'Journey into cryptography') + self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?') + self.assertTrue(len(result['entries']) >= 3) + + def test_EveryonesMixtape(self): + dl = FakeYDL() + ie = EveryonesMixtapeIE(dl) + result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi') + self.assertIsPlaylist(result) + self.assertEqual(result['id'], 'm7m0jJAbMQi') + self.assertEqual(result['title'], 'Driving') + self.assertEqual(len(result['entries']), 24) if __name__ == '__main__': diff --git a/test/test_subtitles.py b/test/test_subtitles.py index 263b5ac69..1e4e62faa 100644 --- a/test/test_subtitles.py +++ b/test/test_subtitles.py @@ -167,13 +167,13 @@ class TestTedSubtitles(BaseTestSubtitles): def test_subtitles(self): self.DL.params['writesubtitles'] = True subtitles = self.getSubtitles() - self.assertEqual(md5(subtitles['en']), '2154f31ff9b9f89a0aa671537559c21d') + self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14') def test_subtitles_lang(self): self.DL.params['writesubtitles'] = True self.DL.params['subtitleslangs'] = ['fr'] subtitles = self.getSubtitles() - 
self.assertEqual(md5(subtitles['fr']), '7616cbc6df20ec2c1204083c83871cf6') + self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5') def test_allsubtitles(self): self.DL.params['writesubtitles'] = True diff --git a/test/test_unicode_literals.py b/test/test_unicode_literals.py new file mode 100644 index 000000000..a4ba7bad0 --- /dev/null +++ b/test/test_unicode_literals.py @@ -0,0 +1,47 @@ +from __future__ import unicode_literals + +import io +import os +import re +import unittest + +rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +IGNORED_FILES = [ + 'setup.py', # http://bugs.python.org/issue13943 +] + + +class TestUnicodeLiterals(unittest.TestCase): + def test_all_files(self): + print('Skipping this test (not yet fully implemented)') + return + + for dirpath, _, filenames in os.walk(rootDir): + for basename in filenames: + if not basename.endswith('.py'): + continue + if basename in IGNORED_FILES: + continue + + fn = os.path.join(dirpath, basename) + with io.open(fn, encoding='utf-8') as inf: + code = inf.read() + + if "'" not in code and '"' not in code: + continue + imps = 'from __future__ import unicode_literals' + self.assertTrue( + imps in code, + ' %s missing in %s' % (imps, fn)) + + m = re.search(r'(?<=\s)u[\'"](?!\)|,|$)', code) + if m is not None: + self.assertTrue( + m is None, + 'u present in %s, around %s' % ( + fn, code[m.start() - 10:m.end() + 10])) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py index 056700614..a3fc53047 100644 --- a/test/test_youtube_signature.py +++ b/test/test_youtube_signature.py @@ -27,12 +27,6 @@ _TESTS = [ 85, u'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@', ), - ( - u'https://s.ytimg.com/yts/swfbin/watch_as3-vflg5GhxU.swf', - u'swf', - 82, - u':/.-,+*)=\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBAzyxw>utsrqponmlkjihgfedcba987654321' - ), ] diff --git a/youtube_dl/YoutubeDL.py 
b/youtube_dl/YoutubeDL.py index a9a3639d7..a0ab89b3d 100644 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -from __future__ import absolute_import +from __future__ import absolute_import, unicode_literals import collections import errno @@ -51,9 +51,11 @@ from .utils import ( write_json_file, write_string, YoutubeDLHandler, + prepend_extension, ) from .extractor import get_info_extractor, gen_extractors from .downloader import get_suitable_downloader +from .postprocessor import FFmpegMergerPP from .version import __version__ @@ -148,11 +150,16 @@ class YoutubeDL(object): socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fridibi + debug_printtraffic:Print out sent and received HTTP traffic The following parameters are not used by YoutubeDL itself, they are used by the FileDownloader: nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle + + The following options are used by the post processors: + prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available, + otherwise prefer avconv. 
""" params = None @@ -164,6 +171,8 @@ class YoutubeDL(object): def __init__(self, params=None): """Create a FileDownloader object with the given options.""" + if params is None: + params = {} self._ies = [] self._ies_instances = {} self._pps = [] @@ -172,7 +181,7 @@ class YoutubeDL(object): self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr - self.params = {} if params is None else params + self.params = params if params.get('bidi_workaround', False): try: @@ -197,7 +206,7 @@ class YoutubeDL(object): self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == 2: - self.report_warning(u'Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') + self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise @@ -206,13 +215,13 @@ class YoutubeDL(object): and not params['restrictfilenames']): # On Python 3, the Unicode filesystem API will throw errors (#1474) self.report_warning( - u'Assuming --restrict-filenames since file system encoding ' - u'cannot encode all charactes. ' - u'Set the LC_ALL environment variable to fix this.') + 'Assuming --restrict-filenames since file system encoding ' + 'cannot encode all charactes. ' + 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if '%(stitle)s' in self.params.get('outtmpl', ''): - self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.') + self.report_warning('%(stitle)s is deprecated. 
Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.') self._setup_opener() @@ -255,13 +264,13 @@ class YoutubeDL(object): return message assert hasattr(self, '_output_process') - assert type(message) == type(u'') - line_count = message.count(u'\n') + 1 - self._output_process.stdin.write((message + u'\n').encode('utf-8')) + assert type(message) == type('') + line_count = message.count('\n') + 1 + self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() - res = u''.join(self._output_channel.readline().decode('utf-8') + res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) - return res[:-len(u'\n')] + return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" @@ -273,19 +282,19 @@ class YoutubeDL(object): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) - terminator = [u'\n', u''][skip_eol] + terminator = ['\n', ''][skip_eol] output = message + terminator write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" - assert type(message) == type(u'') + assert type(message) == type('') if self.params.get('logger'): self.params['logger'].error(message) else: message = self._bidi_workaround(message) - output = message + u'\n' + output = message + '\n' write_string(output, self._err_file) def to_console_title(self, message): @@ -296,21 +305,21 @@ class YoutubeDL(object): # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: - write_string(u'\033]0;%s\007' % message, self._screen_file) + write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Save the title on stack - 
write_string(u'\033[22;0t', self._screen_file) + write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if 'TERM' in os.environ: # Restore the title from stack - write_string(u'\033[23;0t', self._screen_file) + write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() @@ -336,13 +345,13 @@ class YoutubeDL(object): if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block - tb = u'' + tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: - tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) + tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) - tb = u''.join(tb_data) + tb = ''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: @@ -358,10 +367,10 @@ class YoutubeDL(object): If stderr is a tty file the 'WARNING:' will be colored ''' if self._err_file.isatty() and os.name != 'nt': - _msg_header = u'\033[0;33mWARNING:\033[0m' + _msg_header = '\033[0;33mWARNING:\033[0m' else: - _msg_header = u'WARNING:' - warning_message = u'%s %s' % (_msg_header, message) + _msg_header = 'WARNING:' + warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message) def report_error(self, message, tb=None): @@ -370,18 +379,18 @@ class YoutubeDL(object): in red if stderr is a tty file. 
''' if self._err_file.isatty() and os.name != 'nt': - _msg_header = u'\033[0;31mERROR:\033[0m' + _msg_header = '\033[0;31mERROR:\033[0m' else: - _msg_header = u'ERROR:' - error_message = u'%s %s' % (_msg_header, message) + _msg_header = 'ERROR:' + error_message = '%s %s' % (_msg_header, message) self.trouble(error_message, tb) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: - self.to_screen(u'[download] %s has already been downloaded' % file_name) + self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: - self.to_screen(u'[download] The file has already been downloaded') + self.to_screen('[download] The file has already been downloaded') def increment_downloads(self): """Increment the ordinal that assigns a number to each file.""" @@ -396,61 +405,61 @@ class YoutubeDL(object): autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 - autonumber_templ = u'%0' + str(autonumber_size) + u'd' + autonumber_templ = '%0' + str(autonumber_size) + 'd' template_dict['autonumber'] = autonumber_templ % self._num_downloads if template_dict.get('playlist_index') is not None: - template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index'] + template_dict['playlist_index'] = '%05d' % template_dict['playlist_index'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), - is_id=(k == u'id')) + is_id=(k == 'id')) template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items() if v is not None) - template_dict = collections.defaultdict(lambda: u'NA', template_dict) + template_dict = collections.defaultdict(lambda: 'NA', template_dict) tmpl = os.path.expanduser(self.params['outtmpl']) filename = tmpl % template_dict return filename except ValueError as err: - self.report_error(u'Error in output template: ' + str(err) + u' (encoding: ' + 
repr(preferredencoding()) + ')') + self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict): """ Returns None iff the file should be downloaded """ - video_title = info_dict.get('title', info_dict.get('id', u'video')) + video_title = info_dict.get('title', info_dict.get('id', 'video')) if 'title' in info_dict: # This can happen when we're just evaluating the playlist title = info_dict['title'] matchtitle = self.params.get('matchtitle', False) if matchtitle: if not re.search(matchtitle, title, re.IGNORECASE): - return u'"' + title + '" title did not match pattern "' + matchtitle + '"' + return '"' + title + '" title did not match pattern "' + matchtitle + '"' rejecttitle = self.params.get('rejecttitle', False) if rejecttitle: if re.search(rejecttitle, title, re.IGNORECASE): - return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"' + return '"' + title + '" title matched reject pattern "' + rejecttitle + '"' date = info_dict.get('upload_date', None) if date is not None: dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: - return u'%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) + return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) view_count = info_dict.get('view_count', None) if view_count is not None: min_views = self.params.get('min_views') if min_views is not None and view_count < min_views: - return u'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) + return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views) max_views = self.params.get('max_views') if max_views is not None and view_count > max_views: - return u'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) + return 
'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views) age_limit = self.params.get('age_limit') if age_limit is not None: if age_limit < info_dict.get('age_limit', 0): - return u'Skipping "' + title + '" because it is age restricted' + return 'Skipping "' + title + '" because it is age restricted' if self.in_download_archive(info_dict): - return u'%s has already been recorded in archive' % video_title + return '%s has already been recorded in archive' % video_title return None @staticmethod @@ -477,8 +486,8 @@ class YoutubeDL(object): continue if not ie.working(): - self.report_warning(u'The program functionality for this site has been marked as broken, ' - u'and will probably not work.') + self.report_warning('The program functionality for this site has been marked as broken, ' + 'and will probably not work.') try: ie_result = ie.extract(url) @@ -511,7 +520,7 @@ class YoutubeDL(object): else: raise else: - self.report_error(u'no suitable InfoExtractor: %s' % url) + self.report_error('no suitable InfoExtractor: %s' % url) def process_ie_result(self, ie_result, download=True, extra_info={}): """ @@ -562,7 +571,7 @@ class YoutubeDL(object): elif result_type == 'playlist': # We process each entry in the playlist playlist = ie_result.get('title', None) or ie_result.get('id', None) - self.to_screen(u'[download] Downloading playlist: %s' % playlist) + self.to_screen('[download] Downloading playlist: %s' % playlist) playlist_results = [] @@ -577,11 +586,11 @@ class YoutubeDL(object): n_entries = len(entries) self.to_screen( - u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" % + "[%s] playlist '%s': Collected %d video ids (downloading %d of them)" % (ie_result['extractor'], playlist, n_all_entries, n_entries)) for i, entry in enumerate(entries, 1): - self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries)) + self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries)) 
extra = { 'playlist': playlist, 'playlist_index': i + playliststart, @@ -593,7 +602,7 @@ class YoutubeDL(object): reason = self._match_entry(entry) if reason is not None: - self.to_screen(u'[download] ' + reason) + self.to_screen('[download] ' + reason) continue entry_result = self.process_ie_result(entry, @@ -626,7 +635,7 @@ class YoutubeDL(object): elif format_spec == 'worst': return available_formats[0] else: - extensions = [u'mp4', u'flv', u'webm', u'3gp'] + extensions = ['mp4', 'flv', 'webm', '3gp'] if format_spec in extensions: filter_f = lambda f: f['ext'] == format_spec else: @@ -645,7 +654,7 @@ class YoutubeDL(object): info_dict['playlist_index'] = None # This extractors handle format selection themselves - if info_dict['extractor'] in [u'Youku']: + if info_dict['extractor'] in ['Youku']: if download: self.process_info(info_dict) return info_dict @@ -662,10 +671,10 @@ class YoutubeDL(object): if format.get('format_id') is None: format['format_id'] = compat_str(i) if format.get('format') is None: - format['format'] = u'{id} - {res}{note}'.format( + format['format'] = '{id} - {res}{note}'.format( id=format['format_id'], res=self.format_resolution(format), - note=u' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', + note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '', ) # Automatically determine file extension if missing if 'ext' not in format: @@ -697,21 +706,35 @@ class YoutubeDL(object): if req_format in ('-1', 'all'): formats_to_download = formats else: - # We can accept formats requestd in the format: 34/5/best, we pick + # We can accept formats requested in the format: 34/5/best, we pick # the first that is available, starting from left req_formats = req_format.split('/') for rf in req_formats: - selected_format = self.select_format(rf, formats) + if re.match(r'.+?\+.+?', rf) is not None: + # Two formats have been requested like '137+139' + format_1, format_2 = rf.split('+') 
+ formats_info = (self.select_format(format_1, formats), + self.select_format(format_2, formats)) + if all(formats_info): + selected_format = { + 'requested_formats': formats_info, + 'format': rf, + 'ext': formats_info[0]['ext'], + } + else: + selected_format = None + else: + selected_format = self.select_format(rf, formats) if selected_format is not None: formats_to_download = [selected_format] break if not formats_to_download: - raise ExtractorError(u'requested format not available', + raise ExtractorError('requested format not available', expected=True) if download: if len(formats_to_download) > 1: - self.to_screen(u'[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) + self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download))) for format in formats_to_download: new_info = dict(info_dict) new_info.update(format) @@ -729,7 +752,7 @@ class YoutubeDL(object): info_dict['fulltitle'] = info_dict['title'] if len(info_dict['title']) > 200: - info_dict['title'] = info_dict['title'][:197] + u'...' + info_dict['title'] = info_dict['title'][:197] + '...' 
# Keep for backwards compatibility info_dict['stitle'] = info_dict['title'] @@ -739,7 +762,7 @@ class YoutubeDL(object): reason = self._match_entry(info_dict) if reason is not None: - self.to_screen(u'[download] ' + reason) + self.to_screen('[download] ' + reason) return max_downloads = self.params.get('max_downloads') @@ -756,7 +779,7 @@ class YoutubeDL(object): self.to_stdout(info_dict['id']) if self.params.get('forceurl', False): # For RTMP URLs, also include the playpath - self.to_stdout(info_dict['url'] + info_dict.get('play_path', u'')) + self.to_stdout(info_dict['url'] + info_dict.get('play_path', '')) if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None: self.to_stdout(info_dict['thumbnail']) if self.params.get('forcedescription', False) and info_dict.get('description') is not None: @@ -783,37 +806,37 @@ class YoutubeDL(object): if dn != '' and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: - self.report_error(u'unable to create directory ' + compat_str(err)) + self.report_error('unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): - descfn = filename + u'.description' + descfn = filename + '.description' if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)): - self.to_screen(u'[info] Video description is already present') + self.to_screen('[info] Video description is already present') else: try: - self.to_screen(u'[info] Writing video description to: ' + descfn) + self.to_screen('[info] Writing video description to: ' + descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (KeyError, TypeError): - self.report_warning(u'There\'s no description to write.') + self.report_warning('There\'s no description to write.') except (OSError, IOError): - self.report_error(u'Cannot write description file ' + descfn) + self.report_error('Cannot write 
description file ' + descfn) return if self.params.get('writeannotations', False): - annofn = filename + u'.annotations.xml' + annofn = filename + '.annotations.xml' if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)): - self.to_screen(u'[info] Video annotations are already present') + self.to_screen('[info] Video annotations are already present') else: try: - self.to_screen(u'[info] Writing video annotations to: ' + annofn) + self.to_screen('[info] Writing video annotations to: ' + annofn) with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): - self.report_warning(u'There are no annotations to write.') + self.report_warning('There are no annotations to write.') except (OSError, IOError): - self.report_error(u'Cannot write annotations file: ' + annofn) + self.report_error('Cannot write annotations file: ' + annofn) return subtitles_are_requested = any([self.params.get('writesubtitles', False), @@ -831,45 +854,45 @@ class YoutubeDL(object): try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)): - self.to_screen(u'[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) + self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format)) else: - self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename) + self.to_screen('[info] Writing video subtitles to: ' + sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: subfile.write(sub) except (OSError, IOError): - self.report_error(u'Cannot write subtitles file ' + descfn) + self.report_error('Cannot write subtitles file ' + descfn) return if self.params.get('writeinfojson', False): - infofn = os.path.splitext(filename)[0] + u'.info.json' + infofn = os.path.splitext(filename)[0] + '.info.json' if 
self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)): - self.to_screen(u'[info] Video description metadata is already present') + self.to_screen('[info] Video description metadata is already present') else: - self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn) + self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) try: write_json_file(info_dict, encodeFilename(infofn)) except (OSError, IOError): - self.report_error(u'Cannot write metadata to JSON file ' + infofn) + self.report_error('Cannot write metadata to JSON file ' + infofn) return if self.params.get('writethumbnail', False): if info_dict.get('thumbnail') is not None: - thumb_format = determine_ext(info_dict['thumbnail'], u'jpg') - thumb_filename = os.path.splitext(filename)[0] + u'.' + thumb_format + thumb_format = determine_ext(info_dict['thumbnail'], 'jpg') + thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)): - self.to_screen(u'[%s] %s: Thumbnail is already present' % + self.to_screen('[%s] %s: Thumbnail is already present' % (info_dict['extractor'], info_dict['id'])) else: - self.to_screen(u'[%s] %s: Downloading thumbnail ...' % + self.to_screen('[%s] %s: Downloading thumbnail ...' 
% (info_dict['extractor'], info_dict['id'])) try: uf = compat_urllib_request.urlopen(info_dict['thumbnail']) with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) - self.to_screen(u'[%s] %s: Writing thumbnail to: %s' % + self.to_screen('[%s] %s: Writing thumbnail to: %s' % (info_dict['extractor'], info_dict['id'], thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self.report_warning(u'Unable to download thumbnail "%s": %s' % + self.report_warning('Unable to download thumbnail "%s": %s' % (info_dict['thumbnail'], compat_str(err))) if not self.params.get('skip_download', False): @@ -877,24 +900,49 @@ class YoutubeDL(object): success = True else: try: - fd = get_suitable_downloader(info_dict)(self, self.params) - for ph in self._progress_hooks: - fd.add_progress_hook(ph) - success = fd.download(filename, info_dict) + def dl(name, info): + fd = get_suitable_downloader(info)(self, self.params) + for ph in self._progress_hooks: + fd.add_progress_hook(ph) + return fd.download(name, info) + if info_dict.get('requested_formats') is not None: + downloaded = [] + success = True + merger = FFmpegMergerPP(self) + if not merger._get_executable(): + postprocessors = [] + self.report_warning('You have requested multiple ' + 'formats but ffmpeg or avconv are not installed.' 
+ ' The formats won\'t be merged') + else: + postprocessors = [merger] + for f in info_dict['requested_formats']: + new_info = dict(info_dict) + new_info.update(f) + fname = self.prepare_filename(new_info) + fname = prepend_extension(fname, 'f%s' % f['format_id']) + downloaded.append(fname) + partial_success = dl(fname, new_info) + success = success and partial_success + info_dict['__postprocessors'] = postprocessors + info_dict['__files_to_merge'] = downloaded + else: + # Just a single file + success = dl(filename, info_dict) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self.report_error(u'unable to download video data: %s' % str(err)) + self.report_error('unable to download video data: %s' % str(err)) return except (OSError, IOError) as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: - self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) + self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success: try: self.post_process(filename, info_dict) except (PostProcessingError) as err: - self.report_error(u'postprocessing: %s' % str(err)) + self.report_error('postprocessing: %s' % str(err)) return self.record_download_archive(info_dict) @@ -911,9 +959,9 @@ class YoutubeDL(object): #It also downloads the videos self.extract_info(url) except UnavailableVideoError: - self.report_error(u'unable to download video') + self.report_error('unable to download video') except MaxDownloadsReached: - self.to_screen(u'[info] Maximum number of downloaded files reached.') + self.to_screen('[info] Maximum number of downloaded files reached.') raise return self._download_retcode @@ -926,7 +974,7 @@ class YoutubeDL(object): except DownloadError: webpage_url = info.get('webpage_url') if webpage_url is not None: - self.report_warning(u'The info failed to download, trying with "%s"' % webpage_url) 
+ self.report_warning('The info failed to download, trying with "%s"' % webpage_url) return self.download([webpage_url]) else: raise @@ -937,7 +985,11 @@ class YoutubeDL(object): info = dict(ie_info) info['filepath'] = filename keep_video = None - for pp in self._pps: + pps_chain = [] + if ie_info.get('__postprocessors') is not None: + pps_chain.extend(ie_info['__postprocessors']) + pps_chain.extend(self._pps) + for pp in pps_chain: try: keep_video_wish, new_info = pp.run(info) if keep_video_wish is not None: @@ -950,10 +1002,10 @@ class YoutubeDL(object): self.report_error(e.msg) if keep_video is False and not self.params.get('keepvideo', False): try: - self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename) + self.to_screen('Deleting original file %s (pass -k to keep)' % filename) os.remove(encodeFilename(filename)) except (IOError, OSError): - self.report_warning(u'Unable to remove downloaded video file') + self.report_warning('Unable to remove downloaded video file') def _make_archive_id(self, info_dict): # Future-proof against any change in case @@ -964,7 +1016,7 @@ class YoutubeDL(object): extractor = info_dict.get('ie_key') # key in a playlist if extractor is None: return None # Incomplete video information - return extractor.lower() + u' ' + info_dict['id'] + return extractor.lower() + ' ' + info_dict['id'] def in_download_archive(self, info_dict): fn = self.params.get('download_archive') @@ -992,7 +1044,7 @@ class YoutubeDL(object): vid_id = self._make_archive_id(info_dict) assert vid_id with locked_file(fn, 'a', encoding='utf-8') as archive_file: - archive_file.write(vid_id + u'\n') + archive_file.write(vid_id + '\n') @staticmethod def format_resolution(format, default='unknown'): @@ -1002,49 +1054,51 @@ class YoutubeDL(object): return format['resolution'] if format.get('height') is not None: if format.get('width') is not None: - res = u'%sx%s' % (format['width'], format['height']) + res = '%sx%s' % (format['width'], format['height']) 
else: - res = u'%sp' % format['height'] + res = '%sp' % format['height'] elif format.get('width') is not None: - res = u'?x%d' % format['width'] + res = '?x%d' % format['width'] else: res = default return res def list_formats(self, info_dict): def format_note(fdict): - res = u'' - if f.get('ext') in ['f4f', 'f4m']: - res += u'(unsupported) ' + res = '' + if fdict.get('ext') in ['f4f', 'f4m']: + res += '(unsupported) ' if fdict.get('format_note') is not None: - res += fdict['format_note'] + u' ' + res += fdict['format_note'] + ' ' if fdict.get('tbr') is not None: - res += u'%4dk ' % fdict['tbr'] + res += '%4dk ' % fdict['tbr'] if (fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none'): - res += u'%-5s@' % fdict['vcodec'] + res += '%-5s' % fdict['vcodec'] + if fdict.get('vbr') is not None: + res += '@' elif fdict.get('vbr') is not None and fdict.get('abr') is not None: - res += u'video@' + res += 'video@' if fdict.get('vbr') is not None: - res += u'%4dk' % fdict['vbr'] + res += '%4dk' % fdict['vbr'] if fdict.get('acodec') is not None: if res: - res += u', ' - res += u'%-5s' % fdict['acodec'] + res += ', ' + res += '%-5s' % fdict['acodec'] elif fdict.get('abr') is not None: if res: - res += u', ' + res += ', ' res += 'audio' if fdict.get('abr') is not None: - res += u'@%3dk' % fdict['abr'] + res += '@%3dk' % fdict['abr'] if fdict.get('filesize') is not None: if res: - res += u', ' + res += ', ' res += format_bytes(fdict['filesize']) return res def line(format, idlen=20): - return ((u'%-' + compat_str(idlen + 1) + u's%-10s%-12s%s') % ( + return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % ( format['format_id'], format['ext'], self.format_resolution(format), @@ -1052,7 +1106,7 @@ class YoutubeDL(object): )) formats = info_dict.get('formats', [info_dict]) - idlen = max(len(u'format code'), + idlen = max(len('format code'), max(len(f['format_id']) for f in formats)) formats_s = [line(f, idlen) for f in formats] if len(formats) > 1: @@ -1060,10 +1114,10 
@@ class YoutubeDL(object): formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)' header_line = line({ - 'format_id': u'format code', 'ext': u'extension', - 'resolution': u'resolution', 'format_note': u'note'}, idlen=idlen) - self.to_screen(u'[info] Available formats for %s:\n%s\n%s' % - (info_dict['id'], header_line, u"\n".join(formats_s))) + 'format_id': 'format code', 'ext': 'extension', + 'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen) + self.to_screen('[info] Available formats for %s:\n%s\n%s' % + (info_dict['id'], header_line, '\n'.join(formats_s))) def urlopen(self, req): """ Start an HTTP download """ @@ -1072,7 +1126,7 @@ class YoutubeDL(object): def print_debug_header(self): if not self.params.get('verbose'): return - write_string(u'[debug] youtube-dl version ' + __version__ + u'\n') + write_string('[debug] youtube-dl version ' + __version__ + '\n') try: sp = subprocess.Popen( ['git', 'rev-parse', '--short', 'HEAD'], @@ -1081,20 +1135,20 @@ class YoutubeDL(object): out, err = sp.communicate() out = out.decode().strip() if re.match('[0-9a-f]+', out): - write_string(u'[debug] Git HEAD: ' + out + u'\n') + write_string('[debug] Git HEAD: ' + out + '\n') except: try: sys.exc_clear() except: pass - write_string(u'[debug] Python version %s - %s' % - (platform.python_version(), platform_name()) + u'\n') + write_string('[debug] Python version %s - %s' % + (platform.python_version(), platform_name()) + '\n') proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) - write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n') + write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n') def _setup_opener(self): timeout_val = self.params.get('socket_timeout') @@ -1124,10 +1178,13 @@ class YoutubeDL(object): if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] proxy_handler = compat_urllib_request.ProxyHandler(proxies) + + 
debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler( - self.params.get('nocheckcertificate', False)) + self.params.get('nocheckcertificate', False), debuglevel=debuglevel) + ydlh = YoutubeDLHandler(debuglevel=debuglevel) opener = compat_urllib_request.build_opener( - https_handler, proxy_handler, cookie_processor, YoutubeDLHandler()) + https_handler, proxy_handler, cookie_processor, ydlh) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play # (See https://github.com/rg3/youtube-dl/issues/1309 for details) diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index c37d28c59..82b1ff4f4 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -38,12 +38,15 @@ __authors__ = ( 'Takuya Tsuchida', 'Sergey M.', 'Michael Orlitzky', + 'Chris Gahan', + 'Saimadhav Heblikar', ) __license__ = 'Public Domain' import codecs import getpass +import locale import optparse import os import random @@ -73,11 +76,12 @@ from .FileDownloader import ( from .extractor import gen_extractors from .version import __version__ from .YoutubeDL import YoutubeDL -from .PostProcessor import ( +from .postprocessor import ( FFmpegMetadataPP, FFmpegVideoConvertor, FFmpegExtractAudioPP, FFmpegEmbedSubtitlePP, + XAttrMetadataPP, ) @@ -185,13 +189,13 @@ def parseOpts(overrideArguments=None): general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.') general.add_option( '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR', - help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .') + help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . 
At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.') general.add_option( '--no-cache-dir', action='store_const', const=None, dest='cachedir', help='Disable filesystem caching') general.add_option( '--socket-timeout', dest='socket_timeout', - type=float, default=None, help=optparse.SUPPRESS_HELP) + type=float, default=None, help=u'Time to wait before giving up, in seconds') general.add_option( '--bidi-workaround', dest='bidi_workaround', action='store_true', help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH') @@ -213,8 +217,12 @@ def parseOpts(overrideArguments=None): selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None) selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None) selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None) - selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None) - selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None) + selection.add_option( + '--datebefore', metavar='DATE', dest='datebefore', default=None, + help='download only videos uploaded on or before this date (i.e. inclusive)') + selection.add_option( + '--dateafter', metavar='DATE', dest='dateafter', default=None, + help='download only videos uploaded on or after this date (i.e. 
inclusive)') selection.add_option( '--min-views', metavar='COUNT', dest='min_views', default=None, type=int, @@ -239,7 +247,7 @@ def parseOpts(overrideArguments=None): authentication.add_option('-n', '--netrc', action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False) authentication.add_option('--video-password', - dest='videopassword', metavar='PASSWORD', help='video password (vimeo only)') + dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)') video_format.add_option('-f', '--format', @@ -252,7 +260,7 @@ def parseOpts(overrideArguments=None): video_format.add_option('--max-quality', action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download') video_format.add_option('-F', '--list-formats', - action='store_true', dest='listformats', help='list all available formats (currently youtube only)') + action='store_true', dest='listformats', help='list all available formats') subtitles.add_option('--write-sub', '--write-srt', action='store_true', dest='writesubtitles', @@ -326,14 +334,16 @@ def parseOpts(overrideArguments=None): action='store_true', dest='verbose', help='print various debugging information', default=False) verbosity.add_option('--dump-intermediate-pages', action='store_true', dest='dump_intermediate_pages', default=False, - help='print downloaded pages to debug problems(very verbose)') + help='print downloaded pages to debug problems (very verbose)') verbosity.add_option('--write-pages', action='store_true', dest='write_pages', default=False, help='Write downloaded intermediary pages to files in the current directory to debug problems') verbosity.add_option('--youtube-print-sig-code', action='store_true', dest='youtube_print_sig_code', default=False, help=optparse.SUPPRESS_HELP) - + verbosity.add_option('--print-traffic', + dest='debug_printtraffic', action='store_true', default=False, + help=optparse.SUPPRESS_HELP) filesystem.add_option('-t', '--title', 
action='store_true', dest='usetitle', help='use title in file name (default)', default=False) @@ -350,11 +360,11 @@ def parseOpts(overrideArguments=None): '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, ' '%(autonumber)s to get an automatically incremented number, ' '%(ext)s for the filename extension, ' - '%(format)s for the format description (like "22 - 1280x720" or "HD"),' - '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"),' + '%(format)s for the format description (like "22 - 1280x720" or "HD"), ' + '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), ' '%(upload_date)s for the upload date (YYYYMMDD), ' '%(extractor)s for the provider (youtube, metacafe, etc), ' - '%(id)s for the video id , %(playlist)s for the playlist the video is in, ' + '%(id)s for the video id, %(playlist)s for the playlist the video is in, ' '%(playlist_index)s for the position in the playlist and %% for a literal percent. ' 'Use - to output to stdout. 
Can also be used to download to a different directory, ' 'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')) @@ -368,7 +378,7 @@ def parseOpts(overrideArguments=None): dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)') filesystem.add_option('--load-info', dest='load_info_filename', metavar='FILE', - help='json file containing the video information (created with the "--write-json" option') + help='json file containing the video information (created with the "--write-json" option)') filesystem.add_option('-w', '--no-overwrites', action='store_true', dest='nooverwrites', help='do not overwrite files', default=False) filesystem.add_option('-c', '--continue', @@ -412,7 +422,13 @@ def parseOpts(overrideArguments=None): postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False, help='embed subtitles in the video (only for mp4 videos)') postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False, - help='add metadata to the files') + help='write metadata to the video file') + postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False, + help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)') + postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg', + help='Prefer avconv over ffmpeg for running the postprocessors (default)') + postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg', + help='Prefer ffmpeg over avconv for running the postprocessors') parser.add_option_group(general) @@ -473,6 +489,8 @@ def parseOpts(overrideArguments=None): write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n') write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n') write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n') + write_string(u'[debug] Encodings: 
locale %r, fs %r, out %r, pref: %r\n' % + (locale.getpreferredencoding(), sys.getfilesystemencoding(), sys.stdout.encoding, preferredencoding())) return parser, opts, args @@ -517,6 +535,8 @@ def _real_main(argv=None): sys.exit(u'ERROR: batch file could not be read') all_urls = batchurls + args all_urls = [url.strip() for url in all_urls] + _enc = preferredencoding() + all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls] extractors = gen_extractors() @@ -546,7 +566,7 @@ def _real_main(argv=None): if opts.usenetrc and (opts.username is not None or opts.password is not None): parser.error(u'using .netrc conflicts with giving username/password') if opts.password is not None and opts.username is None: - parser.error(u' account username missing\n') + parser.error(u'account username missing\n') if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid): parser.error(u'using output template conflicts with using title, video ID or auto number') if opts.usetitle and opts.useid: @@ -620,6 +640,7 @@ def _real_main(argv=None): u' template'.format(outtmpl)) any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson + download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive ydl_opts = { 'usenetrc': opts.usenetrc, @@ -687,12 +708,14 @@ def _real_main(argv=None): 'cachedir': opts.cachedir, 'youtube_print_sig_code': opts.youtube_print_sig_code, 'age_limit': opts.age_limit, - 'download_archive': opts.download_archive, + 'download_archive': download_archive_fn, 'cookiefile': opts.cookiefile, 'nocheckcertificate': opts.no_check_certificate, 'proxy': opts.proxy, 'socket_timeout': opts.socket_timeout, 'bidi_workaround': opts.bidi_workaround, + 'debug_printtraffic': opts.debug_printtraffic, + 'prefer_ffmpeg': opts.prefer_ffmpeg, } with 
YoutubeDL(ydl_opts) as ydl: @@ -709,6 +732,8 @@ def _real_main(argv=None): ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo)) if opts.embedsubtitles: ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat)) + if opts.xattrs: + ydl.add_post_processor(XAttrMetadataPP()) # Update version if opts.update_self: diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py index 51e8c4778..fa983462b 100644 --- a/youtube_dl/downloader/hls.py +++ b/youtube_dl/downloader/hls.py @@ -29,7 +29,7 @@ class HlsFD(FileDownloader): retval = subprocess.call(cmd) if retval == 0: fsize = os.path.getsize(encodeFilename(tmpfilename)) - self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize)) + self.to_screen(u'\r[%s] %s bytes' % (cmd[0], fsize)) self.try_rename(tmpfilename, filename) self._hook_progress({ 'downloaded_bytes': fsize, diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py index 14b88efd3..8407727ba 100644 --- a/youtube_dl/downloader/http.py +++ b/youtube_dl/downloader/http.py @@ -133,7 +133,7 @@ class HttpFD(FileDownloader): return False try: stream.write(data_block) - except (IOError, OSError): + except (IOError, OSError) as err: self.to_stderr(u"\n") self.report_error(u'unable to write data: %s' % str(err)) return False diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index a39a1e2f4..d66f7b026 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -28,6 +28,7 @@ from .channel9 import Channel9IE from .cinemassacre import CinemassacreIE from .clipfish import ClipfishIE from .clipsyndicate import ClipsyndicateIE +from .cmt import CMTIE from .cnn import CNNIE from .collegehumor import CollegeHumorIE from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE @@ -51,6 +52,7 @@ from .ehow import EHowIE from .eighttracks import EightTracksIE from .eitb import EitbIE from .escapist import EscapistIE +from .everyonesmixtape import 
EveryonesMixtapeIE from .exfm import ExfmIE from .extremetube import ExtremeTubeIE from .facebook import FacebookIE @@ -60,11 +62,13 @@ from .fktv import ( FKTVPosteckeIE, ) from .flickr import FlickrIE +from .franceinter import FranceInterIE from .francetv import ( PluzzIE, FranceTvInfoIE, FranceTVIE, - GenerationQuoiIE + GenerationQuoiIE, + CultureboxIE, ) from .freesound import FreesoundIE from .funnyordie import FunnyOrDieIE @@ -79,7 +83,10 @@ from .hotnewhiphop import HotNewHipHopIE from .howcast import HowcastIE from .hypem import HypemIE from .ign import IGNIE, OneUPIE -from .imdb import ImdbIE +from .imdb import ( + ImdbIE, + ImdbListIE +) from .ina import InaIE from .infoq import InfoQIE from .instagram import InstagramIE @@ -91,17 +98,25 @@ from .ivi import ( from .jeuxvideo import JeuxVideoIE from .jukebox import JukeboxIE from .justintv import JustinTVIE +from .jpopsukitv import JpopsukiIE from .kankan import KankanIE from .keezmovies import KeezMoviesIE +from .khanacademy import KhanAcademyIE from .kickstarter import KickStarterIE from .keek import KeekIE from .liveleak import LiveLeakIE from .livestream import LivestreamIE, LivestreamOriginalIE +from .lynda import ( + LyndaIE, + LyndaCourseIE +) +from .macgamestore import MacGameStoreIE from .mdr import MDRIE from .metacafe import MetacafeIE from .metacritic import MetacriticIE from .mit import TechTVMITIE, MITIE from .mixcloud import MixcloudIE +from .mpora import MporaIE from .mofosex import MofosexIE from .mtv import MTVIE from .muzu import MuzuTVIE @@ -116,6 +131,7 @@ from .newgrounds import NewgroundsIE from .nhl import NHLIE, NHLVideocenterIE from .niconico import NiconicoIE from .ninegag import NineGagIE +from .novamov import NovamovIE from .nowvideo import NowVideoIE from .ooyala import OoyalaIE from .orf import ORFIE @@ -189,6 +205,7 @@ from .vimeo import ( VimeoUserIE, VimeoAlbumIE, VimeoGroupsIE, + VimeoReviewIE, ) from .vine import VineIE from .viki import VikiIE diff --git 
a/youtube_dl/extractor/academicearth.py b/youtube_dl/extractor/academicearth.py index ac05f8246..72f81d01a 100644 --- a/youtube_dl/extractor/academicearth.py +++ b/youtube_dl/extractor/academicearth.py @@ -1,3 +1,4 @@ +from __future__ import unicode_literals import re from .common import InfoExtractor @@ -5,7 +6,7 @@ from .common import InfoExtractor class AcademicEarthCourseIE(InfoExtractor): _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P[^?#/]+)' - IE_NAME = u'AcademicEarth:Course' + IE_NAME = 'AcademicEarth:Course' def _real_extract(self, url): m = re.match(self._VALID_URL, url) diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py index e7361ae06..922cede05 100644 --- a/youtube_dl/extractor/appletrailers.py +++ b/youtube_dl/extractor/appletrailers.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re import json @@ -11,46 +13,46 @@ from ..utils import ( class AppleTrailersIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P[^/]+)/(?P[^/]+)' _TEST = { - u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/", - u"playlist": [ + "url": "http://trailers.apple.com/trailers/wb/manofsteel/", + "playlist": [ { - u"file": u"manofsteel-trailer4.mov", - u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8", - u"info_dict": { - u"duration": 111, - u"title": u"Trailer 4", - u"upload_date": u"20130523", - u"uploader_id": u"wb", + "file": "manofsteel-trailer4.mov", + "md5": "d97a8e575432dbcb81b7c3acb741f8a8", + "info_dict": { + "duration": 111, + "title": "Trailer 4", + "upload_date": "20130523", + "uploader_id": "wb", }, }, { - u"file": u"manofsteel-trailer3.mov", - u"md5": u"b8017b7131b721fb4e8d6f49e1df908c", - u"info_dict": { - u"duration": 182, - u"title": u"Trailer 3", - u"upload_date": u"20130417", - u"uploader_id": u"wb", + "file": "manofsteel-trailer3.mov", + "md5": "b8017b7131b721fb4e8d6f49e1df908c", + "info_dict": { + "duration": 182, + "title": 
"Trailer 3", + "upload_date": "20130417", + "uploader_id": "wb", }, }, { - u"file": u"manofsteel-trailer.mov", - u"md5": u"d0f1e1150989b9924679b441f3404d48", - u"info_dict": { - u"duration": 148, - u"title": u"Trailer", - u"upload_date": u"20121212", - u"uploader_id": u"wb", + "file": "manofsteel-trailer.mov", + "md5": "d0f1e1150989b9924679b441f3404d48", + "info_dict": { + "duration": 148, + "title": "Trailer", + "upload_date": "20121212", + "uploader_id": "wb", }, }, { - u"file": u"manofsteel-teaser.mov", - u"md5": u"5fe08795b943eb2e757fa95cb6def1cb", - u"info_dict": { - u"duration": 93, - u"title": u"Teaser", - u"upload_date": u"20120721", - u"uploader_id": u"wb", + "file": "manofsteel-teaser.mov", + "md5": "5fe08795b943eb2e757fa95cb6def1cb", + "info_dict": { + "duration": 93, + "title": "Teaser", + "upload_date": "20120721", + "uploader_id": "wb", }, } ] diff --git a/youtube_dl/extractor/archiveorg.py b/youtube_dl/extractor/archiveorg.py index 8bb546410..34ce8429b 100644 --- a/youtube_dl/extractor/archiveorg.py +++ b/youtube_dl/extractor/archiveorg.py @@ -1,9 +1,10 @@ +from __future__ import unicode_literals + import json import re from .common import InfoExtractor from ..utils import ( - determine_ext, unified_strdate, ) @@ -13,23 +14,22 @@ class ArchiveOrgIE(InfoExtractor): IE_DESC = 'archive.org videos' _VALID_URL = r'(?:https?://)?(?:www\.)?archive\.org/details/(?P[^?/]+)(?:[?].*)?$' _TEST = { - u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect", - u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv', - u'md5': u'8af1d4cf447933ed3c7f4871162602db', - u'info_dict': { - u"title": u"1968 Demo - FJCC Conference Presentation Reel #1", - u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team 
astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also Doug's 1968 Demo page for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | Reel 2 | Reel 3", - u"upload_date": u"19681210", - u"uploader": u"SRI International" + "url": "http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect", + 'file': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv', + 'md5': '8af1d4cf447933ed3c7f4871162602db', + 'info_dict': { + "title": "1968 Demo - FJCC Conference Presentation Reel #1", + "description": "Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also Doug's 1968 Demo page for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | Reel 2 | Reel 3", + "upload_date": "19681210", + "uploader": "SRI International" } } - def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - json_url = url + (u'?' if u'?' in url else '&') + u'output=json' + json_url = url + ('?' if '?' 
in url else '&') + 'output=json' json_data = self._download_webpage(json_url, video_id) data = json.loads(json_data) @@ -38,16 +38,16 @@ class ArchiveOrgIE(InfoExtractor): uploader = data['metadata']['creator'][0] upload_date = unified_strdate(data['metadata']['date'][0]) - formats = [{ + formats = [ + { 'format': fdata['format'], 'url': 'http://' + data['server'] + data['dir'] + fn, 'file_size': int(fdata['size']), } - for fn,fdata in data['files'].items() + for fn, fdata in data['files'].items() if 'Video' in fdata['format']] - formats.sort(key=lambda fdata: fdata['file_size']) - for f in formats: - f['ext'] = determine_ext(f['url']) + + self._sort_formats(formats) return { '_type': 'video', diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index 9254fbfe0..7cf3785ac 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -1,4 +1,6 @@ # encoding: utf-8 +from __future__ import unicode_literals + import re import json @@ -22,7 +24,7 @@ class ArteTvIE(InfoExtractor): _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?Pfr|de)/(?P.+?)/(?P.+)' _LIVE_URL = r'index-[0-9]+\.html$' - IE_NAME = u'arte.tv' + IE_NAME = 'arte.tv' @classmethod def suitable(cls, url): @@ -37,7 +39,7 @@ class ArteTvIE(InfoExtractor): # r'src="(.*?/videothek_js.*?\.js)', # 0, # [ - # (1, 'url', u'Invalid URL: %s' % url) + # (1, 'url', 'Invalid URL: %s' % url) # ] # ) # http_host = url.split('/')[2] @@ -49,12 +51,12 @@ class ArteTvIE(InfoExtractor): # '(rtmp://.*?)\'', # re.DOTALL, # [ - # (1, 'path', u'could not extract video path: %s' % url), - # (2, 'player', u'could not extract video player: %s' % url), - # (3, 'url', u'could not extract video url: %s' % url) + # (1, 'path', 'could not extract video path: %s' % url), + # (2, 'player', 'could not extract video player: %s' % url), + # (3, 'url', 'could not extract video url: %s' % url) # ] # ) - # video_url = u'%s/%s' % (info.get('url'), info.get('path')) + # video_url = '%s/%s' % (info.get('url'), 
info.get('path')) def _real_extract(self, url): mobj = re.match(self._VIDEOS_URL, url) @@ -107,9 +109,9 @@ class ArteTvIE(InfoExtractor): def _extract_liveweb(self, url, name, lang): """Extract form http://liveweb.arte.tv/""" webpage = self._download_webpage(url, name) - video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id') + video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id') config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id, - video_id, u'Downloading information') + video_id, 'Downloading information') event_doc = config_doc.find('event') url_node = event_doc.find('video').find('urlHd') if url_node is None: @@ -124,7 +126,7 @@ class ArteTvIE(InfoExtractor): class ArteTVPlus7IE(InfoExtractor): - IE_NAME = u'arte.tv:+7' + IE_NAME = 'arte.tv:+7' _VALID_URL = r'https?://www\.arte.tv/guide/(?Pfr|de)/(?:(?:sendungen|emissions)/)?(?P.*?)/(?P.*?)(\?.*)?' @classmethod @@ -207,7 +209,7 @@ class ArteTVPlus7IE(InfoExtractor): if bitrate is not None: quality += '-%d' % bitrate if format_info.get('versionCode') is not None: - format_id = u'%s-%s' % (quality, format_info['versionCode']) + format_id = '%s-%s' % (quality, format_info['versionCode']) else: format_id = quality info = { @@ -216,7 +218,7 @@ class ArteTVPlus7IE(InfoExtractor): 'width': format_info.get('width'), 'height': height, } - if format_info['mediaType'] == u'rtmp': + if format_info['mediaType'] == 'rtmp': info['url'] = format_info['streamer'] info['play_path'] = 'mp4:' + format_info['url'] info['ext'] = 'flv' @@ -231,27 +233,27 @@ class ArteTVPlus7IE(InfoExtractor): # It also uses the arte_vp_url url from the webpage to extract the information class ArteTVCreativeIE(ArteTVPlus7IE): - IE_NAME = u'arte.tv:creative' + IE_NAME = 'arte.tv:creative' _VALID_URL = r'https?://creative\.arte\.tv/(?Pfr|de)/magazine?/(?P.+)' _TEST = { - u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design', - 
u'file': u'050489-002.mp4', - u'info_dict': { - u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design', + 'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design', + 'file': '050489-002.mp4', + 'info_dict': { + 'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design', }, } class ArteTVFutureIE(ArteTVPlus7IE): - IE_NAME = u'arte.tv:future' + IE_NAME = 'arte.tv:future' _VALID_URL = r'https?://future\.arte\.tv/(?Pfr|de)/(thema|sujet)/.*?#article-anchor-(?P\d+)' _TEST = { - u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081', - u'file': u'050940-003.mp4', - u'info_dict': { - u'title': u'Les champignons au secours de la planète', + 'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081', + 'file': '050940-003.mp4', + 'info_dict': { + 'title': 'Les champignons au secours de la planète', }, } @@ -263,7 +265,7 @@ class ArteTVFutureIE(ArteTVPlus7IE): class ArteTVDDCIE(ArteTVPlus7IE): - IE_NAME = u'arte.tv:ddc' + IE_NAME = 'arte.tv:ddc' _VALID_URL = r'http?://ddc\.arte\.tv/(?Pemission|folge)/(?P.+)' def _real_extract(self, url): diff --git a/youtube_dl/extractor/auengine.py b/youtube_dl/extractor/auengine.py index bcccc0b7a..c6f30e626 100644 --- a/youtube_dl/extractor/auengine.py +++ b/youtube_dl/extractor/auengine.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re from .common import InfoExtractor @@ -7,13 +9,14 @@ from ..utils import ( ExtractorError, ) + class AUEngineIE(InfoExtractor): _TEST = { - u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370', - u'file': u'lfvlytY6.mp4', - u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f', - u'info_dict': { - u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]" + 'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370', + 'file': 'lfvlytY6.mp4', + 'md5': '48972bdbcf1a3a2f5533e62425b41d4f', + 'info_dict': { + 'title': '[Commie]The Legend of the Legendary Heroes - 
03 - Replication Eye (Alpha Stigma)[F9410F5A]' } } _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed\.php\?.*?file=([^&]+).*?' @@ -23,7 +26,7 @@ class AUEngineIE(InfoExtractor): video_id = mobj.group(1) webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'(?P<title>.+?)', - webpage, u'title') + webpage, 'title') title = title.strip() links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage) links = map(compat_urllib_parse.unquote, links) @@ -37,7 +40,7 @@ class AUEngineIE(InfoExtractor): video_url = link if not video_url: raise ExtractorError(u'Could not find video URL') - ext = u'.' + determine_ext(video_url) + ext = '.' + determine_ext(video_url) if ext == title[-len(ext):]: title = title[:-len(ext)] diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py index d48c0c38d..ccd31c4c7 100644 --- a/youtube_dl/extractor/bambuser.py +++ b/youtube_dl/extractor/bambuser.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re import json import itertools @@ -9,26 +11,26 @@ from ..utils import ( class BambuserIE(InfoExtractor): - IE_NAME = u'bambuser' + IE_NAME = 'bambuser' _VALID_URL = r'https?://bambuser\.com/v/(?P\d+)' _API_KEY = '005f64509e19a868399060af746a00aa' _TEST = { - u'url': u'http://bambuser.com/v/4050584', + 'url': 'http://bambuser.com/v/4050584', # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388 - #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641', - u'info_dict': { - u'id': u'4050584', - u'ext': u'flv', - u'title': u'Education engineering days - lightning talks', - u'duration': 3741, - u'uploader': u'pixelversity', - u'uploader_id': u'344706', + #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641', + 'info_dict': { + 'id': '4050584', + 'ext': 'flv', + 'title': 'Education engineering days - lightning talks', + 'duration': 3741, + 'uploader': 'pixelversity', + 'uploader_id': '344706', }, - u'params': { + 'params': { # It doesn't respect the 'Range' 
header, it would download the whole video # caused the travis builds to fail: https://travis-ci.org/rg3/youtube-dl/jobs/14493845#L59 - u'skip_download': True, + 'skip_download': True, }, } @@ -53,7 +55,7 @@ class BambuserIE(InfoExtractor): class BambuserChannelIE(InfoExtractor): - IE_NAME = u'bambuser:channel' + IE_NAME = 'bambuser:channel' _VALID_URL = r'https?://bambuser\.com/channel/(?P.*?)(?:/|#|\?|$)' # The maximum number we can get with each request _STEP = 50 @@ -72,7 +74,7 @@ class BambuserChannelIE(InfoExtractor): # Without setting this header, we wouldn't get any result req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) info_json = self._download_webpage(req, user, - u'Downloading page %d' % i) + 'Downloading page %d' % i) results = json.loads(info_json)['result'] if len(results) == 0: break diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py index 3a32c14c5..886b0dfab 100644 --- a/youtube_dl/extractor/bandcamp.py +++ b/youtube_dl/extractor/bandcamp.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import json import re @@ -10,16 +12,16 @@ from ..utils import ( class BandcampIE(InfoExtractor): - IE_NAME = u'Bandcamp' _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P.*)' _TESTS = [{ - u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song', - u'file': u'1812978515.mp3', - u'md5': u'cdeb30cdae1921719a3cbcab696ef53c', - u'info_dict': { - u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad" + 'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song', + 'file': '1812978515.mp3', + 'md5': 'c557841d5e50261777a6585648adf439', + 'info_dict': { + "title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad", + "duration": 10, }, - u'skip': u'There is a limit of 200 free downloads / month for the test song' + '_skip': 'There is a limit of 200 free downloads / month for the test song' }] def _real_extract(self, url): @@ -30,85 +32,98 @@ class 
BandcampIE(InfoExtractor): m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage) if m_download is None: m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage) - if m_trackinfo: - json_code = m_trackinfo.group(1) - data = json.loads(json_code) + if m_trackinfo: + json_code = m_trackinfo.group(1) + data = json.loads(json_code) + d = data[0] + + duration = int(round(d['duration'])) + formats = [] + for format_id, format_url in d['file'].items(): + ext, _, abr_str = format_id.partition('-') + + formats.append({ + 'format_id': format_id, + 'url': format_url, + 'ext': format_id.partition('-')[0], + 'vcodec': 'none', + 'acodec': format_id.partition('-')[0], + 'abr': int(format_id.partition('-')[2]), + }) + + self._sort_formats(formats) - for d in data: - formats = [{ - 'format_id': 'format_id', - 'url': format_url, - 'ext': format_id.partition('-')[0] - } for format_id, format_url in sorted(d['file'].items())] return { 'id': compat_str(d['id']), 'title': d['title'], 'formats': formats, + 'duration': duration, } - else: - raise ExtractorError(u'No free songs found') + else: + raise ExtractorError('No free songs found') download_link = m_download.group(1) - id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', - webpage, re.MULTILINE|re.DOTALL).group('id') + video_id = re.search( + r'var TralbumData = {(.*?)id: (?P<id>\d*?)$', + webpage, re.MULTILINE | re.DOTALL).group('id') - download_webpage = self._download_webpage(download_link, id, + download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page') # We get the dictionary of the track from some javascrip code info = re.search(r'items: (.*?),$', download_webpage, re.MULTILINE).group(1) info = json.loads(info)[0] # We pick mp3-320 for now, until format selection can be easily implemented. 
- mp3_info = info[u'downloads'][u'mp3-320'] + mp3_info = info['downloads']['mp3-320'] # If we try to use this url it says the link has expired - initial_url = mp3_info[u'url'] + initial_url = mp3_info['url'] re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$' m_url = re.match(re_url, initial_url) #We build the url we will use to get the final track url # This url is build in Bandcamp in the script download_bunde_*.js - request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts')) - final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url') + request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts')) + final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') # If we could correctly generate the .rand field the url would be #in the "download_url" key final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1) - track_info = {'id':id, - 'title' : info[u'title'], - 'ext' : 'mp3', - 'url' : final_url, - 'thumbnail' : info[u'thumb_url'], - 'uploader' : info[u'artist'] - } - - return [track_info] + return { + 'id': video_id, + 'title': info['title'], + 'ext': 'mp3', + 'vcodec': 'none', + 'url': final_url, + 'thumbnail': info.get('thumb_url'), + 'uploader': info.get('artist'), + } class BandcampAlbumIE(InfoExtractor): - IE_NAME = u'Bandcamp:album' + IE_NAME = 'Bandcamp:album' _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)' _TEST = { - u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', - u'playlist': [ + 'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', + 'playlist': [ { - u'file': u'1353101989.mp3', - u'md5': u'39bc1eded3476e927c724321ddf116cf', - 
u'info_dict': { - u'title': u'Intro', + 'file': '1353101989.mp3', + 'md5': '39bc1eded3476e927c724321ddf116cf', + 'info_dict': { + 'title': 'Intro', } }, { - u'file': u'38097443.mp3', - u'md5': u'1a2c32e2691474643e912cc6cd4bffaa', - u'info_dict': { - u'title': u'Kero One - Keep It Alive (Blazo remix)', + 'file': '38097443.mp3', + 'md5': '1a2c32e2691474643e912cc6cd4bffaa', + 'info_dict': { + 'title': 'Kero One - Keep It Alive (Blazo remix)', } }, ], - u'params': { - u'playlistend': 2 + 'params': { + 'playlistend': 2 }, - u'skip': u'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test' + 'skip': 'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test' } def _real_extract(self, url): @@ -117,11 +132,11 @@ class BandcampAlbumIE(InfoExtractor): webpage = self._download_webpage(url, title) tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage) if not tracks_paths: - raise ExtractorError(u'The page doesn\'t contain any track') + raise ExtractorError('The page doesn\'t contain any tracks') entries = [ self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key()) for t_path in tracks_paths] - title = self._search_regex(r'album_title : "(.*?)"', webpage, u'title') + title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title') return { '_type': 'playlist', 'title': title, diff --git a/youtube_dl/extractor/blinkx.py b/youtube_dl/extractor/blinkx.py index 144ce64cc..96408e4e0 100644 --- a/youtube_dl/extractor/blinkx.py +++ b/youtube_dl/extractor/blinkx.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import datetime import json import re @@ -10,19 +12,19 @@ from ..utils import ( class BlinkxIE(InfoExtractor): _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)' - _IE_NAME = u'blinkx' + IE_NAME = 'blinkx' _TEST = { - u'url': 
u'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB', - u'file': u'8aQUy7GV.mp4', - u'md5': u'2e9a07364af40163a908edbf10bb2492', - u'info_dict': { - u"title": u"Police Car Rolls Away", - u"uploader": u"stupidvideos.com", - u"upload_date": u"20131215", - u"description": u"A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!", - u"duration": 14.886, - u"thumbnails": [{ + 'url': 'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB', + 'file': '8aQUy7GV.mp4', + 'md5': '2e9a07364af40163a908edbf10bb2492', + 'info_dict': { + "title": "Police Car Rolls Away", + "uploader": "stupidvideos.com", + "upload_date": "20131215", + "description": "A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!", + "duration": 14.886, + "thumbnails": [{ "width": 100, "height": 76, "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg", @@ -30,17 +32,17 @@ class BlinkxIE(InfoExtractor): }, } - def _real_extract(self, url): - m = re.match(self._VALID_URL, url) + def _real_extract(self, rl): + m = re.match(self._VALID_URL, rl) video_id = m.group('id') display_id = video_id[:8] api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' + - u'video=%s' % video_id) + 'video=%s' % video_id) data_json = self._download_webpage(api_url, display_id) data = json.loads(data_json)['api']['results'][0] dt = datetime.datetime.fromtimestamp(data['pubdate_epoch']) - upload_date = dt.strftime('%Y%m%d') + pload_date = dt.strftime('%Y%m%d') duration = None thumbnails = [] @@ -61,9 +63,10 @@ class BlinkxIE(InfoExtractor): elif m['type'] in ('flv', 'mp4'): vcodec = remove_start(m['vcodec'], 'ff') acodec = remove_start(m['acodec'], 'ff') + tbr = (int(m['vbr']) + int(m['abr'])) // 1000 format_id = (u'%s-%sk-%s' % (vcodec, - 
(int(m['vbr']) + int(m['abr'])) // 1000, + tbr, m['w'])) formats.append({ 'format_id': format_id, @@ -72,10 +75,12 @@ class BlinkxIE(InfoExtractor): 'acodec': acodec, 'abr': int(m['abr']) // 1000, 'vbr': int(m['vbr']) // 1000, + 'tbr': tbr, 'width': int(m['w']), 'height': int(m['h']), }) - formats.sort(key=lambda f: (f['width'], f['vbr'], f['abr'])) + + self._sort_formats(formats) return { 'id': display_id, @@ -83,7 +88,7 @@ class BlinkxIE(InfoExtractor): 'title': data['title'], 'formats': formats, 'uploader': data['channel_name'], - 'upload_date': upload_date, + 'upload_date': pload_date, 'description': data.get('description'), 'thumbnails': thumbnails, 'duration': duration, diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py index c7e0a53c1..3ce9b5324 100644 --- a/youtube_dl/extractor/bliptv.py +++ b/youtube_dl/extractor/bliptv.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import datetime import json import re @@ -6,10 +8,8 @@ import socket from .common import InfoExtractor from ..utils import ( compat_http_client, - compat_parse_qs, compat_str, compat_urllib_error, - compat_urllib_parse_urlparse, compat_urllib_request, ExtractorError, @@ -20,42 +20,36 @@ from ..utils import ( class BlipTVIE(InfoExtractor): """Information extractor for blip.tv""" - _VALID_URL = r'^(?:https?://)?(?:www\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$' - _URL_EXT = r'^.*\.([a-z0-9]+)$' - IE_NAME = u'blip.tv' + _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$' + _TEST = { - u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352', - u'file': u'5779306.m4v', - u'md5': u'80baf1ec5c3d2019037c1c707d676b9f', - u'info_dict': { - u"upload_date": u"20111205", - u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596", - u"uploader": u"Comic Book Resources - CBR TV", - u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3" + 'url': 
'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352', + 'file': '5779306.mov', + 'md5': 'c6934ad0b6acf2bd920720ec888eb812', + 'info_dict': { + 'upload_date': '20111205', + 'description': 'md5:9bc31f227219cde65e47eeec8d2dc596', + 'uploader': 'Comic Book Resources - CBR TV', + 'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3', } } def report_direct_download(self, title): """Report information extraction.""" - self.to_screen(u'%s: Direct download detected' % title) + self.to_screen('%s: Direct download detected' % title) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) + raise ExtractorError('Invalid URL: %s' % url) # See https://github.com/rg3/youtube-dl/issues/857 - api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url) - if api_mobj is not None: - url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id') - urlp = compat_urllib_parse_urlparse(url) - if urlp.path.startswith('/play/'): - response = self._request_webpage(url, None, False) - redirecturl = response.geturl() - rurlp = compat_urllib_parse_urlparse(redirecturl) - file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2] - url = 'http://blip.tv/a/a-' + file_id - return self._real_extract(url) + embed_mobj = re.search(r'^(?:https?://)?(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url) + if embed_mobj: + info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1) + info_page = self._download_webpage(info_url, embed_mobj.group(1)) + video_id = self._search_regex(r'data-episode-id="(\d+)', info_page, 'video_id') + return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV') if '?' 
in url: cchar = '&' @@ -66,13 +60,13 @@ class BlipTVIE(InfoExtractor): request.add_header('User-Agent', 'iTunes/10.6.1') self.report_extraction(mobj.group(1)) urlh = self._request_webpage(request, None, False, - u'unable to download video info webpage') + 'unable to download video info webpage') try: json_code_bytes = urlh.read() json_code = json_code_bytes.decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err)) + raise ExtractorError('Unable to read video info webpage: %s' % compat_str(err)) try: json_data = json.loads(json_code) @@ -82,32 +76,38 @@ class BlipTVIE(InfoExtractor): data = json_data upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') + formats = [] if 'additionalMedia' in data: - formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height'])) - best_format = formats[-1] - video_url = best_format['url'] + for f in sorted(data['additionalMedia'], key=lambda f: int(f['media_height'])): + if not int(f['media_width']): # filter m3u8 + continue + formats.append({ + 'url': f['url'], + 'format_id': f['role'], + 'width': int(f['media_width']), + 'height': int(f['media_height']), + }) else: - video_url = data['media']['url'] - umobj = re.match(self._URL_EXT, video_url) - if umobj is None: - raise ValueError('Can not determine filename extension') - ext = umobj.group(1) + formats.append({ + 'url': data['media']['url'], + 'width': int(data['media']['width']), + 'height': int(data['media']['height']), + }) + + self._sort_formats(formats) return { 'id': compat_str(data['item_id']), - 'url': video_url, 'uploader': data['display_name'], 'upload_date': upload_date, 'title': data['title'], - 'ext': ext, - 'format': data['media']['mimeType'], 'thumbnail': data['thumbnailUrl'], 'description': data['description'], - 'player_url': data['embedUrl'], 'user_agent': 
'iTunes/10.6.1', + 'formats': formats, } except (ValueError, KeyError) as err: - raise ExtractorError(u'Unable to parse video information: %s' % repr(err)) + raise ExtractorError('Unable to parse video information: %s' % repr(err)) class BlipTVUserIE(InfoExtractor): @@ -115,19 +115,19 @@ class BlipTVUserIE(InfoExtractor): _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$' _PAGE_SIZE = 12 - IE_NAME = u'blip.tv:user' + IE_NAME = 'blip.tv:user' def _real_extract(self, url): # Extract username mobj = re.match(self._VALID_URL, url) if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) + raise ExtractorError('Invalid URL: %s' % url) username = mobj.group(1) page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1' - page = self._download_webpage(url, username, u'Downloading user page') + page = self._download_webpage(url, username, 'Downloading user page') mobj = re.search(r'data-users-id="([^"]+)"', page) page_base = page_base % mobj.group(1) @@ -143,7 +143,7 @@ class BlipTVUserIE(InfoExtractor): while True: url = page_base + "&page=" + str(pagenum) page = self._download_webpage(url, username, - u'Downloading video ids from page %d' % pagenum) + 'Downloading video ids from page %d' % pagenum) # Extract video identifiers ids_in_page = [] @@ -165,6 +165,6 @@ class BlipTVUserIE(InfoExtractor): pagenum += 1 - urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids] + urls = ['http://blip.tv/%s' % video_id for video_id in video_ids] url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls] return [self.playlist_result(url_entries, playlist_title = username)] diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py index 755d9c9ef..d18bc7e0c 100644 --- a/youtube_dl/extractor/bloomberg.py +++ b/youtube_dl/extractor/bloomberg.py @@ -1,6 +1,7 @@ import re from .common import InfoExtractor +from .ooyala import OoyalaIE class BloombergIE(InfoExtractor): @@ -23,5 +24,5 @@ 
class BloombergIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) name = mobj.group('name') webpage = self._download_webpage(url, name) - ooyala_url = self._og_search_video_url(webpage) - return self.url_result(ooyala_url, ie='Ooyala') + ooyala_code = self._search_regex(r'<source src="http://player.ooyala.com/player/[^/]+/([^".]+)', webpage, u'ooyala url') + return OoyalaIE._build_url_result(ooyala_code) diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index f7f0041c0..8ac38f4aa 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -1,4 +1,5 @@ # encoding: utf-8 +from __future__ import unicode_literals import re import json @@ -13,6 +14,7 @@ from ..utils import ( compat_urllib_request, ExtractorError, + unsmuggle_url, ) @@ -24,47 +26,47 @@ class BrightcoveIE(InfoExtractor): _TESTS = [ { # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/ - u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', - u'file': u'2371591881001.mp4', - u'md5': u'5423e113865d26e40624dce2e4b45d95', - u'note': u'Test Brightcove downloads and detection in GenericIE', - u'info_dict': { - u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', - u'uploader': u'8TV', - u'description': u'md5:a950cc4285c43e44d763d036710cd9cd', + 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', + 'file': '2371591881001.mp4', + 'md5': '5423e113865d26e40624dce2e4b45d95', + 'note': 'Test Brightcove downloads and detection in GenericIE', + 'info_dict': { + 'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', + 'uploader': '8TV', + 'description': 'md5:a950cc4285c43e44d763d036710cd9cd', } }, { # From 
http://medianetwork.oracle.com/video/player/1785452137001 - u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', - u'file': u'1785452137001.flv', - u'info_dict': { - u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', - u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.', - u'uploader': u'Oracle', + 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', + 'file': '1785452137001.flv', + 'info_dict': { + 'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', + 'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.', + 'uploader': 'Oracle', }, }, { # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/ - u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', - u'info_dict': { - u'id': u'2750934548001', - u'ext': u'mp4', - u'title': u'This Bracelet Acts as a Personal Thermostat', - u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0', - u'uploader': u'Mashable', + 'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001', + 'info_dict': { + 'id': '2750934548001', + 'ext': 'mp4', + 'title': 'This Bracelet Acts as a Personal Thermostat', + 'description': 'md5:547b78c64f4112766ccf4e151c20b6a0', + 'uploader': 'Mashable', }, }, { # test that the default referer works # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/ - u'url': u'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', - u'info_dict': { - u'id': u'2878862109001', - u'ext': u'mp4', - 
u'title': u'Lost in Motion II', - u'description': u'md5:363109c02998fee92ec02211bd8000df', - u'uploader': u'National Ballet of Canada', + 'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001', + 'info_dict': { + 'id': '2878862109001', + 'ext': 'mp4', + 'title': 'Lost in Motion II', + 'description': 'md5:363109c02998fee92ec02211bd8000df', + 'uploader': 'National Ballet of Canada', }, }, ] @@ -80,13 +82,13 @@ class BrightcoveIE(InfoExtractor): object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>', lambda m: m.group(1) + '/>', object_str) # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608 - object_str = object_str.replace(u'<--', u'<!--') + object_str = object_str.replace('<--', '<!--') object_doc = xml.etree.ElementTree.fromstring(object_str) - assert u'BrightcoveExperience' in object_doc.attrib['class'] - params = {'flashID': object_doc.attrib['id'], - 'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'], - } + assert 'BrightcoveExperience' in object_doc.attrib['class'] + params = { + 'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'], + } def find_param(name): node = find_xpath_attr(object_doc, './param', 'name', name) if node is not None: @@ -120,6 +122,8 @@ class BrightcoveIE(InfoExtractor): return None def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + # Change the 'videoId' and others field to '@videoPlayer' url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url) # Change bckey (used by bcove.me urls) to playerKey @@ -130,9 +134,10 @@ class BrightcoveIE(InfoExtractor): videoPlayer = query.get('@videoPlayer') if videoPlayer: - return self._get_video_info(videoPlayer[0], query_str, query, - # We set the original url as the default 'Referer' header - referer=url) + # We set the original url as the default 'Referer' header + referer = 
smuggled_data.get('Referer', url) + return self._get_video_info( + videoPlayer[0], query_str, query, referer=referer) else: player_key = query['playerKey'] return self._get_playlist_info(player_key[0]) @@ -156,11 +161,11 @@ class BrightcoveIE(InfoExtractor): def _get_playlist_info(self, player_key): playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key, - player_key, u'Downloading playlist information') + player_key, 'Downloading playlist information') json_data = json.loads(playlist_info) if 'videoList' not in json_data: - raise ExtractorError(u'Empty playlist') + raise ExtractorError('Empty playlist') playlist_info = json_data['videoList'] videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']] @@ -189,5 +194,5 @@ class BrightcoveIE(InfoExtractor): 'url': video_info['FLVFullLengthURL'], }) else: - raise ExtractorError(u'Unable to extract video url for %s' % info['id']) + raise ExtractorError('Unable to extract video url for %s' % info['id']) return info diff --git a/youtube_dl/extractor/c56.py b/youtube_dl/extractor/c56.py index dc3a8d47d..690bc7c25 100644 --- a/youtube_dl/extractor/c56.py +++ b/youtube_dl/extractor/c56.py @@ -1,21 +1,21 @@ # coding: utf-8 +from __future__ import unicode_literals import re import json from .common import InfoExtractor -from ..utils import determine_ext + class C56IE(InfoExtractor): _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)' - IE_NAME = u'56.com' - - _TEST ={ - u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html', - u'file': u'93440716.flv', - u'md5': u'e59995ac63d0457783ea05f93f12a866', - u'info_dict': { - u'title': u'网事知多少 第32期:车怒', + IE_NAME = '56.com' + _TEST = { + 'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html', + 'file': '93440716.flv', + 'md5': 'e59995ac63d0457783ea05f93f12a866', + 'info_dict': { + 'title': '网事知多少 第32期:车怒', }, } @@ -23,14 +23,18 @@ class C56IE(InfoExtractor): 
mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) text_id = mobj.group('textid') info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id, - text_id, u'Downloading video info') + text_id, 'Downloading video info') info = json.loads(info_page)['info'] - best_format = sorted(info['rfiles'], key=lambda f: int(f['filesize']))[-1] - video_url = best_format['url'] + formats = [{ + 'format_id': f['type'], + 'filesize': int(f['filesize']), + 'url': f['url'] + } for f in info['rfiles']] + self._sort_formats(formats) - return {'id': info['vid'], - 'title': info['Subject'], - 'url': video_url, - 'ext': determine_ext(video_url), - 'thumbnail': info.get('bimg') or info.get('img'), - } + return { + 'id': info['vid'], + 'title': info['Subject'], + 'formats': formats, + 'thumbnail': info.get('bimg') or info.get('img'), + } diff --git a/youtube_dl/extractor/channel9.py b/youtube_dl/extractor/channel9.py index ae70ea229..574881b70 100644 --- a/youtube_dl/extractor/channel9.py +++ b/youtube_dl/extractor/channel9.py @@ -76,14 +76,18 @@ class Channel9IE(InfoExtractor): </div>)? 
# File size part may be missing ''' # Extract known formats - formats = [{'url': x.group('url'), - 'format_id': x.group('quality'), - 'format_note': x.group('note'), - 'format': '%s (%s)' % (x.group('quality'), x.group('note')), - 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate - } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] - # Sort according to known formats list - formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id'])) + formats = [{ + 'url': x.group('url'), + 'format_id': x.group('quality'), + 'format_note': x.group('note'), + 'format': u'%s (%s)' % (x.group('quality'), x.group('note')), + 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate + 'preference': self._known_formats.index(x.group('quality')), + 'vcodec': 'none' if x.group('note') == 'Audio only' else None, + } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] + + self._sort_formats(formats) + return formats def _extract_title(self, html): diff --git a/youtube_dl/extractor/cmt.py b/youtube_dl/extractor/cmt.py new file mode 100644 index 000000000..88e0e9aba --- /dev/null +++ b/youtube_dl/extractor/cmt.py @@ -0,0 +1,19 @@ +from .mtv import MTVIE + +class CMTIE(MTVIE): + IE_NAME = u'cmt.com' + _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml' + _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/' + + _TESTS = [ + { + u'url': u'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061', + u'md5': u'e6b7ef3c4c45bbfae88061799bbba6c2', + u'info_dict': { + u'id': u'989124', + u'ext': u'mp4', + u'title': u'Garth Brooks - "The Call (featuring Trisha Yearwood)"', + u'description': u'Blame It All On My Roots', + }, + }, + ] diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py index ecac5e0e9..80bf59ade 100644 --- a/youtube_dl/extractor/cnn.py +++ 
b/youtube_dl/extractor/cnn.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re from .common import InfoExtractor @@ -12,23 +14,24 @@ class CNNIE(InfoExtractor): (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))''' _TESTS = [{ - u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn', - u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4', - u'md5': u'3e6121ea48df7e2259fe73a0628605c4', - u'info_dict': { - u'title': u'Nadal wins 8th French Open title', - u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.', - u'duration': 135, - u'upload_date': u'20130609', + 'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn', + 'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4', + 'md5': '3e6121ea48df7e2259fe73a0628605c4', + 'info_dict': { + 'title': 'Nadal wins 8th French Open title', + 'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.', + 'duration': 135, + 'upload_date': '20130609', }, }, { - u"url": u"http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29", - u"file": u"us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4", - u"md5": u"b5cc60c60a3477d185af8f19a2a26f4e", - u"info_dict": { - u"title": "Student's epic speech stuns new freshmen", - u"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"" + "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29", + "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4", + "md5": 
"b5cc60c60a3477d185af8f19a2a26f4e", + "info_dict": { + "title": "Student's epic speech stuns new freshmen", + "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"", + "upload_date": "20130821", } }] @@ -36,7 +39,7 @@ class CNNIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) path = mobj.group('path') page_title = mobj.group('title') - info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path + info_url = 'http://cnn.com/video/data/3.0/%s/index.xml' % path info = self._download_xml(info_url, page_title) formats = [] diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py index b27c1dfc5..d10b7bd0c 100644 --- a/youtube_dl/extractor/collegehumor.py +++ b/youtube_dl/extractor/collegehumor.py @@ -1,82 +1,68 @@ +from __future__ import unicode_literals + +import json import re from .common import InfoExtractor -from ..utils import ( - compat_urllib_parse_urlparse, - determine_ext, - - ExtractorError, -) class CollegeHumorIE(InfoExtractor): _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$' _TESTS = [{ - u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', - u'file': u'6902724.mp4', - u'md5': u'1264c12ad95dca142a9f0bf7968105a0', - u'info_dict': { - u'title': u'Comic-Con Cosplay Catastrophe', - u'description': u'Fans get creative this year at San Diego. Too creative. And yes, that\'s really Joss Whedon.', + 'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', + 'file': '6902724.mp4', + 'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd', + 'info_dict': { + 'title': 'Comic-Con Cosplay Catastrophe', + 'description': 'Fans get creative this year at San Diego. 
Too', + 'age_limit': 13, }, }, { - u'url': u'http://www.collegehumor.com/video/3505939/font-conference', - u'file': u'3505939.mp4', - u'md5': u'c51ca16b82bb456a4397987791a835f5', - u'info_dict': { - u'title': u'Font Conference', - u'description': u'This video wasn\'t long enough, so we made it double-spaced.', + 'url': 'http://www.collegehumor.com/video/3505939/font-conference', + 'file': '3505939.mp4', + 'md5': '72fa701d8ef38664a4dbb9e2ab721816', + 'info_dict': { + 'title': 'Font Conference', + 'description': 'This video wasn\'t long enough, so we made it double-spaced.', + 'age_limit': 10, }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) video_id = mobj.group('videoid') - info = { - 'id': video_id, - 'uploader': None, - 'upload_date': None, - } + jsonUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id + '.json' + data = json.loads(self._download_webpage( + jsonUrl, video_id, 'Downloading info JSON')) + vdata = data['video'] - self.report_extraction(video_id) - xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id - mdoc = self._download_xml(xmlUrl, video_id, - u'Downloading info XML', - u'Unable to download video info XML') - - try: - videoNode = mdoc.findall('./video')[0] - youtubeIdNode = videoNode.find('./youtubeID') - if youtubeIdNode is not None: - return self.url_result(youtubeIdNode.text, 'Youtube') - info['description'] = videoNode.findall('./description')[0].text - info['title'] = videoNode.findall('./caption')[0].text - info['thumbnail'] = videoNode.findall('./thumbnail')[0].text - next_url = videoNode.findall('./file')[0].text - except IndexError: - raise ExtractorError(u'Invalid metadata XML file') - - if next_url.endswith(u'manifest.f4m'): - manifest_url = next_url + '?hdcore=2.10.3' - adoc = self._download_xml(manifest_url, video_id, - u'Downloading XML manifest', - u'Unable to download video info XML') - - try: - video_id = 
adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text - except IndexError: - raise ExtractorError(u'Invalid manifest file') - url_pr = compat_urllib_parse_urlparse(info['thumbnail']) - info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','') - info['ext'] = 'mp4' + AGE_LIMITS = {'nc17': 18, 'r': 18, 'pg13': 13, 'pg': 10, 'g': 0} + rating = vdata.get('rating') + if rating: + age_limit = AGE_LIMITS.get(rating.lower()) else: - # Old-style direct links - info['url'] = next_url - info['ext'] = determine_ext(info['url']) + age_limit = None # None = No idea - return info + PREFS = {'high_quality': 2, 'low_quality': 0} + formats = [] + for format_key in ('mp4', 'webm'): + for qname, qurl in vdata[format_key].items(): + formats.append({ + 'format_id': format_key + '_' + qname, + 'url': qurl, + 'format': format_key, + 'preference': PREFS.get(qname), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': vdata['title'], + 'description': vdata.get('description'), + 'thumbnail': vdata.get('thumbnail'), + 'formats': formats, + 'age_limit': age_limit, + } diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py index a54ce3ee7..27bd8256e 100644 --- a/youtube_dl/extractor/comedycentral.py +++ b/youtube_dl/extractor/comedycentral.py @@ -12,7 +12,9 @@ from ..utils import ( class ComedyCentralIE(MTVServicesInfoExtractor): - _VALID_URL = r'https?://(?:www.)?comedycentral.com/(video-clips|episodes|cc-studios)/(?P<title>.*)' + _VALID_URL = r'''(?x)https?://(?:www.)?comedycentral.com/ + (video-clips|episodes|cc-studios|video-collections) + /(?P<title>.*)''' _FEED_URL = u'http://comedycentral.com/feeds/mrss/' _TEST = { diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 6fa60622e..692d828da 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -1,4 +1,6 @@ import base64 +import hashlib +import json import os import re 
import socket @@ -51,7 +53,8 @@ class InfoExtractor(object): Calculated from the format_id, width, height. and format_note fields if missing. * format_id A short description of the format - ("mp4_h264_opus" or "19") + ("mp4_h264_opus" or "19"). + Technically optional, but strongly recommended. * format_note Additional info about the format ("3D" or "DASH video") * width Width of the video, if known @@ -68,7 +71,12 @@ class InfoExtractor(object): download, lower-case. "http", "https", "rtsp", "rtmp" or so. * preference Order number of this format. If this field is - present, the formats get sorted by this field. + present and not None, the formats get sorted + by this field. + -1 for default (order by other properties), + -2 or smaller for less than default. + * quality Order number of the video quality of this + format, irrespective of the file format. -1 for default (order by other properties), -2 or smaller for less than default. url: Final video URL. @@ -227,6 +235,9 @@ class InfoExtractor(object): url = url_or_request.get_full_url() except AttributeError: url = url_or_request + if len(url) > 200: + h = hashlib.md5(url.encode('utf-8')).hexdigest() + url = url[:200 - len(h)] + h raw_filename = ('%s_%s.dump' % (video_id, url)) filename = sanitize_filename(raw_filename, restricted=True) self.to_screen(u'Saving request to ' + filename) @@ -254,6 +265,15 @@ class InfoExtractor(object): xml_string = transform_source(xml_string) return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8')) + def _download_json(self, url_or_request, video_id, + note=u'Downloading JSON metadata', + errnote=u'Unable to download JSON metadata'): + json_string = self._download_webpage(url_or_request, video_id, note, errnote) + try: + return json.loads(json_string) + except ValueError as ve: + raise ExtractorError('Failed to download JSON', cause=ve) + def report_warning(self, msg, video_id=None): idstr = u'' if video_id is None else u'%s: ' % video_id self._downloader.report_warning( @@ -376,7 
@@ class InfoExtractor(object): @staticmethod def _og_regexes(prop): content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')' - property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop) + property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop) template = r'<meta[^>]+?%s[^>]+?%s' return [ template % (property_re, content_re), @@ -481,9 +501,11 @@ class InfoExtractor(object): return ( preference, + f.get('quality') if f.get('quality') is not None else -1, f.get('height') if f.get('height') is not None else -1, f.get('width') if f.get('width') is not None else -1, ext_preference, + f.get('tbr') if f.get('tbr') is not None else -1, f.get('vbr') if f.get('vbr') is not None else -1, f.get('abr') if f.get('abr') is not None else -1, audio_ext_preference, diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py index f336a3c62..03b75b80d 100644 --- a/youtube_dl/extractor/condenast.py +++ b/youtube_dl/extractor/condenast.py @@ -1,4 +1,5 @@ # coding: utf-8 +from __future__ import unicode_literals import re import json @@ -20,30 +21,31 @@ class CondeNastIE(InfoExtractor): # The keys are the supported sites and the values are the name to be shown # to the user and in the extractor description. 
- _SITES = {'wired': u'WIRED', - 'gq': u'GQ', - 'vogue': u'Vogue', - 'glamour': u'Glamour', - 'wmagazine': u'W Magazine', - 'vanityfair': u'Vanity Fair', - } + _SITES = { + 'wired': 'WIRED', + 'gq': 'GQ', + 'vogue': 'Vogue', + 'glamour': 'Glamour', + 'wmagazine': 'W Magazine', + 'vanityfair': 'Vanity Fair', + } _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys()) - IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) + IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) _TEST = { - u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', - u'file': u'5171b343c2b4c00dd0c1ccb3.mp4', - u'md5': u'1921f713ed48aabd715691f774c451f7', - u'info_dict': { - u'title': u'3D Printed Speakers Lit With LED', - u'description': u'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', + 'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', + 'file': '5171b343c2b4c00dd0c1ccb3.mp4', + 'md5': '1921f713ed48aabd715691f774c451f7', + 'info_dict': { + 'title': '3D Printed Speakers Lit With LED', + 'description': 'Check out these beautiful 3D printed LED speakers. 
You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', } } def _extract_series(self, url, webpage): title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>', - webpage, u'series title', flags=re.DOTALL) + webpage, 'series title', flags=re.DOTALL) url_object = compat_urllib_parse_urlparse(url) base_url = '%s://%s' % (url_object.scheme, url_object.netloc) m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', @@ -57,39 +59,41 @@ class CondeNastIE(InfoExtractor): description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>', r'<div class="video-post-content">(.+?)</div>', ], - webpage, u'description', + webpage, 'description', fatal=False, flags=re.DOTALL) params = self._search_regex(r'var params = {(.+?)}[;,]', webpage, - u'player params', flags=re.DOTALL) - video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id') - player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id') - target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target') + 'player params', flags=re.DOTALL) + video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id') + player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id') + target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target') data = compat_urllib_parse.urlencode({'videoId': video_id, 'playerId': player_id, 'target': target, }) base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]', - webpage, u'base info url', + webpage, 'base info url', default='http://player.cnevids.com/player/loader.js?') info_url = base_info_url + data info_page = self._download_webpage(info_url, video_id, - u'Downloading video info') - video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info') + 'Downloading video info') + video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video 
info') video_info = json.loads(video_info) - def _formats_sort_key(f): - type_ord = 1 if f['type'] == 'video/mp4' else 0 - quality_ord = 1 if f['quality'] == 'high' else 0 - return (quality_ord, type_ord) - best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1] + formats = [{ + 'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']), + 'url': fdata['src'], + 'ext': fdata['type'].split('/')[-1], + 'quality': 1 if fdata['quality'] == 'high' else 0, + } for fdata in video_info['sources'][0]] + self._sort_formats(formats) - return {'id': video_id, - 'url': best_format['src'], - 'ext': best_format['type'].split('/')[-1], - 'title': video_info['title'], - 'thumbnail': video_info['poster_frame'], - 'description': description, - } + return { + 'id': video_id, + 'formats': formats, + 'title': video_info['title'], + 'thumbnail': video_info['poster_frame'], + 'description': description, + } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) diff --git a/youtube_dl/extractor/cspan.py b/youtube_dl/extractor/cspan.py index d5730684d..a2cbd4d8d 100644 --- a/youtube_dl/extractor/cspan.py +++ b/youtube_dl/extractor/cspan.py @@ -1,20 +1,25 @@ +from __future__ import unicode_literals + +import json import re from .common import InfoExtractor from ..utils import ( - compat_urllib_parse, + unescapeHTML, ) + class CSpanIE(InfoExtractor): _VALID_URL = r'http://www\.c-spanvideo\.org/program/(.*)' + IE_DESC = 'C-SPAN' _TEST = { - u'url': u'http://www.c-spanvideo.org/program/HolderonV', - u'file': u'315139.flv', - u'md5': u'74a623266956f69e4df0068ab6c80fe4', - u'info_dict': { - u"title": u"Attorney General Eric Holder on Voting Rights Act Decision" + 'url': 'http://www.c-spanvideo.org/program/HolderonV', + 'file': '315139.mp4', + 'md5': '8e44ce11f0f725527daccc453f553eb0', + 'info_dict': { + 'title': 'Attorney General Eric Holder on Voting Rights Act Decision', + 'description': 'Attorney General Eric Holder spoke to reporters following the 
Supreme Court decision in [Shelby County v. Holder] in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.', }, - u'skip': u'Requires rtmpdump' } def _real_extract(self, url): @@ -22,30 +27,22 @@ class CSpanIE(InfoExtractor): prog_name = mobj.group(1) webpage = self._download_webpage(url, prog_name) video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id') - data = compat_urllib_parse.urlencode({'programid': video_id, - 'dynamic':'1'}) - info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data - video_info = self._download_webpage(info_url, video_id, u'Downloading video info') - self.report_extraction(video_id) + title = self._html_search_regex( + r'<!-- title -->\n\s*<h1[^>]*>(.*?)</h1>', webpage, 'title') + description = self._og_search_description(webpage) - title = self._html_search_regex(r'<string name="title">(.*?)</string>', - video_info, 'title') - description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"', - webpage, 'description', - flags=re.MULTILINE|re.DOTALL) + info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id + data_json = self._download_webpage( + info_url, video_id, 'Downloading video info') + data = json.loads(data_json) - url = self._search_regex(r'<string name="URL">(.*?)</string>', - video_info, 'video url') - url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443') - path = self._search_regex(r'<string name="path">(.*?)</string>', - video_info, 'rtmp play path') + url = unescapeHTML(data['video']['files'][0]['path']['#text']) - return {'id': video_id, - 'title': title, - 'ext': 'flv', - 'url': url, - 'play_path': path, - 'description': description, - 'thumbnail': self._og_search_thumbnail(webpage), - } + return { + 'id': video_id, + 'title': title, + 'url': url, + 'description': description, 
+ 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/youtube_dl/extractor/defense.py b/youtube_dl/extractor/defense.py index 424d960da..c5529f8d4 100644 --- a/youtube_dl/extractor/defense.py +++ b/youtube_dl/extractor/defense.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re import json @@ -5,15 +7,14 @@ from .common import InfoExtractor class DefenseGouvFrIE(InfoExtractor): - _IE_NAME = 'defense.gouv.fr' + IE_NAME = 'defense.gouv.fr' _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/' r'ligthboxvideo/base-de-medias/webtv/(.*)') _TEST = { - u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/' - u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'), - u'file': u'11213.mp4', - u'md5': u'75bba6124da7e63d2d60b5244ec9430c', + 'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1', + 'file': '11213.mp4', + 'md5': '75bba6124da7e63d2d60b5244ec9430c', "info_dict": { "title": "attaque-chimique-syrienne-du-21-aout-2013-1" } diff --git a/youtube_dl/extractor/dreisat.py b/youtube_dl/extractor/dreisat.py index 416e25156..0b11d1f10 100644 --- a/youtube_dl/extractor/dreisat.py +++ b/youtube_dl/extractor/dreisat.py @@ -10,11 +10,11 @@ from ..utils import ( class DreiSatIE(InfoExtractor): IE_NAME = '3sat' - _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/index\.php\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$' + _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$' _TEST = { u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983", - u'file': u'36983.webm', - u'md5': u'57c97d0469d71cf874f6815aa2b7c944', + u'file': u'36983.mp4', + u'md5': u'9dcfe344732808dbfcc901537973c922', u'info_dict': { u"title": u"Kaffeeland Schweiz", u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt 
ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...", diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py new file mode 100644 index 000000000..12829cbcc --- /dev/null +++ b/youtube_dl/extractor/everyonesmixtape.py @@ -0,0 +1,69 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + compat_urllib_request, + ExtractorError, +) + + +class EveryonesMixtapeIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$' + + _TEST = { + 'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5', + 'file': '5bfseWNmlds.mp4', + "info_dict": { + "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)", + "uploader": "FKR.TV", + "uploader_id": "frenchkissrecords", + "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com", + "upload_date": "20081015" + }, + 'params': { + 'skip_download': True, # This is simply YouTube + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + playlist_id = mobj.group('id') + + pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id + pllist_req = compat_urllib_request.Request(pllist_url) + pllist_req.add_header('X-Requested-With', 'XMLHttpRequest') + + playlist_list = self._download_json( + pllist_req, playlist_id, note='Downloading playlist metadata') + try: + playlist_no = next(playlist['id'] + for playlist in playlist_list + if playlist['code'] == playlist_id) + except StopIteration: + raise ExtractorError('Playlist id not found') + + pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no + pl_req = 
compat_urllib_request.Request(pl_url) + pl_req.add_header('X-Requested-With', 'XMLHttpRequest') + playlist = self._download_json( + pl_req, playlist_id, note='Downloading playlist info') + + entries = [{ + '_type': 'url', + 'url': t['url'], + 'title': t['title'], + } for t in playlist['tracks']] + + if mobj.group('songnr'): + songnr = int(mobj.group('songnr')) - 1 + return entries[songnr] + + playlist_title = playlist['mixData']['name'] + return { + '_type': 'playlist', + 'id': playlist_id, + 'title': playlist_title, + 'entries': entries, + } diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py index e1d2f0526..21ea5ec2b 100644 --- a/youtube_dl/extractor/flickr.py +++ b/youtube_dl/extractor/flickr.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re from .common import InfoExtractor @@ -11,13 +13,13 @@ class FlickrIE(InfoExtractor): """Information Extractor for Flickr videos""" _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*' _TEST = { - u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', - u'file': u'5645318632.mp4', - u'md5': u'6fdc01adbc89d72fc9c4f15b4a4ba87b', - u'info_dict': { - u"description": u"Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", - u"uploader_id": u"forestwander-nature-pictures", - u"title": u"Dark Hollow Waterfalls" + 'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', + 'file': '5645318632.mp4', + 'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b', + 'info_dict': { + "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. 
They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", + "uploader_id": "forestwander-nature-pictures", + "title": "Dark Hollow Waterfalls" } } @@ -29,13 +31,13 @@ class FlickrIE(InfoExtractor): webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id webpage = self._download_webpage(webpage_url, video_id) - secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret') + secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, 'secret') first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self' first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage') node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>', - first_xml, u'node_id') + first_xml, 'node_id') second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1' second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage') @@ -44,7 +46,7 @@ class FlickrIE(InfoExtractor): mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml) if mobj is None: - raise ExtractorError(u'Unable to extract video url') + raise ExtractorError('Unable to extract video url') video_url = mobj.group(1) + unescapeHTML(mobj.group(2)) return [{ diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py new file mode 100644 index 000000000..deb1b0b9d --- /dev/null +++ b/youtube_dl/extractor/franceinter.py @@ -0,0 +1,38 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class FranceInterIE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})' + _TEST = { + 'url': 'http://www.franceinter.fr/player/reecouter?play=793962', + 'file': 
'793962.mp3', + 'md5': '4764932e466e6f6c79c317d2e74f6884', + "info_dict": { + "title": "L’Histoire dans les jeux vidéo", + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex( + r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title') + path = self._search_regex( + r'&urlAOD=(.*?)&startTime', webpage, 'video url') + video_url = 'http://www.franceinter.fr/' + path + + return { + 'id': video_id, + 'formats': [{ + 'url': video_url, + 'vcodec': 'none', + }], + 'title': title, + } diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py index ad85bc16d..b32ff9f86 100644 --- a/youtube_dl/extractor/francetv.py +++ b/youtube_dl/extractor/francetv.py @@ -191,3 +191,29 @@ class GenerationQuoiIE(InfoExtractor): info = json.loads(info_json) return self.url_result('http://www.dailymotion.com/video/%s' % info['id'], ie='Dailymotion') + + +class CultureboxIE(FranceTVBaseInfoExtractor): + IE_NAME = u'culturebox.francetvinfo.fr' + _VALID_URL = r'https?://culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)' + + _TEST = { + u'url': u'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813', + u'info_dict': { + u'id': u'EV_6785', + u'ext': u'mp4', + u'title': u'Einstein on the beach au Théâtre du Châtelet', + u'description': u'md5:9ce2888b1efefc617b5e58b3f6200eeb', + }, + u'params': { + # m3u8 download + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + name = mobj.group('name') + webpage = self._download_webpage(url, name) + video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, u'video id') + return self._extract_video(video_id) diff --git a/youtube_dl/extractor/gamespot.py b/youtube_dl/extractor/gamespot.py index 26b7d2ae5..380ebbe55 100644 --- a/youtube_dl/extractor/gamespot.py +++ 
b/youtube_dl/extractor/gamespot.py @@ -1,3 +1,5 @@ +from __future__ import unicode_literals + import re import json @@ -13,12 +15,12 @@ from ..utils import ( class GameSpotIE(InfoExtractor): _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?' _TEST = { - u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/", - u"file": u"gs-2300-6410818.mp4", - u"md5": u"b2a30deaa8654fcccd43713a6b6a4825", - u"info_dict": { - u"title": u"Arma 3 - Community Guide: SITREP I", - u'description': u'Check out this video where some of the basics of Arma 3 is explained.', + "url": "http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/", + "file": "gs-2300-6410818.mp4", + "md5": "b2a30deaa8654fcccd43713a6b6a4825", + "info_dict": { + "title": "Arma 3 - Community Guide: SITREP I", + 'description': 'Check out this video where some of the basics of Arma 3 is explained.', } } diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 7a14c98f9..839530982 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -1,9 +1,12 @@ # encoding: utf-8 +from __future__ import unicode_literals + import os import re from .common import InfoExtractor +from .youtube import YoutubeIE from ..utils import ( compat_urllib_error, compat_urllib_parse, @@ -22,78 +25,78 @@ from .ooyala import OoyalaIE class GenericIE(InfoExtractor): - IE_DESC = u'Generic downloader that works on some sites' + IE_DESC = 'Generic downloader that works on some sites' _VALID_URL = r'.*' - IE_NAME = u'generic' + IE_NAME = 'generic' _TESTS = [ { - u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html', - u'file': u'13601338388002.mp4', - u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd', - u'info_dict': { - u"uploader": u"www.hodiho.fr", - u"title": u"R\u00e9gis plante sa Jeep" + 'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html', + 'file': '13601338388002.mp4', + 'md5': 
'6e15c93721d7ec9e9ca3fdbf07982cfd', + 'info_dict': { + 'uploader': 'www.hodiho.fr', + 'title': 'R\u00e9gis plante sa Jeep', } }, # embedded vimeo video { - u'add_ie': ['Vimeo'], - u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references', - u'file': u'22444065.mp4', - u'md5': u'2903896e23df39722c33f015af0666e2', - u'info_dict': { - u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011', - u"uploader_id": u"skillsmatter", - u"uploader": u"Skills Matter", + 'add_ie': ['Vimeo'], + 'url': 'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references', + 'file': '22444065.mp4', + 'md5': '2903896e23df39722c33f015af0666e2', + 'info_dict': { + 'title': 'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011', + 'uploader_id': 'skillsmatter', + 'uploader': 'Skills Matter', } }, # bandcamp page with custom domain { - u'add_ie': ['Bandcamp'], - u'url': u'http://bronyrock.com/track/the-pony-mash', - u'file': u'3235767654.mp3', - u'info_dict': { - u'title': u'The Pony Mash', - u'uploader': u'M_Pallante', + 'add_ie': ['Bandcamp'], + 'url': 'http://bronyrock.com/track/the-pony-mash', + 'file': '3235767654.mp3', + 'info_dict': { + 'title': 'The Pony Mash', + 'uploader': 'M_Pallante', }, - u'skip': u'There is a limit of 200 free downloads / month for the test song', + 'skip': 'There is a limit of 200 free downloads / month for the test song', }, # embedded brightcove video # it also tests brightcove videos that need to set the 'Referer' in the # http requests { - u'add_ie': ['Brightcove'], - u'url': u'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', - u'info_dict': { - u'id': u'2765128793001', - u'ext': u'mp4', - u'title': u'Le cours de bourse : l’analyse technique', - u'description': u'md5:7e9ad046e968cb2d1114004aba466fd9', - u'uploader': u'BFM BUSINESS', + 'add_ie': 
['Brightcove'], + 'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', + 'info_dict': { + 'id': '2765128793001', + 'ext': 'mp4', + 'title': 'Le cours de bourse : l’analyse technique', + 'description': 'md5:7e9ad046e968cb2d1114004aba466fd9', + 'uploader': 'BFM BUSINESS', }, - u'params': { - u'skip_download': True, + 'params': { + 'skip_download': True, }, }, # Direct link to a video { - u'url': u'http://media.w3.org/2010/05/sintel/trailer.mp4', - u'file': u'trailer.mp4', - u'md5': u'67d406c2bcb6af27fa886f31aa934bbe', - u'info_dict': { - u'id': u'trailer', - u'title': u'trailer', - u'upload_date': u'20100513', + 'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4', + 'file': 'trailer.mp4', + 'md5': '67d406c2bcb6af27fa886f31aa934bbe', + 'info_dict': { + 'id': 'trailer', + 'title': 'trailer', + 'upload_date': '20100513', } }, # ooyala video { - u'url': u'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219', - u'md5': u'5644c6ca5d5782c1d0d350dad9bd840c', - u'info_dict': { - u'id': u'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', - u'ext': u'mp4', - u'title': u'2cc213299525360.mov', #that's what we get + 'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219', + 'md5': '5644c6ca5d5782c1d0d350dad9bd840c', + 'info_dict': { + 'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', + 'ext': 'mp4', + 'title': '2cc213299525360.mov', #that's what we get }, }, ] @@ -101,12 +104,12 @@ class GenericIE(InfoExtractor): def report_download_webpage(self, video_id): """Report webpage download.""" if not self._downloader.params.get('test', False): - self._downloader.report_warning(u'Falling back on generic information extractor.') + self._downloader.report_warning('Falling back on generic information extractor.') super(GenericIE, self).report_download_webpage(video_id) def report_following_redirect(self, new_url): """Report information 
extraction.""" - self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url) + self._downloader.to_screen('[redirect] Following redirect to %s' % new_url) def _send_head(self, url): """Check if it is a redirect, like url shorteners, in case return the new url.""" @@ -152,7 +155,7 @@ class GenericIE(InfoExtractor): response = opener.open(HEADRequest(url)) if response is None: - raise ExtractorError(u'Invalid URL protocol') + raise ExtractorError('Invalid URL protocol') return response def _real_extract(self, url): @@ -162,6 +165,8 @@ class GenericIE(InfoExtractor): return self.url_result('http://' + url) video_id = os.path.splitext(url.split('/')[-1])[0] + self.to_screen('%s: Requesting header' % video_id) + try: response = self._send_head(url) @@ -184,7 +189,7 @@ class GenericIE(InfoExtractor): 'formats': [{ 'format_id': m.group('format_id'), 'url': url, - 'vcodec': u'none' if m.group('type') == 'audio' else None + 'vcodec': 'none' if m.group('type') == 'audio' else None }], 'upload_date': upload_date, } @@ -198,7 +203,7 @@ class GenericIE(InfoExtractor): except ValueError: # since this is the last-resort InfoExtractor, if # this error is thrown, it'll be thrown here - raise ExtractorError(u'Failed to download URL: %s' % url) + raise ExtractorError('Failed to download URL: %s' % url) self.report_extraction(video_id) @@ -209,22 +214,23 @@ class GenericIE(InfoExtractor): # Video Title - Tagline | Site Name # and so on and so forth; it's just not practical video_title = self._html_search_regex( - r'(?s)<title>(.*?)', webpage, u'video title', - default=u'video') + r'(?s)(.*?)', webpage, 'video title', + default='video') # video uploader is domain name video_uploader = self._search_regex( - r'^(?:https?://)?([^/]*)/.*', url, u'video uploader') + r'^(?:https?://)?([^/]*)/.*', url, 'video uploader') # Look for BrightCove: bc_url = BrightcoveIE._extract_brightcove_url(webpage) if bc_url is not None: - self.to_screen(u'Brightcove video detected.') - return 
self.url_result(bc_url, 'Brightcove') + self.to_screen('Brightcove video detected.') + surl = smuggle_url(bc_url, {'Referer': url}) + return self.url_result(surl, 'Brightcove') # Look for embedded (iframe) Vimeo player mobj = re.search( - r']+?src="(https?://player.vimeo.com/video/.+?)"', webpage) + r']+?src="((?:https?:)?//player.vimeo.com/video/.+?)"', webpage) if mobj: player_url = unescapeHTML(mobj.group(1)) surl = smuggle_url(player_url, {'Referer': url}) @@ -271,16 +277,12 @@ class GenericIE(InfoExtractor): } # Look for embedded blip.tv player - mobj = re.search(r']*https?://api.blip.tv/\w+/redirect/\w+/(\d+)', webpage) + mobj = re.search(r']*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage) if mobj: - return self.url_result('http://blip.tv/seo/-'+mobj.group(1), 'BlipTV') - mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*https?://(?:\w+\.)?blip.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', webpage) + return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV') + mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage) if mobj: - player_url = 'http://blip.tv/play/%s.x?p=1' % mobj.group(1) - player_page = self._download_webpage(player_url, mobj.group(1)) - blip_video_id = self._search_regex(r'data-episode-id="(\d+)', player_page, u'blip_video_id', fatal=False) - if blip_video_id: - return self.url_result('http://blip.tv/seo/-'+blip_video_id, 'BlipTV') + return self.url_result(mobj.group(1), 'BlipTV') # Look for Bandcamp pages with custom domain mobj = re.search(r']*?content="(.*?bandcamp\.com.*?)"', webpage) @@ -301,18 +303,32 @@ class GenericIE(InfoExtractor): return OoyalaIE._build_url_result(mobj.group(1)) # Look for Aparat videos - mobj = re.search(r'