commit 82aa1945b2

Merge remote-tracking branch 'rg3/master'

Conflicts:
    youtube_dl/postprocessor/ffmpeg.py
.travis.yml

@@ -5,9 +5,8 @@ python:
   - "3.2"
   - "3.3"
   - "3.4"
-before_install:
-  - sudo apt-get update -qq
-  - sudo apt-get install -yqq rtmpdump
+  - "3.5"
+sudo: false
 script: nosetests test --verbose
 notifications:
   email:
AUTHORS (19 lines changed)

@@ -124,3 +124,22 @@ Mohammad Teimori Pabandi
 Roman Le Négrate
 Matthias Küch
 Julian Richen
+Ping O.
+Mister Hat
+Peter Ding
+jackyzy823
+George Brighton
+Remita Amine
+Aurélio A. Heckert
+Bernhard Minks
+sceext
+Zach Bruggeman
+Tjark Saul
+slangangular
+Behrouz Abbasi
+ngld
+nyuszika7h
+Shaun Walbridge
+Lee Jenkins
+Anssi Hannula
+Lukáš Lalinský
CONTRIBUTING.md

@@ -125,7 +125,7 @@ If you want to add support for a new site, you can follow this quick list (assum
     ```
 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
 8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
 9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
README.md (82 lines changed)

@@ -9,6 +9,7 @@ youtube-dl - download videos from youtube.com or other video platforms
 - [VIDEO SELECTION](#video-selection)
 - [FAQ](#faq)
 - [DEVELOPER INSTRUCTIONS](#developer-instructions)
+- [EMBEDDING YOUTUBE-DL](#embedding-youtube-dl)
 - [BUGS](#bugs)
 - [COPYRIGHT](#copyright)

@@ -17,12 +18,12 @@ youtube-dl - download videos from youtube.com or other video platforms
 To install it right away for all UNIX users (Linux, OS X, etc.), type:

     sudo curl https://yt-dl.org/latest/youtube-dl -o /usr/local/bin/youtube-dl
-    sudo chmod a+x /usr/local/bin/youtube-dl
+    sudo chmod a+rx /usr/local/bin/youtube-dl

 If you do not have curl, you can alternatively use a recent wget:

     sudo wget https://yt-dl.org/downloads/latest/youtube-dl -O /usr/local/bin/youtube-dl
-    sudo chmod a+x /usr/local/bin/youtube-dl
+    sudo chmod a+rx /usr/local/bin/youtube-dl

 Windows users can [download a .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in their home directory or any other location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).

@@ -34,7 +35,7 @@ You can also use pip:

     sudo pip install youtube-dl

-Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
+Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .

 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from

@@ -52,8 +53,9 @@ which means you can modify it, redistribute it or use it however you like.
     -i, --ignore-errors Continue on download errors, for example to skip unavailable videos in a playlist
     --abort-on-error Abort downloading of further videos (in the playlist or the command line) if an error occurs
     --dump-user-agent Display the current browser identification
-    --list-extractors List all supported extractors and the URLs they would handle
+    --list-extractors List all supported extractors
     --extractor-descriptions Output descriptions of all supported extractors
+    --force-generic-extractor Force extraction to use the generic extractor
     --default-search PREFIX Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple".
     Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The
     default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.

@@ -74,7 +76,7 @@ which means you can modify it, redistribute it or use it however you like.
 ## Video Selection:
     --playlist-start NUMBER Playlist video to start at (default is 1)
    --playlist-end NUMBER Playlist video to end at (default is last)
-    --playlist-items ITEM_SPEC Playlist video items to download. Specify indices of the videos in the playlist seperated by commas like: "--playlist-items 1,2,5,8"
+    --playlist-items ITEM_SPEC Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8"
     if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will
     download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
     --match-title REGEX Download only matching titles (regex or caseless sub-string)

@@ -107,7 +109,7 @@ which means you can modify it, redistribute it or use it however you like.
     --playlist-reverse Download playlist videos in reverse order
     --xattr-set-filesize Set file xattribute ytdl.filesize with expected filesize (experimental)
     --hls-prefer-native Use the native HLS downloader instead of ffmpeg (experimental)
-    --external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,curl,wget
+    --external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,axel,curl,httpie,wget
     --external-downloader-args ARGS Give these arguments to the external downloader

 ## Filesystem Options:

@@ -133,7 +135,7 @@ which means you can modify it, redistribute it or use it however you like.
     --no-mtime Do not use the Last-modified header to set the file modification time
     --write-description Write video description to a .description file
     --write-info-json Write video metadata to a .info.json file
-    --write-annotations Write video annotations to a .annotation file
+    --write-annotations Write video annotations to a .annotations.xml file
     --load-info FILE JSON file containing the video information (created with the "--write-info-json" option)
     --cookies FILE File to read cookies from and dump cookie jar in
     --cache-dir DIR Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl

@@ -168,7 +170,7 @@ which means you can modify it, redistribute it or use it however you like.
     --no-progress Do not print progress bar
     --console-title Display progress in console titlebar
     -v, --verbose Print various debugging information
-    --dump-pages Print downloaded pages to debug problems (very verbose)
+    --dump-pages Print downloaded pages encoded using base64 to debug problems (very verbose)
     --write-pages Write downloaded intermediary pages to files in the current directory to debug problems
     --print-traffic Display sent and read HTTP traffic
     -C, --call-home Contact the youtube-dl server for debugging

@@ -189,8 +191,8 @@ which means you can modify it, redistribute it or use it however you like.
     --all-formats Download all available video formats
     --prefer-free-formats Prefer free video formats unless a specific one is requested
     -F, --list-formats List all available formats
-    --youtube-skip-dash-manifest Do not download the DASH manifest on YouTube videos
-    --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv.Ignored if no
+    --youtube-skip-dash-manifest Do not download the DASH manifests and related data on YouTube videos
+    --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no
     merge is required

 ## Subtitle Options:

@@ -206,24 +208,25 @@ which means you can modify it, redistribute it or use it however you like.
     -p, --password PASSWORD Account password. If this option is left out, youtube-dl will ask interactively.
     -2, --twofactor TWOFACTOR Two-factor auth code
     -n, --netrc Use .netrc authentication data
-    --video-password PASSWORD Video password (vimeo, smotri)
+    --video-password PASSWORD Video password (vimeo, smotri, youku)

 ## Post-processing Options:
     -x, --extract-audio Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)
     --audio-format FORMAT Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "best" by default
     --audio-quality QUALITY Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default
     5)
-    --recode-video FORMAT Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)
+    --recode-video FORMAT Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)
+    --postprocessor-args ARGS Give these arguments to the postprocessor
     -k, --keep-video Keep the video file on disk after the post-processing; the video is erased by default
     --no-post-overwrites Do not overwrite post-processed files; the post-processed files are overwritten by default
     --embed-subs Embed subtitles in the video (only for mkv and mp4 videos)
     --embed-thumbnail Embed thumbnail in the audio as cover art
     --add-metadata Write metadata to the video file
     --metadata-from-title FORMAT Parse additional metadata like song title / artist from the video title. The format syntax is the same as --output, the parsed
-    parameters replace existing values. Additional templates: %(album), %(artist). Example: --metadata-from-title "%(artist)s -
+    parameters replace existing values. Additional templates: %(album)s, %(artist)s. Example: --metadata-from-title "%(artist)s -
     %(title)s" matches a title like "Coldplay - Paradise"
     --xattrs Write metadata to the video file's xattrs (using dublin core and xdg standards)
-    --fixup POLICY Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn(the default;
+    --fixup POLICY Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn (the default;
     fix file if we can, warn otherwise)
     --prefer-avconv Prefer avconv over ffmpeg for running the postprocessors (default)
     --prefer-ffmpeg Prefer ffmpeg over avconv for running the postprocessors

@@ -234,7 +237,34 @@ which means you can modify it, redistribute it or use it however you like.

 # CONFIGURATION

-You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<user name>\youtube-dl.conf`.
+You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, system wide configuration file is located at `/etc/youtube-dl.conf` and user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime and use proxy:
+```
+--extract-audio
+--no-mtime
+--proxy 127.0.0.1:3128
+```
+
+You can use `--ignore-config` if you want to disable configuration file for a particular youtube-dl run.
+
+### Authentication with `.netrc` file ###
+
+You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in shell command history. You can achieve this using [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on per extractor basis. For that you will need to create `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
+```
+touch $HOME/.netrc
+chmod a-rwx,u+rw $HOME/.netrc
+```
+After that you can add credentials for extractor in the following format, where *extractor* is the name of extractor in lowercase:
+```
+machine <extractor> login <login> password <password>
+```
+For example:
+```
+machine youtube login myaccount@gmail.com password my_youtube_password
+machine twitch login my_twitch_account_name password my_twitch_password
+```
+To activate authentication with `.netrc` file you should pass `--netrc` to youtube-dl or place it in [configuration file](#configuration).
+
+On Windows you may also need to setup `%HOME%` environment variable manually.
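
For embedders, the same `.netrc` mechanism can be switched on through the `usenetrc` option key of `YoutubeDL`, which mirrors the `--netrc` flag; a minimal sketch (not part of the diff):

```python
from __future__ import unicode_literals
import youtube_dl

# usenetrc mirrors --netrc: credentials are read from the machine
# entries in $HOME/.netrc instead of command line arguments.
ydl_opts = {'usenetrc': True}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```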

 # OUTPUT TEMPLATE

@@ -248,8 +278,10 @@ The `-o` option allows users to indicate a template for the output file names. T
 - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
 - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
 - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
-- `playlist`: The name or the id of the playlist that contains the video.
-- `playlist_index`: The index of the video in the playlist, a five-digit number.
+- `playlist`: The sequence will be replaced by the name or the id of the playlist that contains the video.
+- `playlist_index`: The sequence will be replaced by the index of the video in the playlist padded with leading zeros according to the total length of the playlist.
 - `format_id`: The sequence will be replaced by the format code specified by `--format`.
+- `duration`: The sequence will be replaced by the length of the video in seconds.

 The current default template is `%(title)s-%(id)s.%(ext)s`.
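
As an illustration (not part of the diff), here is a minimal sketch of how these fields expand, using the Python embedding API shown later in this README; `prepare_filename` builds the output name from an info dict without downloading anything, and the field values below are invented:

```python
from __future__ import unicode_literals
import youtube_dl

# Expand the default template against a hand-built info dict.
ydl = youtube_dl.YoutubeDL({'outtmpl': '%(title)s-%(id)s.%(ext)s'})
print(ydl.prepare_filename({
    'id': 'abc123',          # hypothetical video id
    'title': 'some title',   # hypothetical title
    'ext': 'mp4',
}))
# -> 'some title-abc123.mp4'
```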

@@ -267,9 +299,9 @@ youtube-dl_test_video_.mp4 # A simple file name

 By default youtube-dl tries to download the best quality, but sometimes you may want to download other format.
 The simplest case is requesting a specific format, for example `-f 22`. You can get the list of available formats using `--list-formats`, you can also use a file extension (currently it supports aac, m4a, mp3, mp4, ogg, wav, webm) or the special names `best`, `bestvideo`, `bestaudio` and `worst`.

-If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`.
+If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`. Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.

-Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file.
+Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.

 If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
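
For embedders, the same selectors are available through the `format` option key of `YoutubeDL`, which mirrors `-f`; a minimal sketch (not part of the diff), using the test URL from the embedding example below:

```python
from __future__ import unicode_literals
import youtube_dl

# Capped DASH pair when ffmpeg/avconv can mux, single-file 'best' fallback,
# mirroring `-f 'bestvideo[height<=?1080]+bestaudio/best'`.
ydl_opts = {'format': 'bestvideo[height<=?1080]+bestaudio/best'}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```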

@@ -379,7 +411,7 @@ In February 2015, the new YouTube player contained a character sequence in a str

 ### HTTP Error 429: Too Many Requests or 402: Payment Required

-These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--network-address` options](#network-options) to select another IP address.
+These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--source-address` options](#network-options) to select another IP address.

 ### SyntaxError: Non-ASCII character ###

@@ -417,6 +449,12 @@ Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the opt
     youtube-dl -- -wNyEUrxzFU
     youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"

+### How do I pass cookies to youtube-dl?
+
+Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. Note that cookies file must be in Mozilla/Netscape format and the first line of cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in cookies file and convert newlines if necessary to correspond your OS, namely `CRLF` (`\r\n`) for Windows, `LF` (`\n`) for Linux and `CR` (`\r`) for Mac OS. `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
+
+Passing cookies to youtube-dl is a good way to workaround login when particular extractor does not implement it explicitly.

 ### Can you add support for this anime video site, or site which shows current movies for free?

 As a matter of policy (as well as legality), youtube-dl does not include support for services that specialize in infringing copyright. As a rule of thumb, if you cannot easily find a video that the service is quite obviously allowed to distribute (i.e. that has been uploaded by the creator, the creator's distributor, or is published under a free license), the service is probably unfit for inclusion to youtube-dl.

@@ -516,7 +554,7 @@ If you want to add support for a new site, you can follow this quick list (assum
     ```
 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
 8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
 9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:

@@ -544,7 +582,7 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
     ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
 ```

-Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L117-L265). For a start, if you want to intercept youtube-dl's output, set a `logger` object.

 Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
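
The program listing itself did not survive extraction here; the sketch below follows the shape this section of the README had at the time (a custom logger object, a progress hook, and an `FFmpegExtractAudio` postprocessor) and should be read as a reconstruction rather than verbatim diff content:

```python
from __future__ import unicode_literals
import youtube_dl


class MyLogger(object):
    def debug(self, msg):
        pass

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)


def my_hook(d):
    if d['status'] == 'finished':
        print('Done downloading, now converting ...')


ydl_opts = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192',
    }],
    'logger': MyLogger(),
    'progress_hooks': [my_hook],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```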

docs/supportedsites.md

@@ -10,12 +10,14 @@
 - **56.com**
 - **5min**
 - **8tracks**
+- **91porn**
 - **9gag**
 - **abc.net.au**
 - **Abc7News**
 - **AcademicEarth:Course**
 - **AddAnime**
 - **AdobeTV**
+- **AdobeTVVideo**
 - **AdultSwim**
 - **Aftenposten**
 - **Aftonbladet**

@@ -26,8 +28,8 @@
 - **anitube.se**
 - **AnySex**
 - **Aparat**
-- **AppleDailyAnimationNews**
-- **AppleDailyRealtimeNews**
+- **AppleConnect**
+- **AppleDaily**: 臺灣蘋果日報
 - **AppleTrailers**
 - **archive.org**: archive.org videos
 - **ARD**

@@ -44,10 +46,12 @@
 - **audiomack**
 - **audiomack:album**
 - **Azubu**
 - **BaiduVideo**: 百度视频
 - **bambuser**
 - **bambuser:channel**
 - **Bandcamp**
 - **Bandcamp:album**
+- **bbc**: BBC
 - **bbc.co.uk**: BBC iPlayer
 - **BeatportPro**
 - **Beeg**

@@ -63,6 +67,8 @@
 - **BR**: Bayerischer Rundfunk Mediathek
 - **Break**
 - **Brightcove**
 - **bt:article**: Bergens Tidende Articles
 - **bt:vestlendingen**: Bergens Tidende - Vestlendingen
 - **BuzzFeed**
 - **BYUtv**
 - **Camdemy**

@@ -80,7 +86,7 @@
 - **chirbit:profile**
 - **Cinchcast**
 - **Cinemassacre**
-- **clipfish**
+- **Clipfish**
 - **cliphunter**
 - **Clipsyndicate**
 - **Cloudy**
@@ -102,21 +108,24 @@
 - **Crunchyroll**
 - **crunchyroll:playlist**
 - **CSpan**: C-SPAN
-- **CtsNews**
+- **CtsNews**: 華視新聞
 - **culturebox.francetvinfo.fr**
 - **dailymotion**
 - **dailymotion:playlist**
 - **dailymotion:user**
+- **DailymotionCloud**
 - **daum.net**
 - **DBTV**
+- **DCN**
 - **DctpTv**
 - **DeezerPlaylist**
 - **defense.gouv.fr**
 - **DHM**: Filmarchiv - Deutsches Historisches Museum
 - **Discovery**
 - **divxstage**: DivxStage
 - **Dotsub**
-- **DouyuTV**
+- **DouyuTV**: 斗鱼
 - **dramafever**
 - **dramafever:series**
 - **DRBonanza**
 - **Dropbox**
 - **DrTuber**

@@ -139,6 +148,8 @@
 - **Eporner**
 - **EroProfile**
 - **Escapist**
+- **ESPN** (Currently broken)
+- **EsriVideo**
 - **EveryonesMixtape**
 - **exfm**: ex.fm
 - **ExpoTV**

@@ -148,13 +159,14 @@
 - **fc2**
 - **fernsehkritik.tv**
 - **fernsehkritik.tv:postecke**
-- **Firedrive**
 - **Firstpost**
+- **FiveTV**
 - **Flickr**
 - **Folketinget**: Folketinget (ft.dk; Danish parliament)
 - **FootyRoom**
 - **Foxgay**
-- **FoxNews**
+- **FoxNews**: Fox News and Fox Business Video
+- **FoxSports**
 - **france2.fr:generation-quoi**
 - **FranceCulture**
 - **FranceInter**

@@ -182,9 +194,8 @@
 - **GodTube**
 - **GoldenMoustache**
 - **Golem**
-- **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net
+- **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com
 - **Goshgay**
-- **Grooveshark**
 - **Groupon**
 - **Hark**
 - **HearThisAt**

@@ -208,12 +219,17 @@
 - **imdb**: Internet Movie Database trailers
 - **imdb:list**: Internet Movie Database lists
 - **Imgur**
 - **ImgurAlbum**
 - **Ina**
+- **Indavideo**
+- **IndavideoEmbed**
 - **InfoQ**
 - **Instagram**
 - **instagram:user**: Instagram user profile
 - **InternetVideoArchive**
 - **IPrima**
 - **iqiyi**: 爱奇艺
+- **Ir90Tv**
 - **ivi**: ivi.ru
 - **ivi:compilation**: ivi.ru compilations
 - **Izlesene**

@@ -226,6 +242,7 @@
 - **KanalPlay**: Kanal 5/9/11 Play
 - **Kankan**
+- **Karaoketv**
 - **KarriereVideos**
 - **keek**
 - **KeezMovies**
 - **KhanAcademy**

@@ -233,12 +250,20 @@
 - **kontrtube**: KontrTube.ru - Труба зовёт
 - **KrasView**: Красвью
 - **Ku6**
+- **kuwo:album**: 酷我音乐 - 专辑
+- **kuwo:category**: 酷我音乐 - 分类
+- **kuwo:chart**: 酷我音乐 - 排行榜
+- **kuwo:mv**: 酷我音乐 - MV
+- **kuwo:singer**: 酷我音乐 - 歌手
+- **kuwo:song**: 酷我音乐
 - **la7.tv**
 - **Laola1Tv**
-- **Letv**
+- **Lecture2Go**
+- **Letv**: 乐视网
 - **LetvPlaylist**
 - **LetvTv**
 - **Libsyn**
+- **life:embed**
 - **lifenews**: LIFE | NEWS
 - **LiveLeak**
 - **livestream**
@@ -260,7 +285,7 @@
 - **Minhateca**
 - **MinistryGrid**
 - **miomio.tv**
-- **mitele.es**
+- **MiTele**: mitele.es
 - **mixcloud**
 - **MLB**
 - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net

@@ -272,19 +297,23 @@
 - **Motherless**
 - **Motorsport**: motorsport.com
 - **MovieClips**
+- **MovieFap**
 - **Moviezine**
 - **movshare**: MovShare
 - **MPORA**
 - **MSNBC**
 - **MTV**
 - **mtv.de**
 - **mtviggy.com**
 - **mtvservices:embedded**
 - **MuenchenTV**: münchen.tv
 - **MusicPlayOn**
 - **MusicVault**
 - **muzu.tv**
+- **Mwave**
 - **MySpace**
 - **MySpace:album**
 - **MySpass**
+- **Myvi**
 - **myvideo**
 - **MyVidster**
 - **n-tv.de**

@@ -295,15 +324,24 @@
 - **NBCNews**
 - **NBCSports**
 - **NBCSportsVPlayer**
-- **ndr**: NDR.de - Mediathek
+- **ndr**: NDR.de - Norddeutscher Rundfunk
+- **ndr:embed**
+- **ndr:embed:base**
 - **NDTV**
 - **NerdCubedFeed**
 - **Nerdist**
+- **netease:album**: 网易云音乐 - 专辑
+- **netease:djradio**: 网易云音乐 - 电台
+- **netease:mv**: 网易云音乐 - MV
+- **netease:playlist**: 网易云音乐 - 歌单
+- **netease:program**: 网易云音乐 - 电台节目
+- **netease:singer**: 网易云音乐 - 歌手
+- **netease:song**: 网易云音乐
 - **Netzkino**
 - **Newgrounds**
 - **Newstube**
-- **NextMedia**
-- **NextMediaActionNews**
+- **NextMedia**: 蘋果日報
+- **NextMediaActionNews**: 蘋果日報 - 動新聞
 - **nfb**: National Film Board of Canada
 - **nfl.com**
 - **nhl.com**

@@ -311,28 +349,36 @@
 - **nhl.com:videocenter**: NHL videocenter category
 - **niconico**: ニコニコ動画
 - **NiconicoPlaylist**
+- **njoy**: N-JOY
+- **njoy:embed**
 - **Noco**
 - **Normalboots**
 - **NosVideo**
+- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
 - **novamov**: NovaMov
-- **Nowness**
+- **nowness**
+- **nowness:playlist**
+- **nowness:series**
 - **NowTV**
 - **nowvideo**: NowVideo
-- **npo.nl**
+- **npo**: npo.nl and ntr.nl
 - **npo.nl:live**
 - **npo.nl:radio**
 - **npo.nl:radio:fragment**
 - **NRK**
 - **NRKPlaylist**
-- **NRKTV**
+- **NRKTV**: NRK TV and NRK Radio
 - **ntv.ru**
 - **Nuvid**
 - **NYTimes**
+- **NYTimesArticle**
 - **ocw.mit.edu**
 - **Odnoklassniki**
 - **OktoberfestTV**
 - **on.aol.com**
+- **OnionStudios**
 - **Ooyala**
-- **OpenFilm**
+- **OoyalaExternal**
 - **orf:fm4**: radio FM4
 - **orf:iptv**: iptv.ORF.at
 - **orf:oe1**: Radio Österreich 1
@@ -340,15 +386,20 @@
 - **parliamentlive.tv**: UK parliament videos
 - **Patreon**
 - **PBS**
+- **Periscope**: Periscope
 - **PhilharmonieDeParis**: Philharmonie de Paris
 - **Phoenix**
 - **Photobucket**
+- **Pinkbike**
 - **Pladform**
 - **PlanetaPlay**
 - **play.fm**
 - **played.to**
+- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
 - **Playvid**
 - **Playwire**
+- **pluralsight**
+- **pluralsight:course**
 - **plus.google**: Google Plus
 - **pluzz.francetv.fr**
 - **podomatic**

@@ -363,9 +414,12 @@
 - **prosiebensat1**: ProSiebenSat.1 Digital
 - **Puls4**
 - **Pyvideo**
-- **QQMusic**
-- **QQMusicAlbum**
-- **QQMusicSinger**
+- **qqmusic**: QQ音乐
+- **qqmusic:album**: QQ音乐 - 专辑
+- **qqmusic:playlist**: QQ音乐 - 歌单
+- **qqmusic:singer**: QQ音乐 - 歌手
+- **qqmusic:toplist**: QQ音乐 - 排行榜
+- **Quickscope**: Quick Scope
 - **QuickVid**
 - **R7**
 - **radio.de**

@@ -374,6 +428,7 @@
 - **RadioJavan**
 - **Rai**
 - **RBMARadio**
+- **RDS**: RDS.ca
 - **RedTube**
 - **Restudy**
 - **ReverbNation**

@@ -384,12 +439,12 @@
 - **Rte**
 - **rtl.nl**: rtl.nl and rtlxl.nl
 - **RTL2**
-- **RTLnow**
 - **RTP**
 - **RTS**: RTS.ch
 - **rtve.es:alacarta**: RTVE a la carta
 - **rtve.es:infantil**: RTVE infantil
 - **rtve.es:live**: RTVE.es live streams
+- **RTVNH**
 - **RUHD**
 - **rutube**: Rutube videos
 - **rutube:channel**: Rutube channels

@@ -397,6 +452,7 @@
 - **rutube:movie**: Rutube movies
 - **rutube:person**: Rutube person videos
 - **RUTV**: RUTV.RU
+- **Ruutu**
 - **safari**: safaribooksonline.com online video
 - **safari:course**: safaribooksonline.com online courses
 - **Sandia**: Sandia National Laboratories

@@ -412,7 +468,8 @@
 - **ServingSys**
 - **Sexu**
 - **SexyKarma**: Sexy Karma and Watch Indian Porn
-- **Shared**
+- **Shahid**
+- **Shared**: shared.sx and vivo.sx
 - **ShareSix**
 - **Sina**
 - **Slideshare**

@@ -421,9 +478,12 @@
 - **smotri:broadcast**: Smotri.com broadcasts
 - **smotri:community**: Smotri.com community videos
 - **smotri:user**: Smotri.com user videos
+- **SnagFilms**
+- **SnagFilmsEmbed**
 - **Snotr**
-- **Sockshare**
 - **Sohu**
+- **soompi**
+- **soompi:show**
 - **soundcloud**
 - **soundcloud:playlist**
 - **soundcloud:set**

@@ -433,6 +493,8 @@
 - **southpark.cc.com**
 - **southpark.cc.com:español**
 - **southpark.de**
+- **southpark.nl**
+- **southparkstudios.dk**
 - **Space**
 - **SpankBang**
 - **Spankwire**

@@ -442,7 +504,9 @@
 - **Spike**
 - **Sport5**
 - **SportBox**
+- **SportBoxEmbed**
 - **SportDeutschland**
 - **Sportschau**
+- **Srf**
 - **SRMediathek**: Saarländischer Rundfunk
 - **SSA**

@@ -452,6 +516,7 @@
 - **StreamCZ**
 - **StreetVoice**
 - **SunPorno**
+- **SVT**
 - **SVTPlay**: SVT Play and Öppet arkiv
 - **SWRMediathek**
 - **Syfy**

@@ -467,9 +532,9 @@
 - **TechTalks**
 - **techtv.mit.edu**
 - **ted**
-- **tegenlicht.vpro.nl**
 - **TeleBruxelles**
-- **telecinco.es**
+- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
+- **Telegraaf**
 - **TeleMB**
 - **TeleTask**
 - **TenPlay**

@@ -477,7 +542,9 @@
 - **TF1**
 - **TheOnion**
 - **ThePlatform**
+- **ThePlatformFeed**
 - **TheSixtyOne**
+- **ThisAmericanLife**
 - **ThisAV**
 - **THVideo**
 - **THVideoPlaylist**

@@ -485,6 +552,7 @@
 - **tlc.com**
 - **tlc.de**
 - **TMZ**
+- **TMZArticle**
 - **TNAFlix**
 - **tou.tv**
 - **Toypics**: Toypics user profile
@@ -493,13 +561,18 @@
 - **Trilulilu**
 - **TruTube**
 - **Tube8**
+- **TubiTv**
 - **Tudou**
 - **Tumblr**
 - **TuneIn**
 - **Turbo**
 - **Tutv**
 - **tv.dfb.de**
+- **TV2**
+- **TV2Article**
 - **TV4**: tv4.se and tv4play.se
+- **TVC**
+- **TVCArticle**
 - **tvigle**: Интернет-телевидение Tvigle.ru
 - **tvp.pl**
 - **tvp.pl:Series**

@@ -512,10 +585,11 @@
 - **twitch:stream**
 - **twitch:video**
 - **twitch:vod**
+- **TwitterCard**
 - **Ubu**
 - **udemy**
 - **udemy:course**
-- **UDNEmbed**
+- **UDNEmbed**: 聯合影音
 - **Ultimedia**
 - **Unistra**
 - **Urort**: NRK P3 Urørt

@@ -528,13 +602,12 @@
 - **Vessel**
 - **Vesti**: Вести.Ru
 - **Vevo**
-- **VGTV**
+- **VGTV**: VGTV and BTTV
 - **vh1.com**
 - **Vice**
 - **Viddler**
 - **video.google:search**: Google Video search
 - **video.mit.edu**
-- **VideoBam**
 - **VideoDetective**
 - **videofy.me**
 - **videolectures.net**

@@ -548,6 +621,7 @@
 - **vier:videos**
 - **Viewster**
 - **viki**
+- **viki:channel**
 - **vimeo**
 - **vimeo:album**
 - **vimeo:channel**

@@ -559,10 +633,13 @@
 - **Vimple**: Vimple - one-click video hosting
 - **Vine**
 - **vine:user**
-- **vk.com**
-- **vk.com:user-videos**: vk.com:All of a user's videos
+- **vk**: VK
+- **vk:uservideos**: VK - User's Videos
+- **vlive**
 - **Vodlocker**
+- **VoiceRepublic**
 - **Vporn**
+- **vpro**: npo.nl and ntr.nl
 - **VRT**
 - **vube**: Vube.com
 - **VuClip**

@@ -575,31 +652,36 @@
 - **wdr:mobile**
 - **WDRMaus**: Sendung mit der Maus
 - **WebOfStories**
+- **WebOfStoriesPlaylist**
 - **Weibo**
 - **Wimp**
 - **Wistia**
+- **WNL**
 - **WorldStarHipHop**
 - **wrzuta.pl**
 - **WSJ**: Wall Street Journal
 - **XBef**
 - **XboxClips**
 - **XHamster**
+- **XHamsterEmbed**
 - **XMinus**
 - **XNXX**
+- **Xstream**
 - **XTube**
 - **XTubeUser**: XTube user profile
-- **Xuite**
+- **Xuite**: 隨意窩Xuite影音
 - **XVideos**
 - **XXXYMovies**
 - **Yahoo**: Yahoo screen and movies
-- **Yam**
+- **Yam**: 蕃薯藤yam天空部落
 - **yandexmusic:album**: Яндекс.Музыка - Альбом
 - **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
 - **yandexmusic:track**: Яндекс.Музыка - Трек
 - **YesJapan**
+- **yinyuetai:video**: 音悦Tai
 - **Ynet**
 - **YouJizz**
-- **Youku**
+- **youku**: 优酷
 - **YouPorn**
 - **YourUpload**
 - **youtube**: YouTube.com
test/helper.py

@@ -133,8 +133,8 @@ def expect_info_dict(self, got_dict, expected_dict):
         elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
             got = got_dict.get(info_field)
             self.assertTrue(
-                isinstance(got, list),
-                'Expected field %s to be a list, but it is of type %s' % (
+                isinstance(got, (list, dict)),
+                'Expected field %s to be a list or a dict, but it is of type %s' % (
                     info_field, type(got).__name__))
             expected_num = int(expected.partition(':')[2])
             assertGreaterEqual(

@@ -160,7 +160,7 @@ def expect_info_dict(self, got_dict, expected_dict):
     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
                           for key, value in got_dict.items()
-                          if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+                          if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
         def _repr(v):
test/test_YoutubeDL.py

@@ -12,9 +12,10 @@ import copy

 from test.helper import FakeYDL, assertRegexpMatches
 from youtube_dl import YoutubeDL
+from youtube_dl.compat import compat_str
 from youtube_dl.extractor import YoutubeIE
 from youtube_dl.postprocessor.common import PostProcessor
-from youtube_dl.utils import match_filter_func
+from youtube_dl.utils import ExtractorError, match_filter_func

 TEST_URL = 'http://localhost/sample.mp4'

@@ -104,6 +105,7 @@ class TestFormatSelection(unittest.TestCase):
     def test_format_selection(self):
         formats = [
             {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+            {'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL},
             {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
             {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
             {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},

@@ -135,6 +137,11 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], '35')

+        ydl = YDL({'format': 'example-with-dashes'})
+        ydl.process_ie_result(info_dict.copy())
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'example-with-dashes')
+
     def test_format_selection_audio(self):
         formats = [
             {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},

@@ -228,21 +235,70 @@ class TestFormatSelection(unittest.TestCase):
             '141', '172', '140', '171', '139',
         ]

-        for f1id, f2id in zip(order, order[1:]):
-            f1 = YoutubeIE._formats[f1id].copy()
-            f1['format_id'] = f1id
-            f1['url'] = 'url:' + f1id
-            f2 = YoutubeIE._formats[f2id].copy()
-            f2['format_id'] = f2id
-            f2['url'] = 'url:' + f2id
+        def format_info(f_id):
+            info = YoutubeIE._formats[f_id].copy()
+            info['format_id'] = f_id
+            info['url'] = 'url:' + f_id
+            return info
+        formats_order = [format_info(f_id) for f_id in order]
+
+        info_dict = _make_result(list(formats_order), extractor='youtube')
+        ydl = YDL({'format': 'bestvideo+bestaudio'})
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], '137+141')
+        self.assertEqual(downloaded['ext'], 'mp4')
+
+        info_dict = _make_result(list(formats_order), extractor='youtube')
+        ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'})
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], '38')
+
+        info_dict = _make_result(list(formats_order), extractor='youtube')
+        ydl = YDL({'format': 'bestvideo/best,bestaudio'})
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+        self.assertEqual(downloaded_ids, ['137', '141'])
+
+        info_dict = _make_result(list(formats_order), extractor='youtube')
+        ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'})
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+        self.assertEqual(downloaded_ids, ['137+141', '248+141'])
+
+        info_dict = _make_result(list(formats_order), extractor='youtube')
+        ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'})
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+        self.assertEqual(downloaded_ids, ['136+141', '247+141'])
+
+        info_dict = _make_result(list(formats_order), extractor='youtube')
+        ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'})
+        yie = YoutubeIE(ydl)
+        yie._sort_formats(info_dict['formats'])
+        ydl.process_ie_result(info_dict)
+        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+        self.assertEqual(downloaded_ids, ['248+141'])
+
+        for f1, f2 in zip(formats_order, formats_order[1:]):
             info_dict = _make_result([f1, f2], extractor='youtube')
             ydl = YDL({'format': 'best/bestvideo'})
             yie = YoutubeIE(ydl)
             yie._sort_formats(info_dict['formats'])
             ydl.process_ie_result(info_dict)
             downloaded = ydl.downloaded_info_dicts[0]
-            self.assertEqual(downloaded['format_id'], f1id)
+            self.assertEqual(downloaded['format_id'], f1['format_id'])

             info_dict = _make_result([f2, f1], extractor='youtube')
             ydl = YDL({'format': 'best/bestvideo'})

@@ -250,7 +306,18 @@ class TestFormatSelection(unittest.TestCase):
             yie._sort_formats(info_dict['formats'])
             ydl.process_ie_result(info_dict)
             downloaded = ydl.downloaded_info_dicts[0]
-            self.assertEqual(downloaded['format_id'], f1id)
+            self.assertEqual(downloaded['format_id'], f1['format_id'])
+
+    def test_invalid_format_specs(self):
+        def assert_syntax_error(format_spec):
+            ydl = YDL({'format': format_spec})
+            info_dict = _make_result([{'format_id': 'foo', 'url': TEST_URL}])
+            self.assertRaises(SyntaxError, ydl.process_ie_result, info_dict)
+
+        assert_syntax_error('bestvideo,,best')
+        assert_syntax_error('+bestaudio')
+        assert_syntax_error('bestvideo+')
+        assert_syntax_error('/')

     def test_format_filtering(self):
         formats = [

@@ -307,6 +374,18 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'G')

+        ydl = YDL({'format': 'all[width>=400][width<=600]'})
+        ydl.process_ie_result(info_dict)
+        downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+        self.assertEqual(downloaded_ids, ['B', 'C', 'D'])
+
+        ydl = YDL({'format': 'best[height<40]'})
+        try:
+            ydl.process_ie_result(info_dict)
+        except ExtractorError:
+            pass
+        self.assertEqual(ydl.downloaded_info_dicts, [])
+

 class TestYoutubeDL(unittest.TestCase):
     def test_subtitles(self):

@@ -507,6 +586,51 @@ class TestYoutubeDL(unittest.TestCase):
         res = get_videos(f)
         self.assertEqual(res, ['1'])

+    def test_playlist_items_selection(self):
+        entries = [{
+            'id': compat_str(i),
+            'title': compat_str(i),
+            'url': TEST_URL,
+        } for i in range(1, 5)]
+        playlist = {
+            '_type': 'playlist',
+            'id': 'test',
+            'entries': entries,
+            'extractor': 'test:playlist',
+            'extractor_key': 'test:playlist',
+            'webpage_url': 'http://example.com',
+        }
+
+        def get_ids(params):
+            ydl = YDL(params)
+            # make a copy because the dictionary can be modified
+            ydl.process_ie_result(playlist.copy())
+            return [int(v['id']) for v in ydl.downloaded_info_dicts]
+
+        result = get_ids({})
+        self.assertEqual(result, [1, 2, 3, 4])
+
+        result = get_ids({'playlistend': 10})
+        self.assertEqual(result, [1, 2, 3, 4])
+
+        result = get_ids({'playlistend': 2})
+        self.assertEqual(result, [1, 2])
+
+        result = get_ids({'playliststart': 10})
+        self.assertEqual(result, [])
+
+        result = get_ids({'playliststart': 2})
+        self.assertEqual(result, [2, 3, 4])
+
+        result = get_ids({'playlist_items': '2-4'})
+        self.assertEqual(result, [2, 3, 4])
+
+        result = get_ids({'playlist_items': '2,4'})
+        self.assertEqual(result, [2, 4])
+
+        result = get_ids({'playlist_items': '10'})
+        self.assertEqual(result, [])
+

 if __name__ == '__main__':
     unittest.main()
test/test_aes.py

@@ -39,7 +39,7 @@ class TestAES(unittest.TestCase):
         encrypted = base64.b64encode(
             intlist_to_bytes(self.iv[:8]) +
             b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
-        )
+        ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 16))
         self.assertEqual(decrypted, self.secret_msg)

@@ -47,7 +47,7 @@ class TestAES(unittest.TestCase):
         encrypted = base64.b64encode(
             intlist_to_bytes(self.iv[:8]) +
             b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
-        )
+        ).decode('utf-8')
         decrypted = (aes_decrypt_text(encrypted, password, 32))
         self.assertEqual(decrypted, self.secret_msg)
test/test_compat.py

@@ -14,6 +14,9 @@ from youtube_dl.utils import get_filesystem_encoding
 from youtube_dl.compat import (
     compat_getenv,
     compat_expanduser,
+    compat_shlex_split,
+    compat_urllib_parse_unquote,
+    compat_urllib_parse_unquote_plus,
 )

@@ -42,5 +45,31 @@ class TestCompat(unittest.TestCase):
             dir(youtube_dl.compat))) - set(['unicode_literals'])
         self.assertEqual(all_names, sorted(present_names))

+    def test_compat_urllib_parse_unquote(self):
+        self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
+        self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def')
+        self.assertEqual(compat_urllib_parse_unquote(''), '')
+        self.assertEqual(compat_urllib_parse_unquote('%'), '%')
+        self.assertEqual(compat_urllib_parse_unquote('%%'), '%%')
+        self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%')
+        self.assertEqual(compat_urllib_parse_unquote('%2F'), '/')
+        self.assertEqual(compat_urllib_parse_unquote('%2f'), '/')
+        self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波')
+        self.assertEqual(
+            compat_urllib_parse_unquote('''<meta property="og:description" content="%E2%96%81%E2%96%82%E2%96%83%E2%96%84%25%E2%96%85%E2%96%86%E2%96%87%E2%96%88" />
+%<a href="https://ar.wikipedia.org/wiki/%D8%AA%D8%B3%D9%88%D9%86%D8%A7%D9%85%D9%8A">%a'''),
+            '''<meta property="og:description" content="▁▂▃▄%▅▆▇█" />
+%<a href="https://ar.wikipedia.org/wiki/تسونامي">%a''')
+        self.assertEqual(
+            compat_urllib_parse_unquote('''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''),
+            '''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
+
+    def test_compat_urllib_parse_unquote_plus(self):
+        self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
+        self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
+
+    def test_compat_shlex_split(self):
+        self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
+

 if __name__ == '__main__':
     unittest.main()
test/test_download.py

@@ -136,7 +136,9 @@ def generator(test_case):
            # We're not using .download here since that is just a shim
            # for outside error handling, and returns the exit code
            # instead of the result dict.
-            res_dict = ydl.extract_info(test_case['url'])
+            res_dict = ydl.extract_info(
+                test_case['url'],
+                force_generic_extractor=params.get('force_generic_extractor', False))
         except (DownloadError, ExtractorError) as err:
             # Check if the exception is not a network related one
             if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
test/test_subtitles.py

@@ -25,6 +25,7 @@ from youtube_dl.extractor import (
     RaiIE,
     VikiIE,
     ThePlatformIE,
+    ThePlatformFeedIE,
     RTVEALaCartaIE,
     FunnyOrDieIE,
 )

@@ -266,7 +267,7 @@ class TestNRKSubtitles(BaseTestSubtitles):
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['no']))
-        self.assertEqual(md5(subtitles['no']), '1d221e6458c95c5494dcd38e6a1f129a')
+        self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')


 class TestRaiSubtitles(BaseTestSubtitles):

@@ -307,6 +308,18 @@ class TestThePlatformSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')


+class TestThePlatformFeedSubtitles(BaseTestSubtitles):
+    url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
+    IE = ThePlatformFeedIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')
+
+
 class TestRtveSubtitles(BaseTestSubtitles):
     url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
     IE = RTVEALaCartaIE
@ -40,7 +40,6 @@ from youtube_dl.utils import (
    read_batch_urls,
    sanitize_filename,
    sanitize_path,
    sanitize_url_path_consecutive_slashes,
    prepend_extension,
    replace_extension,
    shell_quote,
@ -53,15 +52,21 @@ from youtube_dl.utils import (
    unified_strdate,
    unsmuggle_url,
    uppercase_escape,
    lowercase_escape,
    url_basename,
    urlencode_postdata,
    version_tuple,
    xpath_with_ns,
    xpath_element,
    xpath_text,
    xpath_attr,
    render_table,
    match_str,
    parse_dfxp_time_expr,
    dfxp2srt,
    cli_option,
    cli_valueless_option,
    cli_bool_option,
)


@ -175,26 +180,6 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(sanitize_path('./abc'), 'abc')
        self.assertEqual(sanitize_path('./../abc'), '..\\abc')

    def test_sanitize_url_path_consecutive_slashes(self):
        self.assertEqual(
            sanitize_url_path_consecutive_slashes('http://hostname/foo//bar/filename.html'),
            'http://hostname/foo/bar/filename.html')
        self.assertEqual(
            sanitize_url_path_consecutive_slashes('http://hostname//foo/bar/filename.html'),
            'http://hostname/foo/bar/filename.html')
        self.assertEqual(
            sanitize_url_path_consecutive_slashes('http://hostname//'),
            'http://hostname/')
        self.assertEqual(
            sanitize_url_path_consecutive_slashes('http://hostname/foo/bar/filename.html'),
            'http://hostname/foo/bar/filename.html')
        self.assertEqual(
            sanitize_url_path_consecutive_slashes('http://hostname/'),
            'http://hostname/')
        self.assertEqual(
            sanitize_url_path_consecutive_slashes('http://hostname/abc//'),
            'http://hostname/abc/')

    def test_prepend_extension(self):
        self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
        self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
@ -255,12 +240,21 @@ class TestUtil(unittest.TestCase):
            <node x="a"/>
            <node x="a" y="c" />
            <node x="b" y="d" />
            <node x="" />
        </root>'''
        doc = xml.etree.ElementTree.fromstring(testxml)

        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])

    def test_xpath_with_ns(self):
        testxml = '''<root xmlns:media="http://example.com/">
@ -275,6 +269,16 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(find('media:song/media:author').text, 'The Author')
        self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')

    def test_xpath_element(self):
        doc = xml.etree.ElementTree.Element('root')
        div = xml.etree.ElementTree.SubElement(doc, 'div')
        p = xml.etree.ElementTree.SubElement(div, 'p')
        p.text = 'Foo'
        self.assertEqual(xpath_element(doc, 'div/p'), p)
        self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
        self.assertTrue(xpath_element(doc, 'div/bar') is None)
        self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)

    def test_xpath_text(self):
        testxml = '''<root>
            <div>
@ -283,9 +287,25 @@ class TestUtil(unittest.TestCase):
        </root>'''
        doc = xml.etree.ElementTree.fromstring(testxml)
        self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
        self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
        self.assertTrue(xpath_text(doc, 'div/bar') is None)
        self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)

    def test_xpath_attr(self):
        testxml = '''<root>
            <div>
                <p x="a">Foo</p>
            </div>
        </root>'''
        doc = xml.etree.ElementTree.fromstring(testxml)
        self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
        self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
        self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
        self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
        self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
        self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
        self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)

    def test_smuggle_url(self):
        data = {"ö": "ö", "abc": [3]}
        url = 'https://foo.bar/baz?x=y#a'
@ -344,6 +364,7 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(parse_duration('02:03:04'), 7384)
        self.assertEqual(parse_duration('01:02:03:04'), 93784)
        self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
        self.assertEqual(parse_duration('87 Min.'), 5220)

    def test_fix_xml_ampersands(self):
        self.assertEqual(
@ -418,6 +439,10 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(uppercase_escape('aä'), 'aä')
        self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')

    def test_lowercase_escape(self):
        self.assertEqual(lowercase_escape('aä'), 'aä')
        self.assertEqual(lowercase_escape('\\u0026'), '&')

    def test_limit_length(self):
        self.assertEqual(limit_length(None, 12), None)
        self.assertEqual(limit_length('foo', 12), 'foo')
@ -616,7 +641,7 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
                <div xml:lang="en">
                    <p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
                    <p begin="1" end="2">第二行<br/>♪♪</p>
                    <p begin="2" end="3"><span>Third<br/>Line</span></p>
                    <p begin="2" dur="1"><span>Third<br/>Line</span></p>
                </div>
            </body>
        </tt>'''
@ -637,6 +662,66 @@ Line
'''
        self.assertEqual(dfxp2srt(dfxp_data), srt_data)

        dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?>
            <tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
            <body>
                <div xml:lang="en">
                    <p begin="0" end="1">The first line</p>
                </div>
            </body>
            </tt>'''
        srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line

'''
        self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)

    def test_cli_option(self):
        self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
        self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
        self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])

    def test_cli_valueless_option(self):
        self.assertEqual(cli_valueless_option(
            {'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
        self.assertEqual(cli_valueless_option(
            {'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
        self.assertEqual(cli_valueless_option(
            {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
        self.assertEqual(cli_valueless_option(
            {'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
        self.assertEqual(cli_valueless_option(
            {'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
        self.assertEqual(cli_valueless_option(
            {'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])

    def test_cli_bool_option(self):
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
            ['--no-check-certificate', 'true'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
            ['--no-check-certificate=true'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
            ['--check-certificate', 'false'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
            ['--check-certificate=false'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
            ['--check-certificate', 'true'])
        self.assertEqual(
            cli_bool_option(
                {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
            ['--check-certificate=true'])


if __name__ == '__main__':
    unittest.main()
@ -33,7 +33,7 @@ params = get_params({


TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']


tox.ini
@ -1,9 +1,11 @@
[tox]
envlist = py26,py27,py33,py34
envlist = py26,py27,py33,py34,py35
[testenv]
deps =
    nose
    coverage
# We need a valid $HOME for test_compat_expanduser
passenv = HOME
defaultargs = test --exclude test_download.py --exclude test_age_restriction.py
    --exclude test_subtitles.py --exclude test_write_annotations.py
    --exclude test_youtube_lists.py
@ -21,24 +21,24 @@ import subprocess
import socket
import sys
import time
import tokenize
import traceback

if os.name == 'nt':
    import ctypes

from .compat import (
    compat_basestring,
    compat_cookiejar,
    compat_expanduser,
    compat_get_terminal_size,
    compat_http_client,
    compat_kwargs,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
)
from .utils import (
    escape_url,
    ContentTooShortError,
    date_from_str,
    DateRange,
@ -69,6 +69,7 @@ from .utils import (
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    prepend_extension,
    replace_extension,
@ -118,7 +119,7 @@ class YoutubeDL(object):

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for acces a video.
    videopassword:     Password for accessing a video.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
@ -138,6 +139,7 @@ class YoutubeDL(object):
    outtmpl:           Template for output names.
    restrictfilenames: Do not allow "&" and spaces in file names
    ignoreerrors:      Do not stop on download errors.
    force_generic_extractor: Force downloader to use the generic extractor
    nooverwrites:      Prevent overwriting files.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
@ -260,7 +262,8 @@ class YoutubeDL(object):

    The following options are used by the post processors:
    prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
                       otherwise prefer avconv.
    exec_cmd:          Arbitrary command to run after downloading
    postprocessor_args: A list of additional command-line arguments for the
                        postprocessor.
    """

    params = None
@ -282,7 +285,11 @@ class YoutubeDL(object):
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)

        if params.get('bidi_workaround', False):
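With the defaults block above, any setting the caller leaves out falls back to an explicit default instead of being looked up with ad-hoc `get` calls; a small sketch of the observable effect (assuming `youtube_dl` is importable):

```python
from youtube_dl import YoutubeDL

ydl = YoutubeDL({'quiet': True})
# 'nocheckcertificate' was never passed, but the default dict supplies it.
assert ydl.params['nocheckcertificate'] is False
```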
@ -626,13 +633,16 @@ class YoutubeDL(object):
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     process=True):
                     process=True, force_generic_extractor=False):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
        '''

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
@ -760,7 +770,9 @@ class YoutubeDL(object):
        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = [ie_entries[i - 1] for i in playlistitems]
                entries = [
                    ie_entries[i - 1] for i in playlistitems
                    if -n_all_entries <= i - 1 < n_all_entries]
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
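The new bounds check silently drops out-of-range `--playlist-items` indices rather than raising `IndexError`; the comprehension behaves like this self-contained sketch:

```python
ie_entries = ['a', 'b', 'c']
playlistitems = [1, 3, 99]  # 99 is out of range and is ignored
n_all_entries = len(ie_entries)
entries = [
    ie_entries[i - 1] for i in playlistitems
    if -n_all_entries <= i - 1 < n_all_entries]
assert entries == ['a', 'c']
```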
@ -845,8 +857,8 @@ class YoutubeDL(object):
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _apply_format_filter(self, format_spec, available_formats):
        " Returns a tuple of the remaining format_spec and filtered formats "
    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
@ -856,13 +868,13 @@ class YoutubeDL(object):
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*\[
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
            \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
            \]$
            $
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.search(format_spec)
        m = operator_rex.search(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
@ -873,7 +885,7 @@ class YoutubeDL(object):
            if comparison_value is None:
                raise ValueError(
                    'Invalid value %r in format specification %r' % (
                        m.group('value'), format_spec))
                        m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
@ -881,84 +893,283 @@ class YoutubeDL(object):
            '=': operator.eq,
            '!=': operator.ne,
        }
        str_operator_rex = re.compile(r'''(?x)\s*\[
        str_operator_rex = re.compile(r'''(?x)
            \s*(?P<key>ext|acodec|vcodec|container|protocol)
            \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
            \s*(?P<value>[a-zA-Z0-9_-]+)
            \s*\]$
            \s*$
            ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
        m = str_operator_rex.search(format_spec)
        m = str_operator_rex.search(filter_spec)
        if m:
            comparison_value = m.group('value')
            op = STR_OPERATORS[m.group('op')]

        if not m:
            raise ValueError('Invalid format specification %r' % format_spec)
            raise ValueError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        new_formats = [f for f in available_formats if _filter(f)]
        return _filter

        new_format_spec = format_spec[:-len(m.group(0))]
        if not new_format_spec:
            new_format_spec = 'best'
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        return (new_format_spec, new_formats)
        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

    def select_format(self, format_spec, available_formats):
        while format_spec.endswith(']'):
            format_spec, available_formats = self._apply_format_filter(
                format_spec, available_formats)
        if not available_formats:
            return None
        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        if format_spec in ['best', 'worst', None]:
            format_idx = 0 if format_spec == 'worst' else -1
            audiovideo_formats = [
                f for f in available_formats
                if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
            if audiovideo_formats:
                return audiovideo_formats[format_idx]
            # for audio only urls, select the best/worst audio format
            elif all(f.get('acodec') != 'none' for f in available_formats):
                return available_formats[format_idx]
        elif format_spec == 'bestaudio':
            audio_formats = [
                f for f in available_formats
                if f.get('vcodec') == 'none']
            if audio_formats:
                return audio_formats[-1]
        elif format_spec == 'worstaudio':
            audio_formats = [
                f for f in available_formats
                if f.get('vcodec') == 'none']
            if audio_formats:
                return audio_formats[0]
        elif format_spec == 'bestvideo':
            video_formats = [
                f for f in available_formats
                if f.get('acodec') == 'none']
            if video_formats:
                return video_formats[-1]
        elif format_spec == 'worstvideo':
            video_formats = [
                f for f in available_formats
                if f.get('acodec') == 'none']
            if video_formats:
                return video_formats[0]
        else:
            extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
            if format_spec in extensions:
                filter_f = lambda f: f['ext'] == format_spec
            else:
                filter_f = lambda f: f['format_id'] == format_spec
            matches = list(filter(filter_f, available_formats))
            if matches:
                return matches[-1]
        return None
        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        video_selector = current_selector
                        audio_selector = _parse_format_selection(tokens, inside_merge=True)
                        if not video_selector or not audio_selector:
                            raise syntax_error('"+" must be between two format selectors', start)
                        current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
                    else:
                        raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _build_selector_function(selector):
            if isinstance(selector, list):
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(formats):
                    for f in fs:
                        for format in f(formats):
                            yield format
                return selector_function
            elif selector.type == GROUP:
                selector_function = _build_selector_function(selector.selector)
            elif selector.type == PICKFIRST:
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(formats):
                    for f in fs:
                        picked_formats = list(f(formats))
                        if picked_formats:
                            return picked_formats
                    return []
            elif selector.type == SINGLE:
                format_spec = selector.selector

                def selector_function(formats):
                    formats = list(formats)
                    if not formats:
                        return
                    if format_spec == 'all':
                        for f in formats:
                            yield f
                    elif format_spec in ['best', 'worst', None]:
                        format_idx = 0 if format_spec == 'worst' else -1
                        audiovideo_formats = [
                            f for f in formats
                            if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
                        if audiovideo_formats:
                            yield audiovideo_formats[format_idx]
                        # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format
                        elif (all(f.get('acodec') != 'none' for f in formats) or
                              all(f.get('vcodec') != 'none' for f in formats)):
                            yield formats[format_idx]
                    elif format_spec == 'bestaudio':
                        audio_formats = [
                            f for f in formats
                            if f.get('vcodec') == 'none']
                        if audio_formats:
                            yield audio_formats[-1]
                    elif format_spec == 'worstaudio':
                        audio_formats = [
                            f for f in formats
                            if f.get('vcodec') == 'none']
                        if audio_formats:
                            yield audio_formats[0]
                    elif format_spec == 'bestvideo':
                        video_formats = [
                            f for f in formats
                            if f.get('acodec') == 'none']
                        if video_formats:
                            yield video_formats[-1]
                    elif format_spec == 'worstvideo':
                        video_formats = [
                            f for f in formats
                            if f.get('acodec') == 'none']
                        if video_formats:
                            yield video_formats[0]
                    else:
                        extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
                        if format_spec in extensions:
                            filter_f = lambda f: f['ext'] == format_spec
                        else:
                            filter_f = lambda f: f['format_id'] == format_spec
                        matches = list(filter(filter_f, formats))
                        if matches:
                            yield matches[-1]
            elif selector.type == MERGE:
                def _merge(formats_info):
                    format_1, format_2 = [f['format_id'] for f in formats_info]
                    # The first format must contain the video and the
                    # second the audio
                    if formats_info[0].get('vcodec') == 'none':
                        self.report_error('The first format must '
                                          'contain the video, try using '
                                          '"-f %s+%s"' % (format_2, format_1))
                        return
                    output_ext = (
                        formats_info[0]['ext']
                        if self.params.get('merge_output_format') is None
                        else self.params['merge_output_format'])
                    return {
                        'requested_formats': formats_info,
                        'format': '%s+%s' % (formats_info[0].get('format'),
                                             formats_info[1].get('format')),
                        'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                formats_info[1].get('format_id')),
                        'width': formats_info[0].get('width'),
                        'height': formats_info[0].get('height'),
                        'resolution': formats_info[0].get('resolution'),
                        'fps': formats_info[0].get('fps'),
                        'vcodec': formats_info[0].get('vcodec'),
                        'vbr': formats_info[0].get('vbr'),
                        'stretched_ratio': formats_info[0].get('stretched_ratio'),
                        'acodec': formats_info[1].get('acodec'),
                        'abr': formats_info[1].get('abr'),
                        'ext': output_ext,
                    }
                video_selector, audio_selector = map(_build_selector_function, selector.selector)

                def selector_function(formats):
                    formats = list(formats)
                    for pair in itertools.product(video_selector(formats), audio_selector(formats)):
                        yield _merge(pair)

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(formats):
                for _filter in filters:
                    formats = list(filter(_filter, formats))
                return selector_function(formats)
            return final_selector

        stream = io.BytesIO(format_spec.encode('utf-8'))
        try:
            tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator(object):
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

    def _calc_headers(self, info_dict):
        res = std_headers.copy()
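Taken together, `build_format_selector` compiles a format specification string into a function over lists of format dicts; a hedged usage sketch (the format dicts are illustrative, not from this commit):

```python
# Assuming `ydl` is a YoutubeDL instance:
formats = [
    {'format_id': '137', 'ext': 'mp4', 'height': 1080, 'vcodec': 'avc1', 'acodec': 'none'},
    {'format_id': '140', 'ext': 'm4a', 'vcodec': 'none', 'acodec': 'mp4a'},
    {'format_id': '22', 'ext': 'mp4', 'height': 720, 'vcodec': 'avc1', 'acodec': 'mp4a'},
]
selector = ydl.build_format_selector('bestvideo[height<=1080]+bestaudio/best')
chosen = list(selector(formats))  # merged 137+140 here, else falls back to 'best'
```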
@ -1001,7 +1212,7 @@ class YoutubeDL(object):
                t.get('preference'), t.get('width'), t.get('height'),
                t.get('id'), t.get('url')))
            for i, t in enumerate(thumbnails):
                if 'width' in t and 'height' in t:
                if t.get('width') and t.get('height'):
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                if t.get('id') is None:
                    t['id'] = '%d' % i
@ -1013,13 +1224,13 @@ class YoutubeDL(object):
            info_dict['display_id'] = info_dict['id']

        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Working around negative timestamps in Windows
            # (see http://bugs.python.org/issue1646728)
            if info_dict['timestamp'] < 0 and os.name == 'nt':
                info_dict['timestamp'] = 0
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
            # see http://bugs.python.org/issue1646728)
            try:
                upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
                info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
            except (ValueError, OverflowError, OSError):
                pass

        if self.params.get('listsubtitles', False):
            if 'automatic_captions' in info_dict:
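The `try`/`except` now covers every way `utcfromtimestamp` can reject a bad timestamp, not only the negative-on-Windows case; the failure mode it guards is easy to reproduce:

```python
import datetime

try:
    # Raises OverflowError (or OSError/ValueError depending on platform)
    datetime.datetime.utcfromtimestamp(10 ** 18)
except (ValueError, OverflowError, OSError):
    pass  # upload_date is simply left unset
```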
@ -1030,12 +1241,6 @@ class YoutubeDL(object):
                info_dict['id'], info_dict.get('subtitles'),
                info_dict.get('automatic_captions'))

        # These extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
                self.process_info(info_dict)
            return info_dict

        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
@ -1046,6 +1251,8 @@ class YoutubeDL(object):
        if not formats:
            raise ExtractorError('No video formats found!')

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            if 'url' not in format:
@ -1053,6 +1260,18 @@ class YoutubeDL(object):

            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        for format_id, ambiguous_formats in formats_dict.items():
            if len(ambiguous_formats) > 1:
                for i, format in enumerate(ambiguous_formats):
                    format['format_id'] = '%s-%d' % (format_id, i)

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
@ -1086,60 +1305,16 @@ class YoutubeDL(object):
        req_format = self.params.get('format')
        if req_format is None:
            req_format_list = []
            if info_dict['extractor'] in ['youtube', 'ted'] and FFmpegMergerPP(self).available:
                req_format_list.append('bestvideo+bestaudio')
            if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
                    info_dict['extractor'] in ['youtube', 'ted'] and
                    not info_dict.get('is_live')):
                merger = FFmpegMergerPP(self)
                if merger.available and merger.can_merge():
                    req_format_list.append('bestvideo+bestaudio')
            req_format_list.append('best')
            req_format = '/'.join(req_format_list)
        formats_to_download = []
        if req_format == 'all':
            formats_to_download = formats
        else:
            for rfstr in req_format.split(','):
                # We can accept formats requested in the format: 34/5/best, we pick
                # the first that is available, starting from left
                req_formats = rfstr.split('/')
                for rf in req_formats:
                    if re.match(r'.+?\+.+?', rf) is not None:
                        # Two formats have been requested like '137+139'
                        format_1, format_2 = rf.split('+')
                        formats_info = (self.select_format(format_1, formats),
                                        self.select_format(format_2, formats))
                        if all(formats_info):
                            # The first format must contain the video and the
                            # second the audio
                            if formats_info[0].get('vcodec') == 'none':
                                self.report_error('The first format must '
                                                  'contain the video, try using '
                                                  '"-f %s+%s"' % (format_2, format_1))
                                return
                            output_ext = (
                                formats_info[0]['ext']
                                if self.params.get('merge_output_format') is None
                                else self.params['merge_output_format'])
                            selected_format = {
                                'requested_formats': formats_info,
                                'format': '%s+%s' % (formats_info[0].get('format'),
                                                     formats_info[1].get('format')),
                                'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                        formats_info[1].get('format_id')),
                                'width': formats_info[0].get('width'),
                                'height': formats_info[0].get('height'),
                                'resolution': formats_info[0].get('resolution'),
                                'fps': formats_info[0].get('fps'),
                                'vcodec': formats_info[0].get('vcodec'),
                                'vbr': formats_info[0].get('vbr'),
                                'stretched_ratio': formats_info[0].get('stretched_ratio'),
                                'acodec': formats_info[1].get('acodec'),
                                'abr': formats_info[1].get('abr'),
                                'ext': output_ext,
                            }
                        else:
                            selected_format = None
                    else:
                        selected_format = self.select_format(rf, formats)
                    if selected_format is not None:
                        formats_to_download.append(selected_format)
                        break
        format_selector = self.build_format_selector(req_format)
        formats_to_download = list(format_selector(formats))
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)
@ -1364,7 +1539,7 @@ class YoutubeDL(object):
                postprocessors = []
                self.report_warning('You have requested multiple '
                                    'formats but ffmpeg or avconv are not installed.'
                                    ' The formats won\'t be merged')
                                    ' The formats won\'t be merged.')
            else:
                postprocessors = [merger]

@ -1391,8 +1566,8 @@ class YoutubeDL(object):
            requested_formats = info_dict['requested_formats']
            if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                info_dict['ext'] = 'mkv'
                self.report_warning('You have requested formats incompatible for merge. '
                                    'The formats will be merged into mkv')
                self.report_warning(
                    'Requested formats are incompatible for merge and will be merged into mkv.')
            # Ensure filename always has a correct extension for successful merge
            filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
            if os.path.exists(encodeFilename(filename)):
@ -1479,7 +1654,8 @@ class YoutubeDL(object):
        for url in url_list:
            try:
                # It also downloads the videos
                res = self.extract_info(url)
                res = self.extract_info(
                    url, force_generic_extractor=self.params.get('force_generic_extractor', False))
            except UnavailableVideoError:
                self.report_error('unable to download video')
            except MaxDownloadsReached:
@ -1523,6 +1699,7 @@ class YoutubeDL(object):
            pps_chain.extend(ie_info['__postprocessors'])
        pps_chain.extend(self._pps)
        for pp in pps_chain:
            files_to_delete = []
            try:
                files_to_delete, info = pp.run(info)
            except PostProcessingError as e:
@ -1685,26 +1862,6 @@ class YoutubeDL(object):

    def urlopen(self, req):
        """ Start an HTTP download """

        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        req_is_string = isinstance(req, compat_basestring)
        url = req if req_is_string else req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            if req_is_string:
                req = url_escaped
            else:
                req = compat_urllib_request.Request(
                    url_escaped, data=req.data, headers=req.headers,
                    origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)

        return self._opener.open(req, timeout=self._socket_timeout)

    def print_debug_header(self):
@ -1787,8 +1944,7 @@ class YoutubeDL(object):
            if os.access(opts_cookiefile, os.R_OK):
                self.cookiejar.load()

        cookie_processor = compat_urllib_request.HTTPCookieProcessor(
            self.cookiejar)
        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
@ -1847,7 +2003,7 @@ class YoutubeDL(object):
            thumb_ext = determine_ext(t['url'], 'jpg')
            suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
            thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
            thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
            t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail %sis already present' %
@ -1857,7 +2013,7 @@ class YoutubeDL(object):
                          (info_dict['extractor'], info_dict['id'], thumb_display_id))
            try:
                uf = self.urlopen(t['url'])
                with open(thumb_filename, 'wb') as thumbf:
                with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                               (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
@ -9,7 +9,6 @@ import codecs
import io
import os
import random
import shlex
import sys


@ -20,6 +19,7 @@ from .compat import (
    compat_expanduser,
    compat_getpass,
    compat_print,
    compat_shlex_split,
    workaround_optparse_bug9161,
)
from .utils import (
@ -169,7 +169,7 @@ def _real_main(argv=None):
        if not opts.audioquality.isdigit():
            parser.error('invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
            parser.error('invalid video recode format specified')
    if opts.convertsubtitles is not None:
        if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
@ -240,13 +240,18 @@ def _real_main(argv=None):
    if opts.xattrs:
        postprocessors.append({'key': 'XAttrMetadata'})
    if opts.embedthumbnail:
        postprocessors.append({'key': 'EmbedThumbnail'})
        already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
        postprocessors.append({
            'key': 'EmbedThumbnail',
            'already_have_thumbnail': already_have_thumbnail
        })
        if not already_have_thumbnail:
            opts.writethumbnail = True
    # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
    # So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
    if opts.exec_cmd:
        postprocessors.append({
            'key': 'ExecAfterDownload',
            'verboseOutput': opts.verbose,
            'exec_cmd': opts.exec_cmd,
        })
    if opts.xattr_set_filesize:
@ -257,7 +262,10 @@ def _real_main(argv=None):
        parser.error('setting filesize xattr requested but python-xattr is not available')
    external_downloader_args = None
    if opts.external_downloader_args:
        external_downloader_args = shlex.split(opts.external_downloader_args)
        external_downloader_args = compat_shlex_split(opts.external_downloader_args)
    postprocessor_args = None
    if opts.postprocessor_args:
        postprocessor_args = compat_shlex_split(opts.postprocessor_args)
    match_filter = (
        None if opts.match_filter is None
        else match_filter_func(opts.match_filter))
@ -288,6 +296,7 @@ def _real_main(argv=None):
        'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'force_generic_extractor': opts.force_generic_extractor,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts_retries,
@ -345,7 +354,6 @@ def _real_main(argv=None):
        'default_search': opts.default_search,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'encoding': opts.encoding,
        'exec_cmd': opts.exec_cmd,
        'extract_flat': opts.extract_flat,
        'merge_output_format': opts.merge_output_format,
        'postprocessors': postprocessors,
@ -362,6 +370,7 @@ def _real_main(argv=None):
        'ffmpeg_location': opts.ffmpeg_location,
        'hls_prefer_native': opts.hls_prefer_native,
        'external_downloader_args': external_downloader_args,
        'postprocessor_args': postprocessor_args,
        'cn_verification_proxy': opts.cn_verification_proxy,
    }

@ -152,7 +152,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
    """
    NONCE_LENGTH_BYTES = 8

    data = bytes_to_intlist(base64.b64decode(data))
    data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
    password = bytes_to_intlist(password.encode('utf-8'))

    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
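The `.encode('utf-8')` makes the argument type uniform: on Python 3, handing `base64.b64decode` bytes avoids relying on its implicit ASCII decoding of `str` input. A quick illustration:

```python
import base64

data = 'aGVsbG8='
assert base64.b64decode(data.encode('utf-8')) == b'hello'
```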
@ -5,10 +5,12 @@ import getpass
import optparse
import os
import re
import shlex
import shutil
import socket
import subprocess
import sys
import itertools


try:
@ -41,6 +43,11 @@ try:
except ImportError:  # Python 2
    import cookielib as compat_cookiejar

try:
    import http.cookies as compat_cookies
except ImportError:  # Python 2
    import Cookie as compat_cookies

try:
    import html.entities as compat_html_entities
except ImportError:  # Python 2
@ -74,47 +81,79 @@ except ImportError:
    import BaseHTTPServer as compat_http_server

try:
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str

try:
    from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
    from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
        if string == '':
    from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError:  # Python 2
    _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
                else re.compile('([\x00-\x7f]+)'))

    # HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
    # implementations from cpython 3.4.3's stdlib. Python 2's version
    # is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)

    def compat_urllib_parse_unquote_to_bytes(string):
        """unquote_to_bytes('abc%20def') -> b'abc def'."""
        # Note: strings are encoded as UTF-8. This is only an issue if it contains
        # unescaped non-ASCII characters, which URIs should not.
        if not string:
            # Is it a string-like object?
            string.split
            return b''
        if isinstance(string, compat_str):
            string = string.encode('utf-8')
        bits = string.split(b'%')
        if len(bits) == 1:
            return string
        res = string.split('%')
        if len(res) == 1:
        res = [bits[0]]
        append = res.append
        for item in bits[1:]:
            try:
                append(compat_urllib_parse._hextochr[item[:2]])
                append(item[2:])
            except KeyError:
                append(b'%')
                append(item)
        return b''.join(res)

    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
        """Replace %xx escapes by their single-character equivalent. The optional
        encoding and errors parameters specify how to decode percent-encoded
        sequences into Unicode characters, as accepted by the bytes.decode()
        method.
        By default, percent-encoded sequences are decoded with UTF-8, and invalid
        sequences are replaced by a placeholder character.

        unquote('abc%20def') -> 'abc def'.
        """
        if '%' not in string:
            string.split
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string
        bits = _asciire.split(string)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
            append(bits[i + 1])
        return ''.join(res)

try:
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str
    def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
        """Like unquote(), but also replace plus signs by spaces, as required for
        unquoting HTML form values.

        unquote_plus('%7e/abc+def') -> '~/abc def'
        """
        string = string.replace('+', ' ')
        return compat_urllib_parse_unquote(string, encoding, errors)

try:
    compat_basestring = basestring  # Python 2
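The backported implementations behave like their CPython 3.4 counterparts, including multi-byte UTF-8 sequences split across several `%xx` escapes; the behaviour matches the tests earlier in this diff:

```python
from youtube_dl.compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_unquote_to_bytes,
)

assert compat_urllib_parse_unquote_to_bytes('abc%20def') == b'abc def'
assert compat_urllib_parse_unquote('%E4%B8%AD%E6%96%87') == '中文'
assert compat_urllib_parse_unquote_plus('%7e/abc+def') == '~/abc def'
```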
@ -189,6 +228,17 @@ except ImportError:  # Python < 3.3
        return "'" + s.replace("'", "'\"'\"'") + "'"


if sys.version_info >= (2, 7, 3):
    compat_shlex_split = shlex.split
else:
    # Working around shlex issue with unicode strings on some python 2
    # versions (see http://bugs.python.org/issue1548891)
    def compat_shlex_split(s, comments=False, posix=True):
        if isinstance(s, compat_str):
            s = s.encode('utf-8')
        return shlex.split(s, comments, posix)


def compat_ord(c):
    if type(c) is int:
        return c
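On the affected Python 2 releases, `shlex.split` chokes on unicode input (issue 1548891); the wrapper encodes first, so callers get consistent behaviour everywhere:

```python
from youtube_dl.compat import compat_shlex_split

assert compat_shlex_split('-option "one two"') == ['-option', 'one two']
```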
@ -388,12 +438,27 @@ else:
            pass
    return _terminal_size(columns, lines)

try:
    itertools.count(start=0, step=1)
    compat_itertools_count = itertools.count
except TypeError:  # Python 2.6
    def compat_itertools_count(start=0, step=1):
        n = start
        while True:
            yield n
            n += step

if sys.version_info >= (3, 0):
    from tokenize import tokenize as compat_tokenize_tokenize
else:
    from tokenize import generate_tokens as compat_tokenize_tokenize

__all__ = [
    'compat_HTTPError',
    'compat_basestring',
    'compat_chr',
    'compat_cookiejar',
    'compat_cookies',
    'compat_expanduser',
    'compat_get_terminal_size',
    'compat_getenv',
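The feature probe at import time picks the stdlib `itertools.count` when it already accepts keyword arguments and only falls back to the generator shim on Python 2.6:

```python
from youtube_dl.compat import compat_itertools_count

counter = compat_itertools_count(start=10, step=5)
assert [next(counter) for _ in range(3)] == [10, 15, 20]
```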
@ -401,16 +466,21 @@ __all__ = [
    'compat_html_entities',
    'compat_http_client',
    'compat_http_server',
    'compat_itertools_count',
    'compat_kwargs',
    'compat_ord',
    'compat_parse_qs',
    'compat_print',
    'compat_shlex_split',
    'compat_socket_create_connection',
    'compat_str',
    'compat_subprocess_get_DEVNULL',
    'compat_tokenize_tokenize',
    'compat_urllib_error',
    'compat_urllib_parse',
    'compat_urllib_parse_unquote',
    'compat_urllib_parse_unquote_plus',
    'compat_urllib_parse_unquote_to_bytes',
    'compat_urllib_parse_urlparse',
    'compat_urllib_request',
    'compat_urlparse',
@ -8,6 +8,7 @@ from .hls import NativeHlsFD
from .http import HttpFD
from .rtsp import RtspFD
from .rtmp import RtmpFD
from .dash import DashSegmentsFD

from ..utils import (
    determine_protocol,
@ -20,6 +21,7 @@ PROTOCOL_MAP = {
    'mms': RtspFD,
    'rtsp': RtspFD,
    'f4m': F4mFD,
    'http_dash_segments': DashSegmentsFD,
}


youtube_dl/downloader/dash.py (new file)
@ -0,0 +1,66 @@
from __future__ import unicode_literals

import re

from .common import FileDownloader
from ..compat import compat_urllib_request


class DashSegmentsFD(FileDownloader):
    """
    Download segments in a DASH manifest
    """
    def real_download(self, filename, info_dict):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)
        base_url = info_dict['url']
        segment_urls = info_dict['segment_urls']

        is_test = self.params.get('test', False)
        remaining_bytes = self._TEST_FILE_SIZE if is_test else None
        byte_counter = 0

        def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
            self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
            req = compat_urllib_request.Request(target_url)
            if remaining_bytes is not None:
                req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))

            data = self.ydl.urlopen(req).read()

            if remaining_bytes is not None:
                data = data[:remaining_bytes]

            outf.write(data)
            return len(data)

        def combine_url(base_url, target_url):
            if re.match(r'^https?://', target_url):
                return target_url
            return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)

        with open(tmpfilename, 'wb') as outf:
            append_url_to_file(
                outf, combine_url(base_url, info_dict['initialization_url']),
                'initialization segment')
            for i, segment_url in enumerate(segment_urls):
                segment_len = append_url_to_file(
                    outf, combine_url(base_url, segment_url),
                    'segment %d / %d' % (i + 1, len(segment_urls)),
                    remaining_bytes)
                byte_counter += segment_len
                if remaining_bytes is not None:
                    remaining_bytes -= segment_len
                    if remaining_bytes <= 0:
                        break

        self.try_rename(tmpfilename, filename)

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True
@ -5,6 +5,10 @@ import subprocess

from .common import FileDownloader
from ..utils import (
    cli_option,
    cli_valueless_option,
    cli_bool_option,
    cli_configuration_args,
    encodeFilename,
    encodeArgument,
)
@ -45,18 +49,17 @@ class ExternalFD(FileDownloader):
    def supports(cls, info_dict):
        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')

    def _source_address(self, command_option):
        source_address = self.params.get('source_address')
        if source_address is None:
            return []
        return [command_option, source_address]
    def _option(self, command_option, param):
        return cli_option(self.params, command_option, param)

    def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
        return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)

    def _valueless_option(self, command_option, param, expected_value=True):
        return cli_valueless_option(self.params, command_option, param, expected_value)

    def _configuration_args(self, default=[]):
        ex_args = self.params.get('external_downloader_args')
        if ex_args is None:
            return default
        assert isinstance(ex_args, list)
        return ex_args
        return cli_configuration_args(self.params, 'external_downloader_args', default)

    def _call_downloader(self, tmpfilename, info_dict):
        """ Either overwrite this or implement _make_cmd """
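These thin wrappers delegate to the `cli_*` helpers from `utils`, so every external downloader maps `self.params` to command-line flags the same way; a sketch of the mapping (param values are illustrative):

```python
from youtube_dl.utils import cli_bool_option, cli_option, cli_valueless_option

params = {'proxy': '127.0.0.1:3128', 'nocheckcertificate': True}
assert cli_option(params, '--proxy', 'proxy') == ['--proxy', '127.0.0.1:3128']
assert cli_valueless_option(params, '--insecure', 'nocheckcertificate') == ['--insecure']
assert cli_bool_option(
    params, '--check-certificate', 'nocheckcertificate',
    'false', 'true', '=') == ['--check-certificate=false']
```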
@ -77,7 +80,19 @@ class CurlFD(ExternalFD):
        cmd = [self.exe, '--location', '-o', tmpfilename]
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--interface')
        cmd += self._option('--interface', 'source_address')
        cmd += self._option('--proxy', 'proxy')
        cmd += self._valueless_option('--insecure', 'nocheckcertificate')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd


class AxelFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-o', tmpfilename]
        for key, val in info_dict['http_headers'].items():
            cmd += ['-H', '%s: %s' % (key, val)]
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd
@ -88,7 +103,9 @@ class WgetFD(ExternalFD):
        cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--bind-address')
        cmd += self._option('--bind-address', 'source_address')
        cmd += self._option('--proxy', 'proxy')
        cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd
@ -105,10 +122,20 @@ class Aria2cFD(ExternalFD):
        cmd += ['--out', os.path.basename(tmpfilename)]
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--interface')
        cmd += self._option('--interface', 'source_address')
        cmd += self._option('--all-proxy', 'proxy')
        cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
        cmd += ['--', info_dict['url']]
        return cmd


class HttpieFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
        for key, val in info_dict['http_headers'].items():
            cmd += ['%s:%s' % (key, val)]
        return cmd

_BY_NAME = dict(
    (klass.get_basename(), klass)
    for name, klass in globals().items()
@ -123,5 +150,6 @@ def list_external_downloaders():
def get_external_downloader(external_downloader):
    """ Given the name of the executable, see whether we support the given
        downloader. """
    bn = os.path.basename(external_downloader)
    # Drop .exe extension on Windows
    bn = os.path.splitext(os.path.basename(external_downloader))[0]
    return _BY_NAME[bn]
@ -7,17 +7,16 @@ import os
import time
import xml.etree.ElementTree as etree

from .common import FileDownloader
from .http import HttpFD
from .fragment import FragmentFD
from ..compat import (
    compat_urlparse,
    compat_urllib_error,
)
from ..utils import (
    struct_pack,
    struct_unpack,
    encodeFilename,
    sanitize_open,
    struct_pack,
    struct_unpack,
    xpath_text,
)

@ -226,16 +225,13 @@ def _add_ns(prop):
|
||||
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
|
||||
|
||||
|
||||
class HttpQuietDownloader(HttpFD):
|
||||
def to_screen(self, *args, **kargs):
|
||||
pass
|
||||
|
||||
|
||||
class F4mFD(FileDownloader):
|
||||
class F4mFD(FragmentFD):
|
||||
"""
|
||||
A downloader for f4m manifests or AdobeHDS.
|
||||
"""
|
||||
|
||||
FD_NAME = 'f4m'
|
||||
|
||||
def _get_unencrypted_media(self, doc):
|
||||
media = doc.findall(_add_ns('media'))
|
||||
if not media:
|
||||
@ -288,7 +284,7 @@ class F4mFD(FileDownloader):
|
||||
def real_download(self, filename, info_dict):
|
||||
man_url = info_dict['url']
|
||||
requested_bitrate = info_dict.get('tbr')
|
||||
self.to_screen('[download] Downloading f4m manifest')
|
||||
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
|
||||
manifest = self.ydl.urlopen(man_url).read()
|
||||
|
||||
doc = etree.fromstring(manifest)
|
||||
@ -320,67 +316,20 @@ class F4mFD(FileDownloader):
|
||||
# For some akamai manifests we'll need to add a query to the fragment url
|
||||
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
|
||||
|
||||
self.report_destination(filename)
|
||||
http_dl = HttpQuietDownloader(
|
||||
self.ydl,
|
||||
{
|
||||
'continuedl': True,
|
||||
'quiet': True,
|
||||
'noprogress': True,
|
||||
'ratelimit': self.params.get('ratelimit', None),
|
||||
'test': self.params.get('test', False),
|
||||
}
|
||||
)
|
||||
tmpfilename = self.temp_name(filename)
|
||||
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
|
||||
ctx = {
|
||||
'filename': filename,
|
||||
'total_frags': total_frags,
|
||||
}
|
||||
|
||||
self._prepare_frag_download(ctx)
|
||||
|
||||
dest_stream = ctx['dest_stream']
|
||||
|
||||
write_flv_header(dest_stream)
|
||||
if not live:
|
||||
write_metadata_tag(dest_stream, metadata)
|
||||
|
||||
# This dict stores the download progress, it's updated by the progress
|
||||
# hook
|
||||
state = {
|
||||
'status': 'downloading',
|
||||
'downloaded_bytes': 0,
|
||||
'frag_index': 0,
|
||||
'frag_count': total_frags,
|
||||
'filename': filename,
|
||||
'tmpfilename': tmpfilename,
|
||||
}
|
||||
start = time.time()
|
||||
|
||||
def frag_progress_hook(s):
|
||||
if s['status'] not in ('downloading', 'finished'):
|
||||
return
|
||||
|
||||
frag_total_bytes = s.get('total_bytes', 0)
|
||||
if s['status'] == 'finished':
|
||||
state['downloaded_bytes'] += frag_total_bytes
|
||||
state['frag_index'] += 1
|
||||
|
||||
estimated_size = (
|
||||
(state['downloaded_bytes'] + frag_total_bytes) /
|
||||
(state['frag_index'] + 1) * total_frags)
|
||||
time_now = time.time()
|
||||
state['total_bytes_estimate'] = estimated_size
|
||||
state['elapsed'] = time_now - start
|
||||
|
||||
if s['status'] == 'finished':
|
||||
progress = self.calc_percent(state['frag_index'], total_frags)
|
||||
else:
|
||||
frag_downloaded_bytes = s['downloaded_bytes']
|
||||
frag_progress = self.calc_percent(frag_downloaded_bytes,
|
||||
frag_total_bytes)
|
||||
progress = self.calc_percent(state['frag_index'], total_frags)
|
||||
progress += frag_progress / float(total_frags)
|
||||
|
||||
state['eta'] = self.calc_eta(
|
||||
start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
|
||||
state['speed'] = s.get('speed')
|
||||
self._hook_progress(state)
|
||||
|
||||
http_dl.add_progress_hook(frag_progress_hook)
|
||||
self._start_frag_download(ctx)
|
||||
|
||||
frags_filenames = []
|
||||
while fragments_list:
|
||||
@ -391,23 +340,24 @@ class F4mFD(FileDownloader):
|
||||
url += '?' + akamai_pv.strip(';')
|
||||
if info_dict.get('extra_param_to_segment_url'):
|
||||
url += info_dict.get('extra_param_to_segment_url')
|
||||
frag_filename = '%s-%s' % (tmpfilename, name)
|
||||
frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
|
||||
try:
|
||||
success = http_dl.download(frag_filename, {'url': url})
|
||||
success = ctx['dl'].download(frag_filename, {'url': url})
|
||||
if not success:
|
||||
return False
|
||||
with open(frag_filename, 'rb') as down:
|
||||
down_data = down.read()
|
||||
reader = FlvReader(down_data)
|
||||
while True:
|
||||
_, box_type, box_data = reader.read_box_info()
|
||||
if box_type == b'mdat':
|
||||
dest_stream.write(box_data)
|
||||
break
|
||||
(down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
|
||||
down_data = down.read()
|
||||
down.close()
|
||||
reader = FlvReader(down_data)
|
||||
while True:
|
||||
_, box_type, box_data = reader.read_box_info()
|
||||
if box_type == b'mdat':
|
||||
dest_stream.write(box_data)
|
||||
break
|
||||
if live:
|
||||
os.remove(frag_filename)
|
||||
os.remove(encodeFilename(frag_sanitized))
|
||||
else:
|
||||
frags_filenames.append(frag_filename)
|
||||
frags_filenames.append(frag_sanitized)
|
||||
except (compat_urllib_error.HTTPError, ) as err:
|
||||
if live and (err.code == 404 or err.code == 410):
|
||||
# We didn't keep up with the live window. Continue
|
||||
@ -425,20 +375,9 @@ class F4mFD(FileDownloader):
|
||||
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
|
||||
self.report_warning(msg)
|
||||
|
||||
dest_stream.close()
|
||||
self._finish_frag_download(ctx)
|
||||
|
||||
elapsed = time.time() - start
|
||||
self.try_rename(tmpfilename, filename)
|
||||
for frag_file in frags_filenames:
|
||||
os.remove(frag_file)
|
||||
|
||||
fsize = os.path.getsize(encodeFilename(filename))
|
||||
self._hook_progress({
|
||||
'downloaded_bytes': fsize,
|
||||
'total_bytes': fsize,
|
||||
'filename': filename,
|
||||
'status': 'finished',
|
||||
'elapsed': elapsed,
|
||||
})
|
||||
os.remove(encodeFilename(frag_file))
|
||||
|
||||
return True
|
||||
111
youtube_dl/downloader/fragment.py
Normal file
@ -0,0 +1,111 @@
from __future__ import division, unicode_literals

import os
import time

from .common import FileDownloader
from .http import HttpFD
from ..utils import (
encodeFilename,
sanitize_open,
)


class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kargs):
pass


class FragmentFD(FileDownloader):
"""
A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
"""

def _prepare_and_start_frag_download(self, ctx):
self._prepare_frag_download(ctx)
self._start_frag_download(ctx)

def _prepare_frag_download(self, ctx):
self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags']))
self.report_destination(ctx['filename'])
dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'ratelimit': self.params.get('ratelimit', None),
'retries': self.params.get('retries', 0),
'test': self.params.get('test', False),
}
)
tmpfilename = self.temp_name(ctx['filename'])
dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb')
ctx.update({
'dl': dl,
'dest_stream': dest_stream,
'tmpfilename': tmpfilename,
})

def _start_frag_download(self, ctx):
total_frags = ctx['total_frags']
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'status': 'downloading',
'downloaded_bytes': 0,
'frag_index': 0,
'frag_count': total_frags,
'filename': ctx['filename'],
'tmpfilename': ctx['tmpfilename'],
}
start = time.time()
ctx['started'] = start

def frag_progress_hook(s):
if s['status'] not in ('downloading', 'finished'):
return

frag_total_bytes = s.get('total_bytes', 0)
if s['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_index'] += 1

estimated_size = (
(state['downloaded_bytes'] + frag_total_bytes) /
(state['frag_index'] + 1) * total_frags)
time_now = time.time()
state['total_bytes_estimate'] = estimated_size
state['elapsed'] = time_now - start

if s['status'] == 'finished':
progress = self.calc_percent(state['frag_index'], total_frags)
else:
frag_downloaded_bytes = s['downloaded_bytes']
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_index'], total_frags)
progress += frag_progress / float(total_frags)

state['eta'] = self.calc_eta(
start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
state['speed'] = s.get('speed')
self._hook_progress(state)

ctx['dl'].add_progress_hook(frag_progress_hook)

return start

def _finish_frag_download(self, ctx):
ctx['dest_stream'].close()
elapsed = time.time() - ctx['started']
self.try_rename(ctx['tmpfilename'], ctx['filename'])
fsize = os.path.getsize(encodeFilename(ctx['filename']))

self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': ctx['filename'],
'status': 'finished',
'elapsed': elapsed,
})
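The size estimate inside `frag_progress_hook` extrapolates from what has been seen so far: the bytes of the finished fragments plus the fragment in flight, divided by the number of fragments accounted for, scaled up to `total_frags`. The arithmetic in isolation, with made-up numbers:

```python
from __future__ import division

total_frags = 100
downloaded_bytes = 3000000   # three finished fragments so far
frag_index = 3
frag_total_bytes = 1000000   # announced size of the fragment in flight

# average fragment size so far, extrapolated to the whole download
estimated_size = (
    (downloaded_bytes + frag_total_bytes) /
    (frag_index + 1) * total_frags)
print(estimated_size)  # 100000000.0 -> ~100 MB expected in total
```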
@ -4,15 +4,15 @@ import os
import re
import subprocess

from ..postprocessor.ffmpeg import FFmpegPostProcessor
from .common import FileDownloader
from ..compat import (
compat_urlparse,
compat_urllib_request,
)
from .fragment import FragmentFD

from ..compat import compat_urlparse
from ..postprocessor.ffmpeg import FFmpegPostProcessor
from ..utils import (
encodeArgument,
encodeFilename,
sanitize_open,
)


@ -28,10 +28,21 @@ class HlsFD(FileDownloader):
return False
ffpp.check_version()

args = [
encodeArgument(opt)
for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
args.append(encodeFilename(tmpfilename, True))
args = [ffpp.executable, '-y']

if info_dict['http_headers']:
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
args += [
'-headers',
''.join('%s: %s\r\n' % (key, val) for key, val in info_dict['http_headers'].items())]

args += ['-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc']

args = [encodeArgument(opt) for opt in args]
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))

self._debug_cmd(args)

retval = subprocess.call(args)
if retval == 0:
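The `-headers` value assembled above is a single argument containing every header, each terminated by `\r\n` — including the last one, which is exactly what silences the ffmpeg/avconv warning quoted in the comment. Roughly what gets passed (header values invented for the example):

```python
http_headers = {
    'User-Agent': 'youtube-dl',     # hypothetical values
    'Cookie': 'session=abc123',
}

headers_blob = ''.join(
    '%s: %s\r\n' % (key, val) for key, val in http_headers.items())
# ffmpeg receives one opaque argument after -headers, e.g.
# 'User-Agent: youtube-dl\r\nCookie: session=abc123\r\n'
# (header order follows dict iteration order)
args = ['ffmpeg', '-y', '-headers', headers_blob,
        '-i', 'https://example.com/master.m3u8']
```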
@ -51,54 +62,51 @@ class HlsFD(FileDownloader):
return False


class NativeHlsFD(FileDownloader):
class NativeHlsFD(FragmentFD):
""" A more limited implementation that does not require ffmpeg """

def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
FD_NAME = 'hlsnative'

self.to_screen(
'[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
data = self.ydl.urlopen(url).read()
s = data.decode('utf-8', 'ignore')
segment_urls = []
def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
manifest = self.ydl.urlopen(man_url).read()

s = manifest.decode('utf-8', 'ignore')
fragment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(url, line))
segment_urls.append(segment_url)

is_test = self.params.get('test', False)
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
byte_counter = 0
with open(tmpfilename, 'wb') as outf:
for i, segurl in enumerate(segment_urls):
self.to_screen(
'[hlsnative] %s: Downloading segment %d / %d' %
(info_dict['id'], i + 1, len(segment_urls)))
seg_req = compat_urllib_request.Request(segurl)
if remaining_bytes is not None:
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))

segment = self.ydl.urlopen(seg_req).read()
if remaining_bytes is not None:
segment = segment[:remaining_bytes]
remaining_bytes -= len(segment)
outf.write(segment)
byte_counter += len(segment)
if remaining_bytes is not None and remaining_bytes <= 0:
else compat_urlparse.urljoin(man_url, line))
fragment_urls.append(segment_url)
# We only download the first fragment during the test
if self.params.get('test', False):
break

self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
ctx = {
'filename': filename,
'status': 'finished',
})
self.try_rename(tmpfilename, filename)
'total_frags': len(fragment_urls),
}

self._prepare_and_start_frag_download(ctx)

frags_filenames = []
for i, frag_url in enumerate(fragment_urls):
frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
success = ctx['dl'].download(frag_filename, {'url': frag_url})
if not success:
return False
down, frag_sanitized = sanitize_open(frag_filename, 'rb')
ctx['dest_stream'].write(down.read())
down.close()
frags_filenames.append(frag_sanitized)

self._finish_frag_download(ctx)

for frag_file in frags_filenames:
os.remove(encodeFilename(frag_file))

return True
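In the rewritten manifest loop, lines that do not already start with `http(s)://` are resolved against the manifest URL with `compat_urlparse.urljoin`, so relative and absolute segment entries both work. The resolution step on its own (URLs invented):

```python
try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin      # Python 2

man_url = 'https://cdn.example.com/live/stream/index.m3u8'
print(urljoin(man_url, 'seg-001.ts'))
# -> https://cdn.example.com/live/stream/seg-001.ts
print(urljoin(man_url, '/other/seg-001.ts'))
# -> https://cdn.example.com/other/seg-001.ts
```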
@ -4,6 +4,7 @@ import errno
import os
import socket
import time
import re

from .common import FileDownloader
from ..compat import (
@ -57,6 +58,24 @@ class HttpFD(FileDownloader):
# Establish connection
try:
data = self.ydl.urlopen(request)
# When trying to resume, Content-Range HTTP header of response has to be checked
# to match the value of requested Range HTTP header. This is due to webservers
# that don't support resuming and serve a whole file with no Content-Range
# set in response despite the requested Range (see
# https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
if resume_len > 0:
content_range = data.headers.get('Content-Range')
if content_range:
content_range_m = re.search(r'bytes (\d+)-', content_range)
# Content-Range is present and matches requested Range, resume is possible
if content_range_m and resume_len == int(content_range_m.group(1)):
break
# Content-Range is either not present or invalid. Assuming remote webserver is
# trying to send the whole file, resume is not possible, so wiping the local file
# and performing entire redownload
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
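The resume check above only trusts the server when the advertised `Content-Range` starts exactly at the local byte offset; anything else is treated as "server ignored the Range header" and triggers a full redownload. The comparison in isolation (header value invented):

```python
import re

resume_len = 500  # bytes already on disk
content_range = 'bytes 500-999/1200'  # hypothetical response header

m = re.search(r'bytes (\d+)-', content_range)
if m and resume_len == int(m.group(1)):
    print('server honoured the Range request, resuming')
else:
    # missing or mismatching Content-Range: wipe and start over
    print('restarting download from byte 0')
```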
@ -131,7 +131,7 @@ class RtmpFD(FileDownloader):
if play_path is not None:
basic_args += ['--playpath', play_path]
if tc_url is not None:
basic_args += ['--tcUrl', url]
basic_args += ['--tcUrl', tc_url]
if test:
basic_args += ['--stop', '1']
if flash_version is not None:
@ -4,7 +4,10 @@ from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import AdobeTVIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
@ -16,9 +19,14 @ from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .ard import (
ARDIE,
ARDMediathekIE,
SportschauIE,
)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
@ -35,7 +43,10 @@ from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .bbc import (
BBCCoUkIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
@ -103,15 +114,21 @@ from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
@ -121,7 +138,6 @@ from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
@ -136,11 +152,12 @@ from .ellentv import (
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
@ -148,14 +165,11 @@ from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firedrive import FiredriveIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .fivetv import FiveTVIE
from .fktv import FKTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
@ -211,7 +225,6 @@ from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
@ -223,12 +236,21 @@ from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import ImgurIE
from .imgur import (
ImgurIE,
ImgurAlbumIE,
)
from .ina import InaIE
from .indavideo import (
IndavideoIE,
IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
@ -243,6 +265,7 @@ from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
@ -250,15 +273,27 @@ from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import LifeNewsIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
@ -303,13 +338,15 @@ from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
@ -320,19 +357,34 @@ from .nbc import (
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
MSNBCIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyRealtimeNewsIE,
AppleDailyAnimationNewsIE
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
@ -346,15 +398,22 @@ from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
TegenlichtVproIE,
VPROIE,
WNLIE
)
from .nrk import (
NRKIE,
@ -363,12 +422,18 @@ from .nrk import (
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import NYTimesIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .openfilm import OpenFilmIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .orf import (
ORFTVthekIE,
ORFOE1IE,
@ -378,16 +443,27 @@ from .orf import (
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .periscope import (
PeriscopeIE,
QuickscopeIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
@ -405,6 +481,8 @@ from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
@ -414,6 +492,7 @@ from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
@ -424,11 +503,11 @@ from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtlnow import RTLnowIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
@ -438,6 +517,7 @@ from .rutube import (
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
@ -454,6 +534,7 @@ from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
@ -465,9 +546,16 @@ from .smotri import (
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sockshare import SockshareIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
@ -480,8 +568,10 @@ from .soundgasm import (
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthparkDeIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
@ -490,7 +580,10 @@ from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
@ -501,7 +594,10 @@ from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svtplay import SVTPlayIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
@ -518,6 +614,7 @@ from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
@ -525,13 +622,24 @@ from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import TMZIE
from .tnaflix import TNAFlixIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
@ -542,12 +650,21 @@ from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
@ -566,6 +683,7 @@ from .twitch import (
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
@ -583,11 +701,14 @@ from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
@ -614,12 +735,17 @@ from .vine import (
VineIE,
VineUserIE,
)
from .viki import VikiIE
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
@ -634,7 +760,10 @@ from .wdr import (
WDRMobileIE,
WDRMausIE,
)
from .webofstories import WebOfStoriesIE
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
@ -643,12 +772,16 @@ from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
@ -661,6 +794,7 @@ from .yandexmusic import (
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
@ -1,16 +1,20 @@
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..utils import (
ExtractorError,
js_to_json,
int_or_none,
)


class ABCIE(InfoExtractor):
IE_NAME = 'abc.net.au'
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'

_TEST = {
_TESTS = [{
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
@ -19,22 +23,47 @@ class ABCIE(InfoExtractor):
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
}
}, {
'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
'md5': 'db2a5369238b51f9811ad815b69dc086',
'info_dict': {
'id': 'NvqvPeNZsHU',
'ext': 'mp4',
'upload_date': '20150816',
'uploader': 'ABC News (Australia)',
'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
'uploader_id': 'NewsOnABC',
'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
},
'add_ie': ['Youtube'],
}]

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)

urls_info_json = self._search_regex(
r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls',
flags=re.DOTALL)
urls_info = json.loads(urls_info_json.replace('\'', '"'))
mobj = re.search(
r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
webpage)
if mobj is None:
raise ExtractorError('Unable to extract video urls')

urls_info = self._parse_json(
mobj.group('json_data'), video_id, transform_source=js_to_json)

if not isinstance(urls_info, list):
urls_info = [urls_info]

if mobj.group('type') == 'YouTube':
return self.playlist_result([
self.url_result(url_info['url']) for url_info in urls_info])

formats = [{
'url': url_info['url'],
'width': int(url_info['width']),
'height': int(url_info['height']),
'tbr': int(url_info['bitrate']),
'filesize': int(url_info['filesize']),
'width': int_or_none(url_info.get('width')),
'height': int_or_none(url_info.get('height')),
'tbr': int_or_none(url_info.get('bitrate')),
'filesize': int_or_none(url_info.get('filesize')),
} for url_info in urls_info]
self._sort_formats(formats)

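The old ABC code faked JSON by swapping single quotes for double quotes, which breaks as soon as a value itself contains a quote; the new code hands the raw JavaScript object to `_parse_json` with `transform_source=js_to_json`. The transform on its own (input contrived):

```python
import json
from youtube_dl.utils import js_to_json

js_data = "{'url': 'http://example.com/clip.mp4', 'bitrate': 1024}"
data = json.loads(js_to_json(js_data))
print(data['url'], data['bitrate'])
# -> http://example.com/clip.mp4 1024
```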
@ -15,7 +15,7 @@ class AcademicEarthCourseIE(InfoExtractor):
'title': 'Laws of Nature',
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
},
'playlist_count': 4,
'playlist_count': 3,
}

def _real_extract(self, url):
@ -5,6 +5,8 @@ from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
float_or_none,
ISO639Utils,
)


@ -69,3 +71,61 @@ class AdobeTVIE(InfoExtractor):
'view_count': view_count,
'formats': formats,
}


class AdobeTVVideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'

_TEST = {
# From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
'url': 'https://video.tv.adobe.com/v/2456/',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
}

def _real_extract(self, url):
video_id = self._match_id(url)

webpage = self._download_webpage(url, video_id)

player_params = self._parse_json(self._search_regex(
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
video_id)

formats = [{
'url': source['src'],
'width': source.get('width'),
'height': source.get('height'),
'tbr': source.get('bitrate'),
} for source in player_params['sources']]

# For both metadata and downloaded files the duration varies among
# formats. I just pick the max one
duration = max(filter(None, [
float_or_none(source.get('duration'), scale=1000)
for source in player_params['sources']]))

subtitles = {}
for translation in player_params.get('translations', []):
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
if lang_id not in subtitles:
subtitles[lang_id] = []
subtitles[lang_id].append({
'url': translation['vttPath'],
'ext': 'vtt',
})

return {
'id': video_id,
'formats': formats,
'title': player_params['title'],
'description': self._og_search_description(webpage),
'duration': duration,
'subtitles': subtitles,
}
@ -156,7 +156,7 @@ class AdultSwimIE(InfoExtractor):
xpath_text(idoc, './/trt', 'segment duration').strip())

formats = []
file_els = idoc.findall('.//files/file')
file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')

for file_el in file_els:
bitrate = file_el.attrib.get('bitrate')
@ -1,21 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
xpath_with_ns,
xpath_text,
find_xpath_attr,
)


class AftenpostenIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/(?:#!/)?video/(?P<id>\d+)'

_TEST = {
'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
'md5': 'fd828cd29774a729bf4d4425fe192972',
@ -30,69 +20,4 @@ class AftenpostenIE(InfoExtractor):
}

def _real_extract(self, url):
video_id = self._match_id(url)

data = self._download_xml(
'http://frontend.xstream.dk/ap/feed/video/?platform=web&id=%s' % video_id, video_id)

NS_MAP = {
'atom': 'http://www.w3.org/2005/Atom',
'xt': 'http://xstream.dk/',
'media': 'http://search.yahoo.com/mrss/',
}

entry = data.find(xpath_with_ns('./atom:entry', NS_MAP))

title = xpath_text(
entry, xpath_with_ns('./atom:title', NS_MAP), 'title')
description = xpath_text(
entry, xpath_with_ns('./atom:summary', NS_MAP), 'description')
timestamp = parse_iso8601(xpath_text(
entry, xpath_with_ns('./atom:published', NS_MAP), 'upload date'))

formats = []
media_group = entry.find(xpath_with_ns('./media:group', NS_MAP))
for media_content in media_group.findall(xpath_with_ns('./media:content', NS_MAP)):
media_url = media_content.get('url')
if not media_url:
continue
tbr = int_or_none(media_content.get('bitrate'))
mobj = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', media_url)
if mobj:
formats.append({
'url': mobj.group('url'),
'play_path': 'mp4:%s' % mobj.group('playpath'),
'app': mobj.group('app'),
'ext': 'flv',
'tbr': tbr,
'format_id': 'rtmp-%d' % tbr,
})
else:
formats.append({
'url': media_url,
'tbr': tbr,
})
self._sort_formats(formats)

link = find_xpath_attr(
entry, xpath_with_ns('./atom:link', NS_MAP), 'rel', 'original')
if link is not None:
formats.append({
'url': link.get('href'),
'format_id': link.get('rel'),
})

thumbnails = [{
'url': splash.get('url'),
'width': int_or_none(splash.get('width')),
'height': int_or_none(splash.get('height')),
} for splash in media_group.findall(xpath_with_ns('./xt:splash', NS_MAP))]

return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'formats': formats,
'thumbnails': thumbnails,
}
return self.url_result('xstream:ap:%s' % self._match_id(url), 'Xstream')
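All of the hand-rolled Atom/MRSS parsing deleted above collapses into one line: the extractor now builds an `xstream:ap:<id>` pseudo-URL and defers to the shared Xstream extractor. This is the usual delegation idiom between extractors; a hedged sketch of the shape (the class here is hypothetical):

```python
from .common import InfoExtractor


class ExampleDelegatingIE(InfoExtractor):
    # Hypothetical extractor, shown only to illustrate the idiom.
    _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # url_result() returns a '_type': 'url' result; the second argument
        # names the extractor that should handle the new URL.
        return self.url_result('xstream:ap:%s' % video_id, 'Xstream')
```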
@ -6,11 +6,11 @@ from ..utils import int_or_none


class AftonbladetIE(InfoExtractor):
_VALID_URL = r'http://tv\.aftonbladet\.se/webbtv.+?(?P<id>article[0-9]+)\.ab(?:$|[?#])'
_VALID_URL = r'http://tv\.aftonbladet\.se/abtv/articles/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://tv.aftonbladet.se/webbtv/nyheter/vetenskap/rymden/article36015.ab',
'url': 'http://tv.aftonbladet.se/abtv/articles/36015',
'info_dict': {
'id': 'article36015',
'id': '36015',
'ext': 'mp4',
'title': 'Vulkanutbrott i rymden - nu släpper NASA bilderna',
'description': 'Jupiters måne mest aktiv av alla himlakroppar',
@ -25,8 +25,9 @@ class AftonbladetIE(InfoExtractor):

# find internal video meta data
meta_url = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
internal_meta_id = self._html_search_regex(
r'data-aptomaId="([\w\d]+)"', webpage, 'internal_meta_id')
player_config = self._parse_json(self._html_search_regex(
r'data-player-config="([^"]+)"', webpage, 'player config'), video_id)
internal_meta_id = player_config['videoId']
internal_meta_url = meta_url % internal_meta_id
internal_meta_json = self._download_json(
internal_meta_url, video_id, 'Downloading video meta data')
@ -20,14 +20,14 @@ class AirMozillaIE(InfoExtractor):
'id': '6x4q2w',
'ext': 'mp4',
'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
'thumbnail': 're:https://\w+\.cloudfront\.net/6x4q2w/poster\.jpg\?t=\d+',
'thumbnail': 're:https?://vid\.ly/(?P<id>[0-9a-z-]+)/poster',
'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
'timestamp': 1422487800,
'upload_date': '20150128',
'location': 'SFO Commons',
'duration': 3780,
'view_count': int,
'categories': ['Main'],
'categories': ['Main', 'Privacy'],
}
}

@ -16,6 +16,7 @@ class AlJazeeraIE(InfoExtractor):
'uploader': 'Al Jazeera English',
},
'add_ie': ['Brightcove'],
'skip': 'Not accessible from Travis CI server',
}

def _real_extract(self, url):
50
youtube_dl/extractor/appleconnect.py
Normal file
@ -0,0 +1,50 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
str_to_int,
ExtractorError
)


class AppleConnectIE(InfoExtractor):
_VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
_TEST = {
'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
'md5': '10d0f2799111df4cb1c924520ca78f98',
'info_dict': {
'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
'ext': 'm4v',
'title': 'Energy',
'uploader': 'Drake',
'thumbnail': 'http://is5.mzstatic.com/image/thumb/Video5/v4/78/61/c5/7861c5fa-ad6d-294b-1464-cf7605b911d6/source/1920x1080sr.jpg',
'upload_date': '20150710',
'timestamp': 1436545535,
},
}

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)

try:
video_json = self._html_search_regex(
r'class="auc-video-data">(\{.*?\})', webpage, 'json')
except ExtractorError:
raise ExtractorError('This post doesn\'t contain a video', expected=True)

video_data = self._parse_json(video_json, video_id)
timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp'))
like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count'))

return {
'id': video_id,
'url': video_data['sslSrc'],
'title': video_data['title'],
'description': video_data['description'],
'uploader': video_data['artistName'],
'thumbnail': video_data['artworkUrl'],
'timestamp': timestamp,
'like_count': like_count,
}
@ -33,7 +33,7 @@ class ArchiveOrgIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)

json_url = url + ('?' if '?' in url else '&') + 'output=json'
json_url = url + ('&' if '?' in url else '?') + 'output=json'
data = self._download_json(json_url, video_id)

def get_optional(data_dict, field):
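The one-character change above fixes an inverted ternary: the old expression appended `?` when the URL already contained one and `&` when it did not, so a plain URL never got a proper query separator. Concretely:

```python
url = 'https://archive.org/details/XD300-23_68HighlightsAReelOfMiscellaneousDemos'
# old, inverted logic: never introduces the missing '?'
print(url + ('?' if '?' in url else '&') + 'output=json')
# -> ...Demos&output=json (invalid query string)
# fixed logic:
print(url + ('&' if '?' in url else '?') + 'output=json')
# -> ...Demos?output=json
```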
@ -8,6 +8,7 @@ from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
get_element_by_attribute,
qualities,
int_or_none,
parse_duration,
@ -22,19 +23,125 @@ class ARDMediathekIE(InfoExtractor):
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'

_TESTS = [{
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
'info_dict': {
'id': '29582122',
'ext': 'mp4',
'title': 'Ich liebe das Leben trotzdem',
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
'duration': 4557,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
'info_dict': {
'id': '29522730',
'ext': 'mp4',
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
'duration': 5252,
},
}, {
# audio
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
'info_dict': {
'id': '28488308',
'ext': 'mp3',
'title': 'Tod eines Fußballers',
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
'duration': 3240,
},
}, {
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
'only_matching': True,
}, {
'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916',
'info_dict': {
'id': '22490580',
'ext': 'mp4',
'title': 'Das Wunder von Wolbeck (Video tgl. ab 20 Uhr)',
'description': 'Auf einem restaurierten Hof bei Wolbeck wird der Heilpraktiker Raffael Lembeck eines morgens von seiner Frau Stella tot aufgefunden. Das Opfer war offensichtlich in seiner Praxis zu Fall gekommen und ist dann verblutet, erklärt Prof. Boerne am Tatort.',
},
'skip': 'Blocked outside of Germany',
}]

def _extract_media_info(self, media_info_url, webpage, video_id):
media_info = self._download_json(
media_info_url, video_id, 'Downloading media JSON')

formats = self._extract_formats(media_info, video_id)

if not formats:
if '"fsk"' in webpage:
raise ExtractorError(
'This video is only available after 20:00', expected=True)
elif media_info.get('_geoblocked'):
raise ExtractorError('This video is not available due to geo restriction', expected=True)

self._sort_formats(formats)

duration = int_or_none(media_info.get('_duration'))
thumbnail = media_info.get('_previewImage')

subtitles = {}
subtitle_url = media_info.get('_subtitleUrl')
if subtitle_url:
subtitles['de'] = [{
'ext': 'srt',
'url': subtitle_url,
}]

return {
'id': video_id,
'duration': duration,
'thumbnail': thumbnail,
'formats': formats,
'subtitles': subtitles,
}

def _extract_formats(self, media_info, video_id):
type_ = media_info.get('_type')
media_array = media_info.get('_mediaArray', [])
formats = []
for num, media in enumerate(media_array):
for stream in media.get('_mediaStreamArray', []):
stream_urls = stream.get('_stream')
if not stream_urls:
continue
if not isinstance(stream_urls, list):
stream_urls = [stream_urls]
quality = stream.get('_quality')
server = stream.get('_server')
for stream_url in stream_urls:
ext = determine_ext(stream_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
stream_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
video_id, preference=-1, f4m_id='hds'))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, 'mp4', preference=1, m3u8_id='hls'))
else:
if server and server.startswith('rtmp'):
f = {
'url': server,
'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality),
}
elif stream_url.startswith('http'):
f = {
'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality)
}
else:
continue
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
if type_ == 'audio':
f['vcodec'] = 'none'
formats.append(f)
return formats

def _real_extract(self, url):
# determine video id from url
m = re.match(self._VALID_URL, url)
@ -92,46 +199,22 @@ class ARDMediathekIE(InfoExtractor):
'format_id': fid,
'url': furl,
})
self._sort_formats(formats)
info = {
'formats': formats,
}
else: # request JSON file
media_info = self._download_json(
'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
# The second element of the _mediaArray contains the standard http urls
streams = media_info['_mediaArray'][1]['_mediaStreamArray']
if not streams:
if '"fsk"' in webpage:
raise ExtractorError('This video is only available after 20:00')
info = self._extract_media_info(
'http://www.ardmediathek.de/play/media/%s' % video_id, webpage, video_id)

formats = []
for s in streams:
if type(s['_stream']) == list:
for index, url in enumerate(s['_stream'][::-1]):
quality = s['_quality'] + index
formats.append({
'quality': quality,
'url': url,
'format_id': '%s-%s' % (determine_ext(url), quality)
})
continue

format = {
'quality': s['_quality'],
'url': s['_stream'],
}

format['format_id'] = '%s-%s' % (
determine_ext(format['url']), format['quality'])

formats.append(format)

self._sort_formats(formats)

return {
info.update({
'id': video_id,
'title': title,
'description': description,
'formats': formats,
'thumbnail': thumbnail,
}
})

return info


class ARDIE(InfoExtractor):
@ -189,3 +272,41 @@ class ARDIE(InfoExtractor):
'upload_date': upload_date,
'thumbnail': thumbnail,
}


class SportschauIE(ARDMediathekIE):
IE_NAME = 'Sportschau'
_VALID_URL = r'(?P<baseurl>https?://(?:www\.)?sportschau\.de/(?:[^/]+/)+video(?P<id>[^/#?]+))\.html'
_TESTS = [{
'url': 'http://www.sportschau.de/tourdefrance/videoseppeltkokainhatnichtsmitklassischemdopingzutun100.html',
'info_dict': {
'id': 'seppeltkokainhatnichtsmitklassischemdopingzutun100',
'ext': 'mp4',
'title': 'Seppelt: "Kokain hat nichts mit klassischem Doping zu tun"',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'Der ARD-Doping Experte Hajo Seppelt gibt seine Einschätzung zum ersten Dopingfall der diesjährigen Tour de France um den Italiener Luca Paolini ab.',
},
'params': {
# m3u8 download
'skip_download': True,
},
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
base_url = mobj.group('baseurl')

webpage = self._download_webpage(url, video_id)
title = get_element_by_attribute('class', 'headline', webpage)
description = self._html_search_meta('description', webpage, 'description')

info = self._extract_media_info(
base_url + '-mc_defaultQuality-h.json', webpage, video_id)

info.update({
'title': title,
'description': description,
})

return info
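One detail in the new `_extract_formats` worth spelling out: for plain HTTP/RTMP streams the resolution is recovered from the file name itself via the `_WIDTHxHEIGHT.mp4` suffix. The regex in isolation (URL invented):

```python
import re

stream_url = 'http://media.example.de/clip/video_960x540.mp4'  # invented
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
if m:
    print(int(m.group('width')), int(m.group('height')))  # 960 540
```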
@ -4,10 +4,13 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
find_xpath_attr,
unified_strdate,
get_element_by_id,
get_element_by_attribute,
int_or_none,
qualities,
@ -78,7 +81,13 @@ class ArteTVPlus7IE(InfoExtractor):
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(
[r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
webpage, 'json vp url')
webpage, 'json vp url', default=None)
if not json_url:
iframe_url = self._html_search_regex(
r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
webpage, 'iframe url', group='url')
json_url = compat_parse_qs(
compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
return self._extract_from_json_url(json_url, video_id, lang)

def _extract_from_json_url(self, json_url, video_id, lang):
@ -195,7 +204,9 @@ class ArteTVFutureIE(ArteTVPlus7IE):
def _real_extract(self, url):
anchor_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, anchor_id)
row = get_element_by_id(anchor_id, webpage)
row = self._search_regex(
r'(?s)id="%s"[^>]*>.+?(<div[^>]*arte_vp_url[^>]*>)' % anchor_id,
webpage, 'row')
return self._extract_from_webpage(row, anchor_id, lang)


@ -8,6 +8,7 @@ from ..compat import compat_urlparse
|
||||
|
||||
|
||||
class BaiduVideoIE(InfoExtractor):
|
||||
IE_DESC = '百度视频'
|
||||
_VALID_URL = r'http://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm'
|
||||
_TESTS = [{
|
||||
'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6',
|
||||
|
youtube_dl/extractor/bbc.py (new file, 787 lines)
@@ -0,0 +1,787 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_duration,
    parse_iso8601,
)
from ..compat import compat_HTTPError


class BBCCoUkIE(InfoExtractor):
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'

    _MEDIASELECTOR_URLS = [
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
    ]

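    # _download_media_selector() below walks this list in order, moving on to
    # the next endpoint only when the current one answers with a 'notukerror'
    # media selection error.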
    _TESTS = [
        {
            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
            'info_dict': {
                'id': 'b039d07m',
                'ext': 'flv',
                'title': 'Kaleidoscope, Leonard Cohen',
                'description': 'The Canadian poet and songwriter reflects on his musical career.',
                'duration': 1740,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Man in Black: Series 3: The Printed Name',
                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
                'duration': 1800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
                'duration': 5100,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
            'info_dict': {
                'id': 'b03k3pb7',
                'ext': 'flv',
                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
                'description': '2. Invasion',
                'duration': 3600,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        }, {
            'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
            'info_dict': {
                'id': 'b04v209v',
                'ext': 'flv',
                'title': 'Pete Tong, The Essential New Tune Special',
                'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
                'duration': 10800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
            'note': 'Audio',
            'info_dict': {
                'id': 'p02frcch',
                'ext': 'flv',
                'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
                'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
                'duration': 3507,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
            'note': 'Video',
            'info_dict': {
                'id': 'p025c103',
                'ext': 'flv',
                'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
                'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
                'duration': 226,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
            'info_dict': {
                'id': 'p02n76xf',
                'ext': 'flv',
                'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
                'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
            'info_dict': {
                'id': 'b05zmgw1',
                'ext': 'flv',
                'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
                'title': 'Royal Academy Summer Exhibition',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
            'only_matching': True,
        }
    ]

    class MediaSelectionError(Exception):
        def __init__(self, id):
            self.id = id

    def _extract_asx_playlist(self, connection, programme_id):
        asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
        return [ref.get('href') for ref in asx.findall('./Entry/ref')]

    def _extract_connection(self, connection, programme_id):
        formats = []
        protocol = connection.get('protocol')
        supplier = connection.get('supplier')
        if protocol == 'http':
            href = connection.get('href')
            transfer_format = connection.get('transferFormat')
            # ASX playlist
            if supplier == 'asx':
                for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                    formats.append({
                        'url': ref,
                        'format_id': 'ref%s_%s' % (i, supplier),
                    })
            # Skip DASH until supported
            elif transfer_format == 'dash':
                pass
            elif transfer_format == 'hls':
                m3u8_formats = self._extract_m3u8_formats(
                    href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                    m3u8_id=supplier, fatal=False)
                if m3u8_formats:
                    formats.extend(m3u8_formats)
            # Direct link
            else:
                formats.append({
                    'url': href,
                    'format_id': supplier,
                })
        elif protocol == 'rtmp':
            application = connection.get('application', 'ondemand')
            auth_string = connection.get('authString')
            identifier = connection.get('identifier')
            server = connection.get('server')
            formats.append({
                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
                'play_path': identifier,
                'app': '%s?%s' % (application, auth_string),
                'page_url': 'http://www.bbc.co.uk',
                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
                'rtmp_live': False,
                'ext': 'flv',
                'format_id': supplier,
            })
        return formats

    def _extract_items(self, playlist):
        return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')

    def _extract_medias(self, media_selection):
        error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
        if error is not None:
            raise BBCCoUkIE.MediaSelectionError(error.get('id'))
        return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')

    def _extract_connections(self, media):
        return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')

    def _extract_video(self, media, programme_id):
        formats = []
        vbr = int_or_none(media.get('bitrate'))
        vcodec = media.get('encoding')
        service = media.get('service')
        width = int_or_none(media.get('width'))
        height = int_or_none(media.get('height'))
        file_size = int_or_none(media.get('media_file_size'))
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'width': width,
                    'height': height,
                    'vbr': vbr,
                    'vcodec': vcodec,
                    'filesize': file_size,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_audio(self, media, programme_id):
        formats = []
        abr = int_or_none(media.get('bitrate'))
        acodec = media.get('encoding')
        service = media.get('service')
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'abr': abr,
                    'acodec': acodec,
                })
            formats.extend(conn_formats)
        return formats

    def _get_subtitles(self, media, programme_id):
        subtitles = {}
        for connection in self._extract_connections(media):
            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
            subtitles[lang] = [
                {
                    'url': connection.get('href'),
                    'ext': 'ttml',
                },
            ]
        return subtitles

    def _raise_extractor_error(self, media_selection_error):
        raise ExtractorError(
            '%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
            expected=True)

    def _download_media_selector(self, programme_id):
        last_exception = None
        for mediaselector_url in self._MEDIASELECTOR_URLS:
            try:
                return self._download_media_selector_url(
                    mediaselector_url % programme_id, programme_id)
            except BBCCoUkIE.MediaSelectionError as e:
                if e.id == 'notukerror':
                    last_exception = e
                    continue
                self._raise_extractor_error(e)
        self._raise_extractor_error(last_exception)

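    # Only a 'notukerror' (UK geo restriction) response falls through to the
    # next mediaselector URL in the loop above; any other media selection
    # error aborts immediately, and the last geo error is raised once every
    # endpoint has refused.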
    def _download_media_selector_url(self, url, programme_id=None):
        try:
            media_selection = self._download_xml(
                url, programme_id, 'Downloading media selection XML')
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
            else:
                raise
        return self._process_media_selector(media_selection, programme_id)

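    # The mediaselector responds with HTTP 403 when access is denied but the
    # response body is still a parseable mediaselection document, so it is
    # parsed here and its <error id="..."> element is surfaced later through
    # _extract_medias().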
    def _process_media_selector(self, media_selection, programme_id):
        formats = []
        subtitles = None

        for media in self._extract_medias(media_selection):
            kind = media.get('kind')
            if kind == 'audio':
                formats.extend(self._extract_audio(media, programme_id))
            elif kind == 'video':
                formats.extend(self._extract_video(media, programme_id))
            elif kind == 'captions':
                subtitles = self.extract_subtitles(media, programme_id)
        return formats, subtitles

    def _download_playlist(self, playlist_id):
        try:
            playlist = self._download_json(
                'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                playlist_id, 'Downloading playlist JSON')

            version = playlist.get('defaultAvailableVersion')
            if version:
                smp_config = version['smpConfig']
                title = smp_config['title']
                description = smp_config['summary']
                for item in smp_config['items']:
                    kind = item['kind']
                    if kind != 'programme' and kind != 'radioProgramme':
                        continue
                    programme_id = item.get('vpid')
                    duration = int_or_none(item.get('duration'))
                    formats, subtitles = self._download_media_selector(programme_id)
                    return programme_id, title, description, duration, formats, subtitles
        except ExtractorError as ee:
            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                raise

        # fallback to legacy playlist
        return self._process_legacy_playlist(playlist_id)

    def _process_legacy_playlist_url(self, url, display_id):
        playlist = self._download_legacy_playlist_url(url, display_id)
        return self._extract_from_legacy_playlist(playlist, display_id)

    def _process_legacy_playlist(self, playlist_id):
        return self._process_legacy_playlist_url(
            'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)

    def _download_legacy_playlist_url(self, url, playlist_id=None):
        return self._download_xml(
            url, playlist_id, 'Downloading legacy playlist XML')

    def _extract_from_legacy_playlist(self, playlist, playlist_id):
        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
        if no_items is not None:
            reason = no_items.get('reason')
            if reason == 'preAvailability':
                msg = 'Episode %s is not yet available' % playlist_id
            elif reason == 'postAvailability':
                msg = 'Episode %s is no longer available' % playlist_id
            elif reason == 'noMedia':
                msg = 'Episode %s is not currently available' % playlist_id
            else:
                msg = 'Episode %s is not available: %s' % (playlist_id, reason)
            raise ExtractorError(msg, expected=True)

        for item in self._extract_items(playlist):
            kind = item.get('kind')
            if kind != 'programme' and kind != 'radioProgramme':
                continue
            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text

            def get_programme_id(item):
                def get_from_attributes(item):
                    for p in ('identifier', 'group'):
                        value = item.get(p)
                        if value and re.match(r'^[pb][\da-z]{7}$', value):
                            return value
                # check the item's own attributes first, then fall back to
                # its <mediator> child
                programme_id = get_from_attributes(item)
                if programme_id:
                    return programme_id
                mediator = item.find('./{http://bbc.co.uk/2008/emp/playlist}mediator')
                if mediator is not None:
                    return get_from_attributes(mediator)

            programme_id = get_programme_id(item)
            duration = int_or_none(item.get('duration'))
            # TODO: programme_id can be None and media items can be incorporated right inside
            # playlist's item (e.g. http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
            # as f4m and m3u8
            formats, subtitles = self._download_media_selector(programme_id)

            return programme_id, title, description, duration, formats, subtitles

    def _real_extract(self, url):
        group_id = self._match_id(url)

        webpage = self._download_webpage(url, group_id, 'Downloading video page')

        programme_id = None

        tviplayer = self._search_regex(
            r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
            webpage, 'player', default=None)

        if tviplayer:
            player = self._parse_json(tviplayer, group_id).get('player', {})
            duration = int_or_none(player.get('duration'))
            programme_id = player.get('vpid')

        if not programme_id:
            programme_id = self._search_regex(
                r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)

        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            title = self._og_search_title(webpage)
            description = self._search_regex(
                r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
                webpage, 'description', fatal=False)
        else:
            programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)

        self._sort_formats(formats)

        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }


class BBCIE(BBCCoUkIE):
    IE_NAME = 'bbc'
    IE_DESC = 'BBC'
    _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'

    _MEDIASELECTOR_URLS = [
        # Provides more formats, namely direct mp4 links, but fails on some videos with
        # notukerror for non UK (?) users (e.g.
        # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
        # Provides fewer formats, but works everywhere for everybody (hopefully)
        'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
    ]

    _TESTS = [{
        # article with multiple videos embedded with data-media-meta containing
        # playlist.sxml, externalId and no direct video links
        'url': 'http://www.bbc.com/news/world-europe-32668511',
        'info_dict': {
            'id': 'world-europe-32668511',
            'title': 'Russia stages massive WW2 parade despite Western boycott',
            'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
        },
        'playlist_count': 2,
    }, {
        # article with multiple videos embedded with data-media-meta (more videos)
        'url': 'http://www.bbc.com/news/business-28299555',
        'info_dict': {
            'id': 'business-28299555',
            'title': 'Farnborough Airshow: Video highlights',
            'description': 'BBC reports and video highlights at the Farnborough Airshow.',
        },
        'playlist_count': 9,
        'skip': 'Save time',
    }, {
        # article with multiple videos embedded with `new SMP()`
        'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
        'info_dict': {
            'id': '3662a707-0af9-3149-963f-47bea720b460',
            'title': 'BBC Blogs - Adam Curtis - BUGGER',
        },
        'playlist_count': 18,
    }, {
        # single video embedded with mediaAssetPage.init()
        'url': 'http://www.bbc.com/news/world-europe-32041533',
        'info_dict': {
            'id': 'p02mprgb',
            'ext': 'mp4',
            'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
            'duration': 47,
            'timestamp': 1427219242,
            'upload_date': '20150324',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # article with single video embedded with data-media-meta containing
        # direct video links (for now these are extracted) and playlist.xml (with
        # media items as f4m and m3u8 - currently unsupported)
        'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
        'info_dict': {
            'id': '150615_telabyad_kentin_cogu',
            'ext': 'mp4',
            'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
            'duration': 47,
            'timestamp': 1434397334,
            'upload_date': '20150615',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video embedded with mediaAssetPage.init() (regional section)
        'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
        'info_dict': {
            'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
            'ext': 'mp4',
            'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
            'duration': 87,
            'timestamp': 1434713142,
            'upload_date': '20150619',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video from video playlist embedded with vxp-playlist-data JSON
        'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
        'info_dict': {
            'id': 'p02w6qjc',
            'ext': 'mp4',
            'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
            'duration': 56,
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # single video story with digitalData
        'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
        'info_dict': {
            'id': 'p02q6gc4',
            'ext': 'flv',
            'title': 'Sri Lanka’s spicy secret',
            'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
            'timestamp': 1437674293,
            'upload_date': '20150723',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video story without digitalData
        'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
        'info_dict': {
            'id': 'p018zqqg',
            'ext': 'mp4',
            'title': 'Hyundai Santa Fe Sport: Rock star',
            'description': 'md5:b042a26142c4154a6e472933cf20793d',
            'timestamp': 1368473503,
            'upload_date': '20130513',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video with playlist.sxml URL
        'url': 'http://www.bbc.com/sport/0/football/33653409',
        'info_dict': {
            'id': 'p02xycnp',
            'ext': 'mp4',
            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
            'description': 'md5:398fca0e2e701c609d726e034fa1fc89',
            'duration': 140,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        # single video with playlist URL from weather section
        'url': 'http://www.bbc.com/weather/features/33601775',
        'only_matching': True,
    }, {
        # custom redirection to www.bbc.com
        'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url)

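    # BBCIE's _VALID_URL is broad enough to also match bbc.co.uk iPlayer
    # pages, so suitable() above defers to BBCCoUkIE for any URL that
    # extractor already claims.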
    def _extract_from_media_meta(self, media_meta, video_id):
        # Direct links to media in media metadata (e.g.
        # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
        # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
        source_files = media_meta.get('sourceFiles')
        if source_files:
            return [{
                'url': f['url'],
                'format_id': format_id,
                'ext': f.get('encoding'),
                'tbr': float_or_none(f.get('bitrate'), 1000),
                'filesize': int_or_none(f.get('filesize')),
            } for format_id, f in source_files.items() if f.get('url')], []

        programme_id = media_meta.get('externalId')
        if programme_id:
            return self._download_media_selector(programme_id)

        # Process playlist.sxml as legacy playlist
        href = media_meta.get('href')
        if href:
            playlist = self._download_legacy_playlist_url(href)
            _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
            return formats, subtitles

        return [], []

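    # Whichever branch matches above, a (formats, subtitles) pair comes back:
    # direct sourceFiles links, a mediaselector lookup via externalId, or a
    # legacy playlist.sxml referenced by href.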
    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        timestamp = parse_iso8601(self._search_regex(
            [r'"datePublished":\s*"([^"]+)',
             r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
             r'itemprop="datePublished"[^>]+datetime="([^"]+)"'],
            webpage, 'date', default=None))

        # single video with playlist.sxml URL (e.g. http://www.bbc.com/sport/0/football/33653409)
        playlist = self._search_regex(
            r'<param[^>]+name="playlist"[^>]+value="([^"]+)"',
            webpage, 'playlist', default=None)
        if playlist:
            programme_id, title, description, duration, formats, subtitles = \
                self._process_legacy_playlist_url(playlist, playlist_id)
            self._sort_formats(formats)
            return {
                'id': programme_id,
                'title': title,
                'description': description,
                'duration': duration,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            }

        # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
        programme_id = self._search_regex(
            [r'data-video-player-vpid="([\da-z]{8})"',
             r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'],
            webpage, 'vpid', default=None)
        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            self._sort_formats(formats)
            # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
            digital_data = self._parse_json(
                self._search_regex(
                    r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
                programme_id, fatal=False)
            page_info = digital_data.get('page', {}).get('pageInfo', {})
            title = page_info.get('pageName') or self._og_search_title(webpage)
            description = page_info.get('description') or self._og_search_description(webpage)
            timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
            return {
                'id': programme_id,
                'title': title,
                'description': description,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            }

        playlist_title = self._html_search_regex(
            r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
        playlist_description = self._og_search_description(webpage, default=None)

        def extract_all(pattern):
            return list(filter(None, map(
                lambda s: self._parse_json(s, playlist_id, fatal=False),
                re.findall(pattern, webpage))))

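        # extract_all() above JSON-decodes every regex match and silently
        # drops anything that fails to parse, so the embedded-player scans
        # below always operate on clean dicts.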
        # Multiple video article (e.g.
        # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
        EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?'
        entries = []
        for match in extract_all(r'new\s+SMP\(({.+?})\)'):
            embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
            if embed_url and re.match(EMBED_URL, embed_url):
                entries.append(embed_url)
        entries.extend(re.findall(
            r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
        if entries:
            return self.playlist_result(
                [self.url_result(entry, 'BBCCoUk') for entry in entries],
                playlist_id, playlist_title, playlist_description)

        # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
        medias = extract_all(r"data-media-meta='({[^']+})'")

        if not medias:
            # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
            media_asset = self._search_regex(
                r'mediaAssetPage\.init\(\s*({.+?}), "/',
                webpage, 'media asset', default=None)
            if media_asset:
                media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
                medias = []
                for video in media_asset_page.get('videos', {}).values():
                    medias.extend(video.values())

        if not medias:
            # Multiple video playlist with single `now playing` entry (e.g.
            # http://www.bbc.com/news/video_and_audio/must_see/33767813)
            vxp_playlist = self._parse_json(
                self._search_regex(
                    r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
                    webpage, 'playlist data'),
                playlist_id)
            playlist_medias = []
            for item in vxp_playlist:
                media = item.get('media')
                if not media:
                    continue
                playlist_medias.append(media)
                # Download single video if found media with asset id matching the video id from URL
                if item.get('advert', {}).get('assetId') == playlist_id:
                    medias = [media]
                    break
            # Fallback to the whole playlist
            if not medias:
                medias = playlist_medias

        entries = []
        for num, media_meta in enumerate(medias, start=1):
            formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
            if not formats:
                continue
            self._sort_formats(formats)

            video_id = media_meta.get('externalId')
            if not video_id:
                video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)

            title = media_meta.get('caption')
            if not title:
                title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)

            duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))

            images = []
            for image in media_meta.get('images', {}).values():
                images.extend(image.values())
            if 'image' in media_meta:
                images.append(media_meta['image'])

            thumbnails = [{
                'url': image.get('href'),
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            } for image in images]

            entries.append({
                'id': video_id,
                'title': title,
                'thumbnails': thumbnails,
                'duration': duration,
                'timestamp': timestamp,
                'formats': formats,
                'subtitles': subtitles,
            })

        return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
@@ -1,380 +0,0 @@
from __future__ import unicode_literals

import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
)
from ..compat import compat_HTTPError


class BBCCoUkIE(InfoExtractor):
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'

    _TESTS = [
        {
            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
            'info_dict': {
                'id': 'b039d07m',
                'ext': 'flv',
                'title': 'Kaleidoscope, Leonard Cohen',
                'description': 'The Canadian poet and songwriter reflects on his musical career.',
                'duration': 1740,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Man in Black: Series 3: The Printed Name',
                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
                'duration': 1800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
                'duration': 5100,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
            'info_dict': {
                'id': 'b03k3pb7',
                'ext': 'flv',
                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
                'description': '2. Invasion',
                'duration': 3600,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        }, {
            'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
            'info_dict': {
                'id': 'b04v209v',
                'ext': 'flv',
                'title': 'Pete Tong, The Essential New Tune Special',
                'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
                'duration': 10800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
            'note': 'Audio',
            'info_dict': {
                'id': 'p02frcch',
                'ext': 'flv',
                'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
                'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
                'duration': 3507,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
            'note': 'Video',
            'info_dict': {
                'id': 'p025c103',
                'ext': 'flv',
                'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
                'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
                'duration': 226,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
            'info_dict': {
                'id': 'p02n76xf',
                'ext': 'flv',
                'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
                'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
            'only_matching': True,
        }
    ]

    def _extract_asx_playlist(self, connection, programme_id):
        asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
        return [ref.get('href') for ref in asx.findall('./Entry/ref')]

    def _extract_connection(self, connection, programme_id):
        formats = []
        protocol = connection.get('protocol')
        supplier = connection.get('supplier')
        if protocol == 'http':
            href = connection.get('href')
            # ASX playlist
            if supplier == 'asx':
                for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                    formats.append({
                        'url': ref,
                        'format_id': 'ref%s_%s' % (i, supplier),
                    })
            # Direct link
            else:
                formats.append({
                    'url': href,
                    'format_id': supplier,
                })
        elif protocol == 'rtmp':
            application = connection.get('application', 'ondemand')
            auth_string = connection.get('authString')
            identifier = connection.get('identifier')
            server = connection.get('server')
            formats.append({
                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
                'play_path': identifier,
                'app': '%s?%s' % (application, auth_string),
                'page_url': 'http://www.bbc.co.uk',
                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
                'rtmp_live': False,
                'ext': 'flv',
                'format_id': supplier,
            })
        return formats

    def _extract_items(self, playlist):
        return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')

    def _extract_medias(self, media_selection):
        error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
        if error is not None:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
        return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')

    def _extract_connections(self, media):
        return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')

    def _extract_video(self, media, programme_id):
        formats = []
        vbr = int(media.get('bitrate'))
        vcodec = media.get('encoding')
        service = media.get('service')
        width = int(media.get('width'))
        height = int(media.get('height'))
        file_size = int(media.get('media_file_size'))
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'width': width,
                    'height': height,
                    'vbr': vbr,
                    'vcodec': vcodec,
                    'filesize': file_size,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_audio(self, media, programme_id):
        formats = []
        abr = int(media.get('bitrate'))
        acodec = media.get('encoding')
        service = media.get('service')
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'abr': abr,
                    'acodec': acodec,
                })
            formats.extend(conn_formats)
        return formats

    def _get_subtitles(self, media, programme_id):
        subtitles = {}
        for connection in self._extract_connections(media):
            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
            ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
            srt = ''

            def _extract_text(p):
                if p.text is not None:
                    stripped_text = p.text.strip()
                    if stripped_text:
                        return stripped_text
                return ' '.join(span.text.strip() for span in p.findall('{http://www.w3.org/2006/10/ttaf1}span'))
            for pos, p in enumerate(ps):
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'), _extract_text(p))
            subtitles[lang] = [
                {
                    'url': connection.get('href'),
                    'ext': 'ttml',
                },
                {
                    'data': srt,
                    'ext': 'srt',
                },
            ]
        return subtitles

    def _download_media_selector(self, programme_id):
        try:
            media_selection = self._download_xml(
                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                programme_id, 'Downloading media selection XML')
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().encode('utf-8'))
            else:
                raise

        formats = []
        subtitles = None

        for media in self._extract_medias(media_selection):
            kind = media.get('kind')
            if kind == 'audio':
                formats.extend(self._extract_audio(media, programme_id))
            elif kind == 'video':
                formats.extend(self._extract_video(media, programme_id))
            elif kind == 'captions':
                subtitles = self.extract_subtitles(media, programme_id)

        return formats, subtitles

    def _download_playlist(self, playlist_id):
        try:
            playlist = self._download_json(
                'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                playlist_id, 'Downloading playlist JSON')

            version = playlist.get('defaultAvailableVersion')
            if version:
                smp_config = version['smpConfig']
                title = smp_config['title']
                description = smp_config['summary']
                for item in smp_config['items']:
                    kind = item['kind']
                    if kind != 'programme' and kind != 'radioProgramme':
                        continue
                    programme_id = item.get('vpid')
                    duration = int(item.get('duration'))
                    formats, subtitles = self._download_media_selector(programme_id)
                    return programme_id, title, description, duration, formats, subtitles
        except ExtractorError as ee:
            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                raise

        # fallback to legacy playlist
        playlist = self._download_xml(
            'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id,
            playlist_id, 'Downloading legacy playlist XML')

        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
        if no_items is not None:
            reason = no_items.get('reason')
            if reason == 'preAvailability':
                msg = 'Episode %s is not yet available' % playlist_id
            elif reason == 'postAvailability':
                msg = 'Episode %s is no longer available' % playlist_id
            elif reason == 'noMedia':
                msg = 'Episode %s is not currently available' % playlist_id
            else:
                msg = 'Episode %s is not available: %s' % (playlist_id, reason)
            raise ExtractorError(msg, expected=True)

        for item in self._extract_items(playlist):
            kind = item.get('kind')
            if kind != 'programme' and kind != 'radioProgramme':
                continue
            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
            programme_id = item.get('identifier')
            duration = int(item.get('duration'))
            formats, subtitles = self._download_media_selector(programme_id)

        return programme_id, title, description, duration, formats, subtitles

    def _real_extract(self, url):
        group_id = self._match_id(url)

        webpage = self._download_webpage(url, group_id, 'Downloading video page')

        programme_id = None

        tviplayer = self._search_regex(
            r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
            webpage, 'player', default=None)

        if tviplayer:
            player = self._parse_json(tviplayer, group_id).get('player', {})
            duration = int_or_none(player.get('duration'))
            programme_id = player.get('vpid')

        if not programme_id:
            programme_id = self._search_regex(
                r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)

        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            title = self._og_search_title(webpage)
            description = self._search_regex(
                r'<p class="medium-description">([^<]+)</p>',
                webpage, 'description', fatal=False)
        else:
            programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)

        self._sort_formats(formats)

        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
@@ -1,7 +1,7 @@
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    xpath_text,
    xpath_with_ns,
@@ -16,11 +16,11 @@ class BetIE(InfoExtractor):
    {
        'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
        'info_dict': {
            'id': '740ab250-bb94-4a8a-8787-fe0de7c74471',
            'id': 'news/national/2014/a-conversation-with-president-obama',
            'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
            'ext': 'flv',
            'title': 'BET News Presents: A Conversation With President Obama',
            'description': 'md5:5a88d8ae912c1b33e090290af7ec33c6',
            'title': 'A Conversation With President Obama',
            'description': 'md5:699d0652a350cf3e491cd15cc745b5da',
            'duration': 1534,
            'timestamp': 1418075340,
            'upload_date': '20141208',
@@ -35,7 +35,7 @@ class BetIE(InfoExtractor):
    {
        'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
        'info_dict': {
            'id': 'bcd1b1df-673a-42cf-8d01-b282db608f2d',
            'id': 'news/national/2014/justice-for-ferguson-a-community-reacts',
            'display_id': 'justice-for-ferguson-a-community-reacts',
            'ext': 'flv',
            'title': 'Justice for Ferguson: A Community Reacts',
@@ -57,10 +57,13 @@ class BetIE(InfoExtractor):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        media_url = compat_urllib_parse.unquote(self._search_regex(
        media_url = compat_urllib_parse_unquote(self._search_regex(
            [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
            webpage, 'media URL'))

        video_id = self._search_regex(
            r'/video/(.*)/_jcr_content/', media_url, 'video id')

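        # The id is now everything between '/video/' and '/_jcr_content/' in
        # the media URL, which is what the updated test ids above reflect;
        # the GUID-based id (removed below) is no longer used.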
        mrss = self._download_xml(media_url, display_id)

        item = mrss.find('./channel/item')
@@ -75,8 +78,6 @@ class BetIE(InfoExtractor):
        description = xpath_text(
            item, './description', 'description', fatal=False)

        video_id = xpath_text(item, './guid', 'video id', fatal=False)

        timestamp = parse_iso8601(xpath_text(
            item, xpath_with_ns('./dc:date', NS_MAP),
            'upload date', fatal=False))
@@ -2,7 +2,10 @@
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import int_or_none
from ..utils import (
    int_or_none,
    fix_xml_ampersands,
)


class BildIE(InfoExtractor):
@@ -15,7 +18,7 @@ class BildIE(InfoExtractor):
            'id': '38184146',
            'ext': 'mp4',
            'title': 'BILD hat sie getestet',
            'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 196,
            'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
        }
@@ -25,7 +28,7 @@ class BildIE(InfoExtractor):
        video_id = self._match_id(url)

        xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
        doc = self._download_xml(xml_url, video_id)
        doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands)
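        # fix_xml_ampersands rewrites bare '&' characters to '&amp;' before
        # parsing, guarding against bild.de feeds that are not well-formed in
        # that respect.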

        duration = int_or_none(doc.attrib.get('duration'), scale=1000)

@@ -3,6 +3,8 @@ from __future__ import unicode_literals

import re
import itertools
import json
import xml.etree.ElementTree as ET

from .common import InfoExtractor
from ..utils import (
@@ -39,8 +41,15 @@ class BiliBiliIE(InfoExtractor):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        if self._search_regex(r'(此视频不存在或被删除)', webpage, 'error message', default=None):
            raise ExtractorError('The video does not exist or was deleted', expected=True)
        if '(此视频不存在或被删除)' in webpage:
            raise ExtractorError(
                'The video does not exist or was deleted', expected=True)

        if '>你没有权限浏览! 由于版权相关问题 我们不对您所在的地区提供服务<' in webpage:
            raise ExtractorError(
                'The video is not available in your region due to copyright reasons',
                expected=True)

        video_code = self._search_regex(
            r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')

@@ -67,11 +76,19 @@ class BiliBiliIE(InfoExtractor):

        entries = []

        lq_doc = self._download_xml(
        lq_page = self._download_webpage(
            'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
            video_id,
            note='Downloading LQ video info'
        )
        try:
            err_info = json.loads(lq_page)
            raise ExtractorError(
                'BiliBili said: ' + err_info['error_text'], expected=True)
        except ValueError:
            pass

        lq_doc = ET.fromstring(lq_page)
        lq_durls = lq_doc.findall('./durl')

        hq_doc = self._download_xml(
@@ -80,9 +97,11 @@ class BiliBiliIE(InfoExtractor):
            note='Downloading HQ video info',
            fatal=False,
        )
        hq_durls = hq_doc.findall('./durl') if hq_doc is not False else itertools.repeat(None)

        assert len(lq_durls) == len(hq_durls)
        if hq_doc is not False:
            hq_durls = hq_doc.findall('./durl')
            assert len(lq_durls) == len(hq_durls)
        else:
            hq_durls = itertools.repeat(None)

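        # LQ and HQ segments are paired index-by-index below; when the HQ
        # document is unavailable, itertools.repeat(None) keeps zip()
        # yielding (lq_durl, None) pairs so the LQ-only path still works.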
        i = 1
        for lq_durl, hq_durl in zip(lq_durls, hq_durls):
@@ -93,7 +112,7 @@ class BiliBiliIE(InfoExtractor):
                'filesize': int_or_none(
                    lq_durl.find('./size'), get_attr='text'),
            }]
            if hq_durl:
            if hq_durl is not None:
                formats.append({
                    'format_id': 'hq',
                    'quality': 2,
@@ -5,7 +5,6 @@ import re

from .common import InfoExtractor

from ..compat import (
    compat_str,
    compat_urllib_request,
    compat_urlparse,
)
@@ -14,6 +13,8 @@ from ..utils import (
    int_or_none,
    parse_iso8601,
    unescapeHTML,
    xpath_text,
    xpath_with_ns,
)


@@ -23,10 +24,10 @@ class BlipTVIE(InfoExtractor):
    _TESTS = [
        {
            'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
            'md5': 'c6934ad0b6acf2bd920720ec888eb812',
            'md5': '80baf1ec5c3d2019037c1c707d676b9f',
            'info_dict': {
                'id': '5779306',
                'ext': 'mov',
                'ext': 'm4v',
                'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
                'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
                'timestamp': 1323138843,
@@ -100,6 +101,20 @@ class BlipTVIE(InfoExtractor):
                'vcodec': 'none',
            }
        },
        {
            # missing duration
            'url': 'http://blip.tv/rss/flash/6700880',
            'info_dict': {
                'id': '6684191',
                'ext': 'm4v',
                'title': 'Cowboy Bebop: Gateway Shuffle Review',
                'description': 'md5:3acc480c0f9ae157f5fe88547ecaf3f8',
                'timestamp': 1386639757,
                'upload_date': '20131210',
                'uploader': 'sfdebris',
                'uploader_id': '706520',
            }
        }
    ]

    @staticmethod
@@ -128,35 +143,34 @@ class BlipTVIE(InfoExtractor):

        rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')

        def blip(s):
            return '{http://blip.tv/dtd/blip/1.0}%s' % s

        def media(s):
            return '{http://search.yahoo.com/mrss/}%s' % s

        def itunes(s):
            return '{http://www.itunes.com/dtds/podcast-1.0.dtd}%s' % s
        def _x(p):
            return xpath_with_ns(p, {
                'blip': 'http://blip.tv/dtd/blip/1.0',
                'media': 'http://search.yahoo.com/mrss/',
                'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
            })

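        # _x() expands a prefixed XPath through xpath_with_ns, e.g.
        # _x('blip:item_id') becomes '{http://blip.tv/dtd/blip/1.0}item_id',
        # replacing the three per-namespace helpers removed above.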
        item = rss.find('channel/item')

        video_id = item.find(blip('item_id')).text
        title = item.find('./title').text
        description = clean_html(compat_str(item.find(blip('puredescription')).text))
        timestamp = parse_iso8601(item.find(blip('datestamp')).text)
        uploader = item.find(blip('user')).text
        uploader_id = item.find(blip('userid')).text
        duration = int(item.find(blip('runtime')).text)
        media_thumbnail = item.find(media('thumbnail'))
        thumbnail = media_thumbnail.get('url') if media_thumbnail is not None else item.find(itunes('image')).text
        categories = [category.text for category in item.findall('category')]
        video_id = xpath_text(item, _x('blip:item_id'), 'video id') or lookup_id
        title = xpath_text(item, 'title', 'title', fatal=True)
        description = clean_html(xpath_text(item, _x('blip:puredescription'), 'description'))
        timestamp = parse_iso8601(xpath_text(item, _x('blip:datestamp'), 'timestamp'))
        uploader = xpath_text(item, _x('blip:user'), 'uploader')
        uploader_id = xpath_text(item, _x('blip:userid'), 'uploader id')
        duration = int_or_none(xpath_text(item, _x('blip:runtime'), 'duration'))
        media_thumbnail = item.find(_x('media:thumbnail'))
        thumbnail = (media_thumbnail.get('url') if media_thumbnail is not None
                     else xpath_text(item, 'image', 'thumbnail'))
        categories = [category.text for category in item.findall('category') if category is not None]

        formats = []
        subtitles_urls = {}

        media_group = item.find(media('group'))
        for media_content in media_group.findall(media('content')):
        media_group = item.find(_x('media:group'))
        for media_content in media_group.findall(_x('media:content')):
            url = media_content.get('url')
            role = media_content.get(blip('role'))
            role = media_content.get(_x('blip:role'))
            msg = self._download_webpage(
                url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
                video_id, 'Resolving URL for %s' % role)
@@ -175,8 +189,8 @@ class BlipTVIE(InfoExtractor):
                'url': real_url,
                'format_id': role,
                'format_note': media_type,
                'vcodec': media_content.get(blip('vcodec')) or 'none',
                'acodec': media_content.get(blip('acodec')),
                'vcodec': media_content.get(_x('blip:vcodec')) or 'none',
                'acodec': media_content.get(_x('blip:acodec')),
                'filesize': media_content.get('filesize'),
                'width': int_or_none(media_content.get('width')),
                'height': int_or_none(media_content.get('height')),
@@ -16,27 +16,38 @@ class BRIE(InfoExtractor):

    _TESTS = [
        {
            'url': 'http://www.br.de/mediathek/video/sendungen/heimatsound/heimatsound-festival-2014-trailer-100.html',
            'md5': '93556dd2bcb2948d9259f8670c516d59',
            'url': 'http://www.br.de/mediathek/video/sendungen/abendschau/betriebliche-altersvorsorge-104.html',
            'md5': '83a0477cf0b8451027eb566d88b51106',
            'info_dict': {
                'id': '25e279aa-1ffd-40fd-9955-5325bd48a53a',
                'id': '48f656ef-287e-486f-be86-459122db22cc',
                'ext': 'mp4',
                'title': 'Wenn das Traditions-Theater wackelt',
                'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
                'duration': 34,
                'uploader': 'BR',
                'upload_date': '20140802',
                'title': 'Die böse Überraschung',
                'description': 'Betriebliche Altersvorsorge: Die böse Überraschung',
                'duration': 180,
                'uploader': 'Reinhard Weber',
                'upload_date': '20150422',
            }
        },
        {
            'url': 'http://www.br.de/nachrichten/schaeuble-haushaltsentwurf-bundestag-100.html',
            'md5': '3db0df1a9a9cd9fa0c70e6ea8aa8e820',
            'url': 'http://www.br.de/nachrichten/oberbayern/inhalt/muenchner-polizeipraesident-schreiber-gestorben-100.html',
            'md5': 'a44396d73ab6a68a69a568fae10705bb',
            'info_dict': {
                'id': 'c6aae3de-2cf9-43f2-957f-f17fef9afaab',
                'id': 'a4b83e34-123d-4b81-9f4e-c0d3121a4e05',
                'ext': 'mp4',
                'title': 'Manfred Schreiber ist tot',
                'description': 'Abendschau kompakt: Manfred Schreiber ist tot',
                'duration': 26,
            }
        },
        {
            'url': 'http://www.br.de/radio/br-klassik/sendungen/allegro/premiere-urauffuehrung-the-land-2015-dance-festival-muenchen-100.html',
            'md5': '8b5b27c0b090f3b35eac4ab3f7a73d3d',
            'info_dict': {
                'id': '74c603c9-26d3-48bb-b85b-079aeed66e0b',
                'ext': 'aac',
                'title': '"Keine neuen Schulden im nächsten Jahr"',
                'description': 'Haushaltsentwurf: "Keine neuen Schulden im nächsten Jahr"',
                'duration': 64,
                'title': 'Kurzweilig und sehr bewegend',
                'description': '"The Land" von Peeping Tom: Kurzweilig und sehr bewegend',
                'duration': 296,
            }
        },
        {
@ -18,6 +18,7 @@ class BreakIE(InfoExtractor):
'id': '2468056',
'ext': 'mp4',
'title': 'When Girls Act Like D-Bags',
'age_limit': 13,
}
}, {
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',

@ -13,6 +13,7 @@ from ..compat import (
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
@ -119,7 +120,7 @@ class BrightcoveIE(InfoExtractor):

try:
object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
except xml.etree.ElementTree.ParseError:
except compat_xml_parse_error:
return

fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
@ -156,6 +157,28 @@ class BrightcoveIE(InfoExtractor):
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)

@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())

@classmethod
def _make_brightcove_url(cls, params):
data = compat_urllib_parse.urlencode(params)
return cls._FEDERATED_URL_TEMPLATE % data

@ -172,7 +195,7 @@ class BrightcoveIE(InfoExtractor):
"""Return a list of all Brightcove URLs from the webpage """

url_m = re.search(
r'<meta\s+property="og:video"\s+content="(https?://(?:secure|c)\.brightcove.com/[^"]+)"',
r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove.com/[^\'"]+)[\'"]',
webpage)
if url_m:
url = unescapeHTML(url_m.group(1))
@ -188,7 +211,12 @@ class BrightcoveIE(InfoExtractor):
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))

return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))

def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})

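As an aside, the `customBC.createVideo` pattern added above can be exercised on its own; the player key and IDs below are invented sample values, not real Brightcove data:

```python
import re

# Invented sample of the inline JS shape described in the comment above.
sample_js = "customBC.createVideo(640, 360, '1234567890', 'AQ%s', '9876543210', 'rand');" % ('x' * 48)

m = re.search(
    r'''(?x)customBC\.createVideo\(
        .*?                                 # skipping width and height
        ["\'](?P<playerID>\d+)["\']\s*,\s*  # playerID
        ["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s*  # playerKey (AQ + 48 chars)
        ["\'](?P<videoID>\d+)["\']          # @videoPlayer
    ''', sample_js)

if m:
    print(m.group('playerID'), m.group('videoID'))  # 1234567890 9876543210
```
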
@ -16,7 +16,7 @@ class BYUtvIE(InfoExtractor):
'ext': 'mp4',
'description': 'md5:5438d33774b6bdc662f9485a340401cc',
'title': 'Season 5 Episode 5',
'thumbnail': 're:^https?://.*promo.*'
'thumbnail': 're:^https?://.*\.jpg$'
},
'params': {
'skip_download': True,

@ -25,14 +25,14 @@ class CanalplusIE(InfoExtractor):
}

_TESTS = [{
'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
'md5': '3db39fb48b9685438ecf33a1078023e4',
'url': 'http://www.canalplus.fr/c-emissions/pid1830-c-zapping.html?vid=1263092',
'md5': 'b3481d7ca972f61e37420798d0a9d934',
'info_dict': {
'id': '922470',
'id': '1263092',
'ext': 'flv',
'title': 'Zapping - 26/08/13',
'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
'upload_date': '20130826',
'title': 'Le Zapping - 13/05/15',
'description': 'md5:09738c0d06be4b5d06a0940edb0da73f',
'upload_date': '20150513',
},
}, {
'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
@ -56,7 +56,7 @@ class CanalplusIE(InfoExtractor):
'skip': 'videos get deleted after a while',
}, {
'url': 'http://www.itele.fr/france/video/aubervilliers-un-lycee-en-colere-111559',
'md5': '65aa83ad62fe107ce29e564bb8712580',
'md5': 'f3a46edcdf28006598ffaf5b30e6a2d4',
'info_dict': {
'id': '1213714',
'ext': 'flv',
@ -106,15 +106,11 @@ class CanalplusIE(InfoExtractor):
continue
format_id = fmt.tag
if format_id == 'HLS':
hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
for fmt in hls_formats:
fmt['preference'] = preference(format_id)
formats.extend(hls_formats)
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', preference=preference(format_id)))
elif format_id == 'HDS':
hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
for fmt in hds_formats:
fmt['preference'] = preference(format_id)
formats.extend(hds_formats)
formats.extend(self._extract_f4m_formats(
format_url + '?hdcore=2.11.3', video_id, preference=preference(format_id)))
else:
formats.append({
'url': format_url,

@ -4,12 +4,13 @@ from .common import InfoExtractor


class CBSIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cbs\.com/shows/[^/]+/(?:video|artist)/(?P<id>[^/]+)/.*'
_VALID_URL = r'https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/(?:video|artist)|colbertlateshow\.com/(?:video|podcasts))/[^/]+/(?P<id>[^/]+)'

_TESTS = [{
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
'info_dict': {
'id': '4JUVEwq3wUT7',
'display_id': 'connect-chat-feat-garth-brooks',
'ext': 'flv',
'title': 'Connect Chat feat. Garth Brooks',
'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
@ -24,6 +25,7 @@ class CBSIE(InfoExtractor):
'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
'info_dict': {
'id': 'WWF_5KqY3PK1',
'display_id': 'st-vincent',
'ext': 'flv',
'title': 'Live on Letterman - St. Vincent',
'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
@ -34,12 +36,23 @@ class CBSIE(InfoExtractor):
'skip_download': True,
},
'_skip': 'Blocked outside the US',
}, {
'url': 'http://colbertlateshow.com/video/8GmB0oY0McANFvp2aEffk9jZZZ2YyXxy/the-colbeard/',
'only_matching': True,
}, {
'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
real_id = self._search_regex(
r"video\.settings\.pid\s*=\s*'([^']+)';",
[r"video\.settings\.pid\s*=\s*'([^']+)';", r"cbsplayer\.pid\s*=\s*'([^']+)';"],
webpage, 'real video ID')
return self.url_result('theplatform:%s' % real_id)
return {
'_type': 'url_transparent',
'ie_key': 'ThePlatform',
'url': 'theplatform:%s' % real_id,
'display_id': display_id,
}

@ -32,7 +32,7 @@ class CBSNewsIE(InfoExtractor):
'id': 'fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack',
'ext': 'flv',
'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack',
'thumbnail': 'http://cbsnews2.cbsistatic.com/hub/i/r/2014/04/04/0c9fbc66-576b-41ca-8069-02d122060dd2/thumbnail/140x90/6dad7a502f88875ceac38202984b6d58/en-0404-werner-replace-640x360.jpg',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 205,
},
'params': {

@ -16,7 +16,7 @@ class CCCIE(InfoExtractor):

_TEST = {
'url': 'http://media.ccc.de/browse/congress/2013/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor.html#video',
'md5': '205a365d0d57c0b1e43a12c9ffe8f9be',
'md5': '3a1eda8f3a29515d27f5adb967d7e740',
'info_dict': {
'id': '20131228183',
'ext': 'mp4',
@ -51,7 +51,7 @@ class CCCIE(InfoExtractor):

matches = re.finditer(r'''(?xs)
<(?:span|div)\s+class='label\s+filetype'>(?P<format>.*?)</(?:span|div)>\s*
<a\s+href='(?P<http_url>[^']+)'>\s*
<a\s+download\s+href='(?P<http_url>[^']+)'>\s*
(?:
.*?
<a\s+href='(?P<torrent_url>[^']+\.torrent)'

@ -7,6 +7,7 @@ from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
from ..utils import (
@ -16,55 +17,81 @@ from ..utils import (


class CeskaTelevizeIE(InfoExtractor):
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'

_TESTS = [
{
'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
'info_dict': {
'id': '214411058091220',
'ext': 'mp4',
'title': 'Hyde Park Civilizace',
'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3350,
},
'params': {
# m3u8 download
'skip_download': True,
},
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(?:[^/]+/)*(?P<id>[^/#?]+)/*(?:[#?].*)?$'
_TESTS = [{
'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
'info_dict': {
'id': '61924494876951776',
'ext': 'mp4',
'title': 'Hyde Park Civilizace',
'description': 'md5:fe93f6eda372d150759d11644ebbfb4a',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3350,
},
{
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
'info_dict': {
'id': '14716',
'ext': 'mp4',
'title': 'První republika: Zpěvačka z Dupárny Bobina',
'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 88.4,
},
'params': {
# m3u8 download
'skip_download': True,
},
'params': {
# m3u8 download
'skip_download': True,
},
]
}, {
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
'info_dict': {
'id': '61924494876844374',
'ext': 'mp4',
'title': 'První republika: Zpěvačka z Dupárny Bobina',
'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 88.4,
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
# video with 18+ caution trailer
'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
'info_dict': {
'id': '215562210900007-bogotart',
'title': 'Queer: Bogotart',
'description': 'Alternativní průvodce současným queer světem',
},
'playlist': [{
'info_dict': {
'id': '61924494876844842',
'ext': 'mp4',
'title': 'Queer: Bogotart (Varování 18+)',
'duration': 10.2,
},
}, {
'info_dict': {
'id': '61924494877068022',
'ext': 'mp4',
'title': 'Queer: Bogotart (Queer)',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 1558.3,
},
}],
'params': {
# m3u8 download
'skip_download': True,
},
}]

def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')

mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
playlist_id = mobj.group('id')

webpage = self._download_webpage(url, video_id)
webpage = self._download_webpage(url, playlist_id)

NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)

typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
typ = self._html_search_regex(
r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
episode_id = self._html_search_regex(
r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')

data = {
'playlist[0][type]': typ,
@ -82,42 +109,52 @@ class CeskaTelevizeIE(InfoExtractor):
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)

playlistpage = self._download_json(req, video_id)
playlistpage = self._download_json(req, playlist_id)

playlist_url = playlistpage['url']
if playlist_url == 'error_region':
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)

req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlist_url))
req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url))
req.add_header('Referer', url)

playlist = self._download_json(req, video_id)
playlist_title = self._og_search_title(webpage)
playlist_description = self._og_search_description(webpage)

item = playlist['playlist'][0]
formats = []
for format_id, stream_url in item['streamUrls'].items():
formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4'))
self._sort_formats(formats)
playlist = self._download_json(req, playlist_id)['playlist']
playlist_len = len(playlist)

title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
duration = float_or_none(item.get('duration'))
thumbnail = item.get('previewImageUrl')
entries = []
for item in playlist:
formats = []
for format_id, stream_url in item['streamUrls'].items():
formats.extend(self._extract_m3u8_formats(
stream_url, playlist_id, 'mp4', entry_protocol='m3u8_native'))
self._sort_formats(formats)

subtitles = {}
subs = item.get('subtitles')
if subs:
subtitles = self.extract_subtitles(episode_id, subs)
item_id = item.get('id') or item['assetId']
title = item['title']

return {
'id': episode_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
duration = float_or_none(item.get('duration'))
thumbnail = item.get('previewImageUrl')

subtitles = {}
if item.get('type') == 'VOD':
subs = item.get('subtitles')
if subs:
subtitles = self.extract_subtitles(episode_id, subs)

entries.append({
'id': item_id,
'title': playlist_title if playlist_len == 1 else '%s (%s)' % (playlist_title, title),
'description': playlist_description if playlist_len == 1 else None,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
})

return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)

def _get_subtitles(self, episode_id, subs):
original_subtitles = self._download_webpage(

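As an aside, the per-entry title expression above keeps the page title for a single-item playlist and appends the item title otherwise; a minimal sketch, with sample titles taken from the tests:

```python
# Mirrors the title expression used in entries.append() above.
def entry_title(playlist_title, item_title, playlist_len):
    return playlist_title if playlist_len == 1 else '%s (%s)' % (playlist_title, item_title)

print(entry_title('Queer: Bogotart', 'Queer', 2))        # Queer: Bogotart (Queer)
print(entry_title('Hyde Park Civilizace', 'Video', 1))   # Hyde Park Civilizace
```
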
@ -57,7 +57,7 @@ class ChilloutzoneIE(InfoExtractor):

base64_video_info = self._html_search_regex(
r'var cozVidData = "(.+?)";', webpage, 'video data')
decoded_video_info = base64.b64decode(base64_video_info).decode("utf-8")
decoded_video_info = base64.b64decode(base64_video_info.encode('utf-8')).decode('utf-8')
video_info_dict = json.loads(decoded_video_info)

# get video information from dict

@ -6,6 +6,7 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .bliptv import BlipTVIE
from .screenwavemedia import ScreenwaveMediaIE


class CinemassacreIE(InfoExtractor):
@ -60,6 +61,17 @@ class CinemassacreIE(InfoExtractor):
'uploader_id': 'Cinemassacre',
'title': 'AVGN: McKids',
}
},
{
'url': 'http://cinemassacre.com/2015/05/25/mario-kart-64-nintendo-64-james-mike-mondays/',
'md5': '1376908e49572389e7b06251a53cdd08',
'info_dict': {
'id': 'Cinemassacre-555779690c440',
'ext': 'mp4',
'description': 'Let’s Play Mario Kart 64 !! Mario Kart 64 is a classic go-kart racing game released for the Nintendo 64 (N64). Today James & Mike do 4 player Battle Mode with Kyle and Bootsy!',
'title': 'Mario Kart 64 (Nintendo 64) James & Mike Mondays',
'upload_date': '20150525',
}
}
]

@ -72,10 +84,10 @@ class CinemassacreIE(InfoExtractor):

playerdata_url = self._search_regex(
[
r'src="(http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
r'<iframe[^>]+src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
ScreenwaveMediaIE.EMBED_PATTERN,
r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
],
webpage, 'player data URL', default=None)
webpage, 'player data URL', default=None, group='url')
if not playerdata_url:
playerdata_url = BlipTVIE._extract_url(webpage)
if not playerdata_url:

@ -1,53 +1,68 @@
from __future__ import unicode_literals

import re
import time
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_duration,
determine_ext,
int_or_none,
js_to_json,
parse_iso8601,
remove_end,
)


class ClipfishIE(InfoExtractor):
IE_NAME = 'clipfish'

_VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
_VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
'md5': '2521cd644e862936cf2e698206e47385',
'md5': '79bc922f3e8a9097b3d68a93780fd475',
'info_dict': {
'id': '3966754',
'ext': 'mp4',
'title': 'FIFA 14 - E3 2013 Trailer',
'timestamp': 1370938118,
'upload_date': '20130611',
'duration': 82,
},
'skip': 'Blocked in the US'
}
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
video_id = self._match_id(url)

info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
(video_id, int(time.time())))
doc = self._download_xml(
info_url, video_id, note='Downloading info page')
title = doc.find('title').text
video_url = doc.find('filename').text
if video_url is None:
xml_bytes = xml.etree.ElementTree.tostring(doc)
raise ExtractorError('Cannot find video URL in document %r' %
xml_bytes)
thumbnail = doc.find('imageurl').text
duration = parse_duration(doc.find('duration').text)
webpage = self._download_webpage(url, video_id)

video_info = self._parse_json(
js_to_json(self._html_search_regex(
'(?s)videoObject\s*=\s*({.+?});', webpage, 'video object')),
video_id)

formats = []
for video_url in re.findall(r'var\s+videourl\s*=\s*"([^"]+)"', webpage):
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.append({
'url': video_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
'ext': 'mp4',
'format_id': 'hls',
})
else:
formats.append({
'url': video_url,
'format_id': ext,
})
self._sort_formats(formats)

title = remove_end(self._og_search_title(webpage), ' - Video')
thumbnail = self._og_search_thumbnail(webpage)
duration = int_or_none(video_info.get('length'))
timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage, 'upload date'))

return {
'id': video_id,
'title': title,
'url': video_url,
'formats': formats,
'thumbnail': thumbnail,
'duration': duration,
'timestamp': timestamp,
}

@ -1,7 +1,5 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
@ -10,9 +8,9 @@ from ..utils import (


class ClipsyndicateIE(InfoExtractor):
_VALID_URL = r'http://www\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'
_VALID_URL = r'http://(?:chic|www)\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'

_TEST = {
_TESTS = [{
'url': 'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe',
'md5': '4d7d549451bad625e0ff3d7bd56d776c',
'info_dict': {
@ -22,11 +20,13 @@ class ClipsyndicateIE(InfoExtractor):
'duration': 612,
'thumbnail': 're:^https?://.+\.jpg',
},
}
}, {
'url': 'http://chic.clipsyndicate.com/video/play/5844117/shark_attack',
'only_matching': True,
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
js_player = self._download_webpage(
'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id,
video_id, 'Downloading player')

@ -12,9 +12,9 @@ from ..utils import (


class ClubicIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?clubic\.com/video/[^/]+/video.*-(?P<id>[0-9]+)\.html'
_VALID_URL = r'http://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html'

_TEST = {
_TESTS = [{
'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html',
'md5': '1592b694ba586036efac1776b0b43cd3',
'info_dict': {
@ -24,7 +24,10 @@ class ClubicIE(InfoExtractor):
'description': 're:Gueule de bois chez Nokia. Le constructeur a indiqué cette.*',
'thumbnail': 're:^http://img\.clubic\.com/.*\.jpg$',
}
}
}, {
'url': 'http://www.clubic.com/video/video-clubic-week-2-0-apple-iphone-6s-et-plus-mais-surtout-le-pencil-469792.html',
'only_matching': True,
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

@ -11,7 +11,7 @@ from ..utils import (

class CNETIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
_TEST = {
_TESTS = [{
'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
'info_dict': {
'id': '56f4ea68-bd21-4852-b08c-4de5b8354c60',
@ -25,7 +25,20 @@ class CNETIE(InfoExtractor):
'params': {
'skip_download': 'requires rtmpdump',
}
}
}, {
'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
'info_dict': {
'id': '56527b93-d25d-44e3-b738-f989ce2e49ba',
'ext': 'flv',
'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. #TDPothole',
'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
'uploader': 'Ashley Esqueda',
'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
},
'params': {
'skip_download': True, # requires rtmpdump
},
}]

def _real_extract(self, url):
display_id = self._match_id(url)
@ -42,7 +55,7 @@ class CNETIE(InfoExtractor):
raise ExtractorError('Cannot find video data')

mpx_account = data['config']['players']['default']['mpx_account']
vid = vdata['files']['rtmp']
vid = vdata['files'].get('rtmp', vdata['files']['hds'])
tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)

video_id = vdata['id']

@ -12,7 +12,7 @@ from ..utils import (

class CNNIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:(?:edition|www)\.)?cnn\.com/video/(?:data/.+?|\?)/
(?P<path>.+?/(?P<title>[^/]+?)(?:\.(?:[a-z]{3,5})(?:-ap)?|(?=&)))'''
(?P<path>.+?/(?P<title>[^/]+?)(?:\.(?:[a-z\-]+)|(?=&)))'''

_TESTS = [{
'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',

@ -36,7 +36,7 @@ class ComCarCoffIE(InfoExtractor):
webpage, 'full data json'))

video_id = full_data['activeVideo']['video']
video_data = full_data['videos'][video_id]
video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
thumbnails = [{
'url': video_data['images']['thumb'],
}, {

@ -14,26 +14,35 @@ import xml.etree.ElementTree

from ..compat import (
compat_cookiejar,
compat_cookies,
compat_getpass,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
url_basename,
xpath_text,
xpath_with_ns,
)
_NO_DEFAULT = object()


class InfoExtractor(object):
@ -63,7 +72,7 @@ class InfoExtractor(object):

Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from url if missing
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
@ -153,7 +162,7 @@ class InfoExtractor(object):
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A url pointing to the subtitles file
* "url": A URL pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
@ -174,13 +183,18 @@ class InfoExtractor(object):
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The url to the video webpage, if given to youtube-dl it
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.

Unless mentioned otherwise, the fields should be Unicode strings.

@ -191,8 +205,8 @@ class InfoExtractor(object):
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.

Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).


_type "multi_video" indicates that there are multiple videos that
@ -496,10 +510,22 @@ class InfoExtractor(object):
"""Report attempt to log in."""
self.to_screen('Logging in')

@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)

@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
raise ExtractorError(
'%s. You might want to use --proxy to workaround.' % msg,
expected=True)

# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a url that points to a page that should be processed"""
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
@ -523,7 +549,7 @@ class InfoExtractor(object):
video_info['description'] = playlist_description
return video_info

def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
@ -549,7 +575,7 @@ class InfoExtractor(object):
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not _NO_DEFAULT:
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
@ -557,7 +583,7 @@ class InfoExtractor(object):
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None

def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
@ -597,7 +623,7 @@ class InfoExtractor(object):

return (username, password)

def _get_tfa_info(self):
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
@ -611,7 +637,7 @@ class InfoExtractor(object):
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']

return None
return compat_getpass('Type %s and press [Return]: ' % note)

# Helper functions for extracting OpenGraph info
@staticmethod
@ -624,6 +650,12 @@ class InfoExtractor(object):
template % (content_re, property_re),
]

@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
@ -633,7 +665,7 @@ class InfoExtractor(object):
return unescapeHTML(escaped)

def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
@ -654,9 +686,7 @@ class InfoExtractor(object):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
self._meta_regex(name),
html, display_name, fatal=fatal, group='content', **kwargs)

def _dc_search_uploader(self, html):
@ -705,6 +735,28 @@ class InfoExtractor(object):
return self._html_search_meta('twitter:player', html,
'twitter card player')

@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs

def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)

def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
@ -764,7 +816,7 @@ class InfoExtractor(object):
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id'),
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)

@ -786,8 +838,8 @@ class InfoExtractor(object):
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.report_warning(
'%s URL is invalid, skipping' % item, video_id)
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise

@ -815,10 +867,14 @@ class InfoExtractor(object):
self.to_screen(msg)
time.sleep(timeout)

def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None):
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip()):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest')
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source)

formats = []
manifest_version = '1.0'
@ -828,8 +884,19 @@ class InfoExtractor(object):
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
manifest_url = ('/'.join(manifest_url.split('/')[:-1]) + '/' +
(media_el.attrib.get('href') or media_el.attrib.get('url')))
media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])),
@ -846,7 +913,8 @@ class InfoExtractor(object):

def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None):
m3u8_id=None, note=None, errnote=None,
fatal=True):

formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
@ -865,8 +933,11 @@ class InfoExtractor(object):

m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note='Downloading m3u8 information',
errnote='Failed to download m3u8 information')
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if m3u8_doc is False:
return m3u8_doc
last_info = None
last_media = None
kv_rex = re.compile(
@ -896,7 +967,7 @@ class InfoExtractor(object):
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media else None
last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
@ -927,69 +998,221 @@ class InfoExtractor(object):
self._sort_formats(formats)
return formats

# TODO: improve extraction
def _extract_smil_formats(self, smil_url, video_id, fatal=True):
smil = self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)

def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)

if smil is False:
assert not fatal
return []

base = smil.find('./head/meta').get('base')
namespace = self._parse_smil_namespace(smil)

return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

def _download_smil(self, smil_url, video_id, fatal=True):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)

def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)

formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content

return {
'id': video_id,
'title': title or video_id,
'description': description,
'formats': formats,
'subtitles': subtitles,
}

def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break

formats = []
rtmp_count = 0
if smil.findall('./body/seq/video'):
video = smil.findall('./body/seq/video')[0]
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
else:
for video in smil.findall('./body/switch/video'):
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
http_count = 0

videos = smil.findall(self._xpath_ns('.//video', namespace))
for video in videos:
src = video.get('src')
if not src:
continue

bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
filesize = int_or_none(video.get('size') or video.get('fileSize'))
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
ext = video.get('ext')
src_ext = determine_ext(src)
streamer = video.get('streamer') or base

if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue

src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)

if proto == 'm3u8' or src_ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls'))
continue

if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse.urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds'))
continue

if src_url.startswith('http'):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue

self._sort_formats(formats)

return formats

def _parse_smil_video(self, video, video_id, base, rtmp_count):
src = video.get('src')
if not src:
return ([], rtmp_count)
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
return (self._extract_m3u8_formats(src, video_id, ext), rtmp_count)
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
return ([{
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)
elif proto.startswith('http'):
return ([{
'url': base + src,
'ext': ext or 'flv',
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src:
continue
ext = textstream.get('ext') or determine_ext(src)
if not ext:
type_ = textstream.get('type')
SUBTITLES_TYPES = {
'text/vtt': 'vtt',
'text/srt': 'srt',
'application/smptett+xml': 'tt',
}
if type_ in SUBTITLES_TYPES:
ext = SUBTITLES_TYPES[type_]
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles

def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)

def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}

entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)

entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries

def _live_title(self, name):
""" Generate the title for a live video """
@ -1025,6 +1248,12 @@ class InfoExtractor(object):
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)

def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = compat_urllib_request.Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))

def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
@ -1063,6 +1292,23 @@ class InfoExtractor(object):
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")

@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret

@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret

def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
@ -1072,14 +1318,11 @@ class InfoExtractor(object):
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")

def _subtitles_timecode(self, seconds):
return '%02d:%02d:%02d.%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)


class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""

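As an aside, the `_xpath_ns` helper added above expands each non-trivial path component into the Clark notation that `xml.etree.ElementTree` expects; a standalone sketch, using the SMIL namespace as an assumed example:

```python
def xpath_ns(path, namespace=None):
    # Standalone copy of InfoExtractor._xpath_ns from the diff above.
    if not namespace:
        return path
    out = []
    for c in path.split('/'):
        if not c or c == '.':
            out.append(c)
        else:
            out.append('{%s}%s' % (namespace, c))
    return '/'.join(out)

print(xpath_ns('./head/meta', 'http://www.w3.org/2001/SMIL20/Language'))
# ./{http://www.w3.org/2001/SMIL20/Language}head/{http://www.w3.org/2001/SMIL20/Language}meta
```
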
@ -2,7 +2,6 @@
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..compat import (
@ -24,16 +23,28 @@ class CondeNastIE(InfoExtractor):
# The keys are the supported sites and the values are the name to be shown
# to the user and in the extractor description.
_SITES = {
'wired': 'WIRED',
'gq': 'GQ',
'vogue': 'Vogue',
'glamour': 'Glamour',
'wmagazine': 'W Magazine',
'vanityfair': 'Vanity Fair',
'allure': 'Allure',
'architecturaldigest': 'Architectural Digest',
'arstechnica': 'Ars Technica',
'bonappetit': 'Bon Appétit',
'brides': 'Brides',
'cnevids': 'Condé Nast',
'cntraveler': 'Condé Nast Traveler',
'details': 'Details',
'epicurious': 'Epicurious',
'glamour': 'Glamour',
'golfdigest': 'Golf Digest',
'gq': 'GQ',
'newyorker': 'The New Yorker',
'self': 'SELF',
'teenvogue': 'Teen Vogue',
'vanityfair': 'Vanity Fair',
'vogue': 'Vogue',
'wired': 'WIRED',
'wmagazine': 'W Magazine',
}

_VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
_VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())
@ -86,8 +97,8 @@ class CondeNastIE(InfoExtractor):
info_url = base_info_url + data
info_page = self._download_webpage(info_url, video_id,
'Downloading video info')
video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info')
video_info = json.loads(video_info)
video_info = self._search_regex(r'var\s+video\s*=\s*({.+?});', info_page, 'video info')
video_info = self._parse_json(video_info, video_id)

formats = [{
'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),

@ -12,22 +12,43 @@ from math import pow, sqrt, floor
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
bytes_to_intlist,
intlist_to_bytes,
int_or_none,
remove_end,
unified_strdate,
urlencode_postdata,
xpath_text,
)
from ..aes import (
aes_cbc_decrypt,
)


class CrunchyrollIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
class CrunchyrollBaseIE(InfoExtractor):
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
else compat_urllib_request.Request(url_or_request))
# Accept-Language must be set explicitly to accept any language to avoid issues
# similar to https://github.com/rg3/youtube-dl/issues/6797.
# Along with IP address Crunchyroll uses Accept-Language to guess whether georestriction
# should be imposed or not (from what I can see it just takes the first language
# ignoring the priority and requires it to correspond the IP). By the way this causes
# Crunchyroll to not work in georestriction cases in some browsers that don't place
# the locale lang first in header. However allowing any language seems to workaround the issue.
request.add_header('Accept-Language', '*')
return super(CrunchyrollBaseIE, self)._download_webpage(
request, video_id, note, errnote, fatal, tries, timeout, encoding)


class CrunchyrollIE(CrunchyrollBaseIE):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_NETRC_MACHINE = 'crunchyroll'
_TESTS = [{
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
@ -45,6 +66,22 @@ class CrunchyrollIE(InfoExtractor):
# rtmp
'skip_download': True,
},
}, {
'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
'info_dict': {
'id': '589804',
'ext': 'flv',
'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
'description': 'md5:fe2743efedb49d279552926d0bd0cd9e',
'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'Danny Choo Network',
'upload_date': '20120213',
},
'params': {
# rtmp
'skip_download': True,
},

}, {
'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
'only_matching': True,
@ -76,8 +113,8 @@ class CrunchyrollIE(InfoExtractor):
self._login()

def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(data)
iv = bytes_to_intlist(iv)
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
id = int(id)

def obfuscate_key_aux(count, modulo, start):
@ -179,6 +216,16 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

return output

def _extract_subtitles(self, subtitle):
sub_root = xml.etree.ElementTree.fromstring(subtitle)
return [{
'ext': 'srt',
'data': self._convert_subtitles_to_srt(sub_root),
}, {
'ext': 'ass',
'data': self._convert_subtitles_to_ass(sub_root),
}]

def _get_subtitles(self, video_id, webpage):
subtitles = {}
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
@ -190,25 +237,11 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
if not id or not iv or not data:
continue
id = int(id)
iv = base64.b64decode(iv)
data = base64.b64decode(data)

subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
|
||||
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
|
||||
if not lang_code:
|
||||
continue
|
||||
sub_root = xml.etree.ElementTree.fromstring(subtitle)
|
||||
subtitles[lang_code] = [
|
||||
{
|
||||
'ext': 'srt',
|
||||
'data': self._convert_subtitles_to_srt(sub_root),
|
||||
},
|
||||
{
|
||||
'ext': 'ass',
|
||||
'data': self._convert_subtitles_to_ass(sub_root),
|
||||
},
|
||||
]
|
||||
subtitles[lang_code] = self._extract_subtitles(subtitle)
|
||||
return subtitles
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -222,7 +255,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
webpage_url = 'http://www.' + mobj.group('url')
|
||||
|
||||
webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
|
||||
note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
|
||||
note_m = self._html_search_regex(
|
||||
r'<div class="showmedia-trailer-notice">(.+?)</div>',
|
||||
webpage, 'trailer-notice', default='')
|
||||
if note_m:
|
||||
raise ExtractorError(note_m)
|
||||
|
||||
@ -232,17 +267,24 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
if msg.get('type') == 'error':
|
||||
raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)
|
||||
|
||||
if 'To view this, please log in to verify you are 18 or older.' in webpage:
|
||||
self.raise_login_required()
|
||||
|
||||
video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
|
||||
video_title = re.sub(r' {2,}', ' ', video_title)
|
||||
video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
|
||||
if not video_description:
|
||||
video_description = None
|
||||
video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
|
||||
video_upload_date = self._html_search_regex(
|
||||
[r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
|
||||
webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
|
||||
if video_upload_date:
|
||||
video_upload_date = unified_strdate(video_upload_date)
|
||||
video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
|
||||
video_uploader = self._html_search_regex(
|
||||
r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage,
|
||||
'video_uploader', fatal=False)
|
||||
|
||||
playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
|
||||
playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
|
||||
playerdata_req = compat_urllib_request.Request(playerdata_url)
|
||||
playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
|
||||
playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
@ -255,23 +297,44 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage):
|
||||
stream_quality, stream_format = self._FORMAT_IDS[fmt]
|
||||
video_format = fmt + 'p'
|
||||
streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
|
||||
# urlencode doesn't work!
|
||||
streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
|
||||
streamdata_req = compat_urllib_request.Request(
|
||||
'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
|
||||
% (stream_id, stream_format, stream_quality),
|
||||
compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
|
||||
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
|
||||
streamdata = self._download_xml(
|
||||
streamdata_req, video_id,
|
||||
note='Downloading media info for %s' % video_format)
|
||||
video_url = streamdata.find('./host').text
|
||||
video_play_path = streamdata.find('./file').text
|
||||
formats.append({
|
||||
stream_info = streamdata.find('./{default}preload/stream_info')
|
||||
video_url = stream_info.find('./host').text
|
||||
video_play_path = stream_info.find('./file').text
|
||||
metadata = stream_info.find('./metadata')
|
||||
format_info = {
|
||||
'format': video_format,
|
||||
'format_id': video_format,
|
||||
'height': int_or_none(xpath_text(metadata, './height')),
|
||||
'width': int_or_none(xpath_text(metadata, './width')),
|
||||
}
|
||||
|
||||
if '.fplive.net/' in video_url:
|
||||
video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
|
||||
parsed_video_url = compat_urlparse.urlparse(video_url)
|
||||
direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
|
||||
netloc='v.lvlt.crcdn.net',
|
||||
path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_play_path.split(':')[-1])))
|
||||
if self._is_valid_url(direct_video_url, video_id, video_format):
|
||||
format_info.update({
|
||||
'url': direct_video_url,
|
||||
})
|
||||
formats.append(format_info)
|
||||
continue
|
||||
|
||||
format_info.update({
|
||||
'url': video_url,
|
||||
'play_path': video_play_path,
|
||||
'ext': 'flv',
|
||||
'format': video_format,
|
||||
'format_id': video_format,
|
||||
})
|
||||
formats.append(format_info)
|
||||
|
||||
subtitles = self.extract_subtitles(video_id, webpage)
|
||||
|
||||
@ -287,7 +350,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
}
|
||||
|
||||
|
||||
class CrunchyrollShowPlaylistIE(InfoExtractor):
|
||||
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
|
||||
IE_NAME = "crunchyroll:playlist"
|
||||
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
|
||||
|
||||
|
@ -6,6 +6,7 @@ from ..utils import parse_iso8601, ExtractorError


class CtsNewsIE(InfoExtractor):
    IE_DESC = '華視新聞'
    # https connection failed (Connection reset)
    _VALID_URL = r'http://news\.cts\.com\.tw/[a-z]+/[a-z]+/\d+/(?P<id>\d+)\.html'
    _TESTS = [{

@ -13,8 +13,9 @@ from ..compat import (
)
from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
    orderedSet,
    parse_iso8601,
    str_to_int,
    unescapeHTML,
)
@ -28,10 +29,16 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
        request.add_header('Cookie', 'family_filter=off; ff=off')
        return request

    def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
        request = self._build_request(url)
        return self._download_webpage_handle(request, *args, **kwargs)

    def _download_webpage_no_ff(self, url, *args, **kwargs):
        request = self._build_request(url)
        return self._download_webpage(request, *args, **kwargs)


class DailymotionIE(DailymotionBaseInfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
    IE_NAME = 'dailymotion'

@ -50,8 +57,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
        'info_dict': {
            'id': 'x2iuewm',
            'ext': 'mp4',
            'uploader': 'IGN',
            'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
            'description': 'Several come bundled with the Steam Controller.',
            'thumbnail': 're:^https?:.*\.(?:jpg|png)$',
            'duration': 74,
            'timestamp': 1425657362,
            'upload_date': '20150306',
            'uploader': 'IGN',
            'uploader_id': 'xijv66',
            'age_limit': 0,
            'view_count': int,
            'comment_count': int,
        }
    },
    # Vevo video
@ -85,38 +101,106 @@ class DailymotionIE(DailymotionBaseInfoExtractor):

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
        request = self._build_request(url)
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
            self.to_screen('Vevo video detected: %s' % vevo_id)
            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
        webpage = self._download_webpage_no_ff(
            'https://www.dailymotion.com/video/%s' % video_id, video_id)

        age_limit = self._rta_search(webpage)

        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
        description = self._og_search_description(webpage) or self._html_search_meta(
            'description', webpage, 'description')

        view_count = str_to_int(self._search_regex(
            [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
             r'video_views_count[^>]+>\s+([\d\.,]+)'],
            webpage, 'view count', fatal=False))
        comment_count = int_or_none(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
            webpage, 'comment count', fatal=False))

        player_v5 = self._search_regex(
            r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
            webpage, 'player v5', default=None)
        if player_v5:
            player = self._parse_json(player_v5, video_id)
            metadata = player['metadata']
            formats = []
            for quality, media_list in metadata['qualities'].items():
                for media in media_list:
                    media_url = media.get('url')
                    if not media_url:
                        continue
                    type_ = media.get('type')
                    if type_ == 'application/vnd.lumberjack.manifest':
                        continue
                    if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            media_url, video_id, 'mp4', m3u8_id='hls'))
                    else:
                        f = {
                            'url': media_url,
                            'format_id': quality,
                        }
                        m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
                        if m:
                            f.update({
                                'width': int(m.group('width')),
                                'height': int(m.group('height')),
                            })
                        formats.append(f)
            self._sort_formats(formats)

            title = metadata['title']
            duration = int_or_none(metadata.get('duration'))
            timestamp = int_or_none(metadata.get('created_time'))
            thumbnail = metadata.get('poster_url')
            uploader = metadata.get('owner', {}).get('screenname')
            uploader_id = metadata.get('owner', {}).get('id')

            subtitles = {}
            for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
                subtitles[subtitle_lang] = [{
                    'ext': determine_ext(subtitle_url),
                    'url': subtitle_url,
                } for subtitle_url in subtitle.get('urls', [])]

            return {
                'id': video_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'timestamp': timestamp,
                'uploader': uploader,
                'uploader_id': uploader_id,
                'age_limit': age_limit,
                'view_count': view_count,
                'comment_count': comment_count,
                'formats': formats,
                'subtitles': subtitles,
            }

        # vevo embed
        vevo_id = self._search_regex(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
            webpage, 'vevo embed', default=None)
        if vevo_id:
            return self.url_result('vevo:%s' % vevo_id, 'Vevo')

        # fallback old player
        embed_page = self._download_webpage_no_ff(
            'https://www.dailymotion.com/embed/video/%s' % video_id,
            video_id, 'Downloading embed page')

        timestamp = parse_iso8601(self._html_search_meta(
            'video:release_date', webpage, 'upload date'))

        info = self._parse_json(
            self._search_regex(
                r'var info = ({.*?}),$', embed_page,
                'video info', flags=re.MULTILINE),
            video_id)

        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_request = self._build_request(embed_url)
        embed_page = self._download_webpage(
            embed_request, video_id, 'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
                                  'video info', flags=re.MULTILINE)
        info = json.loads(info)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
            raise ExtractorError(msg, expected=True)
@ -137,16 +221,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
                'width': width,
                'height': height,
            })
        if not formats:
            raise ExtractorError('Unable to extract video URL')
        self._sort_formats(formats)

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, webpage)

        view_count = str_to_int(self._search_regex(
            r'video_views_count[^>]+>\s+([\d\.,]+)',
            webpage, 'view count', fatal=False))

        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
@ -157,12 +236,14 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
            'id': video_id,
            'formats': formats,
            'uploader': info['owner.screenname'],
            'upload_date': video_upload_date,
            'timestamp': timestamp,
            'title': title,
            'description': description,
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
            'duration': info['duration']
        }

    def _get_subtitles(self, video_id, webpage):
@ -196,18 +277,26 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
    }]

    def _extract_entries(self, id):
        video_ids = []
        video_ids = set()
        processed_urls = set()
        for pagenum in itertools.count(1):
            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
            webpage = self._download_webpage(request,
                                             id, 'Downloading page %s' % pagenum)
            page_url = self._PAGE_TEMPLATE % (id, pagenum)
            webpage, urlh = self._download_webpage_handle_no_ff(
                page_url, id, 'Downloading page %s' % pagenum)
            if urlh.geturl() in processed_urls:
                self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
                    page_url, urlh.geturl()), id)
                break

            video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
            processed_urls.add(urlh.geturl())

            for video_id in re.findall(r'data-xid="(.+?)"', webpage):
                if video_id not in video_ids:
                    yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                    video_ids.add(video_id)

            if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                break
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
@ -224,7 +313,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):

class DailymotionUserIE(DailymotionPlaylistIE):
    IE_NAME = 'dailymotion:user'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:old/)?user/(?P<user>[^/]+)'
    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
    _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
    _TESTS = [{
        'url': 'https://www.dailymotion.com/user/nqtv',
@ -233,12 +322,24 @@ class DailymotionUserIE(DailymotionPlaylistIE):
            'title': 'Rémi Gaillard',
        },
        'playlist_mincount': 100,
    }, {
        'url': 'http://www.dailymotion.com/user/UnderProject',
        'info_dict': {
            'id': 'UnderProject',
            'title': 'UnderProject',
        },
        'playlist_mincount': 1800,
        'expected_warnings': [
            'Stopped at duplicated page',
        ],
        'skip': 'Takes too long time',
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        webpage = self._download_webpage(url, user)
        webpage = self._download_webpage(
            'https://www.dailymotion.com/user/%s' % user, user)
        full_user = unescapeHTML(self._html_search_regex(
            r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
            webpage, 'user'))
@ -249,3 +350,52 @@ class DailymotionUserIE(DailymotionPlaylistIE):
            'title': full_user,
            'entries': self._extract_entries(user),
        }


class DailymotionCloudIE(DailymotionBaseInfoExtractor):
    _VALID_URL_PREFIX = r'http://api\.dmcloud\.net/(?:player/)?embed/'
    _VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX
    _VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX

    _TESTS = [{
        # From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
        # Tested at FranceTvInfo_2
        'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
        'only_matching': True,
    }, {
        # http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html
        'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1',
        'only_matching': True,
    }]

    @classmethod
    def _extract_dmcloud_url(self, webpage):
        mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, webpage)
        if mobj:
            return mobj.group(1)

        mobj = re.search(
            r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % self._VALID_EMBED_URL,
            webpage)
        if mobj:
            return mobj.group(1)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage_no_ff(url, video_id)

        title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')

        video_info = self._parse_json(self._search_regex(
            r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)

        # TODO: parse ios_url, which is in fact a manifest
        video_url = video_info['mp4_url']

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': video_info.get('thumbnail_url'),
        }

84
youtube_dl/extractor/dcn.py
Normal file
@ -0,0 +1,84 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    int_or_none,
    parse_iso8601,
)


class DCNIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887',
        'info_dict':
        {
            'id': '17375',
            'ext': 'mp4',
            'title': 'رحلة العمر : الحلقة 1',
            'description': 'md5:0156e935d870acb8ef0a66d24070c6d6',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 2041,
            'timestamp': 1227504126,
            'upload_date': '20081124',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        request = compat_urllib_request.Request(
            'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
            headers={'Origin': 'http://www.dcndigital.ae'})

        video = self._download_json(request, video_id)
        title = video.get('title_en') or video['title_ar']

        webpage = self._download_webpage(
            'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
            compat_urllib_parse.urlencode({
                'id': video['id'],
                'user_id': video['user_id'],
                'signature': video['signature'],
                'countries': 'Q0M=',
                'filter': 'DENY',
            }), video_id)

    m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url')
        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')

        rtsp_url = self._search_regex(
            r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
        if rtsp_url:
            formats.append({
                'url': rtsp_url,
                'format_id': 'rtsp',
            })

        self._sort_formats(formats)

        img = video.get('img')
        thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None
        duration = int_or_none(video.get('duration'))
        description = video.get('description_en') or video.get('description_ar')
        timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }

@ -3,42 +3,47 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..utils import unified_strdate


class DFBIE(InfoExtractor):
    IE_NAME = 'tv.dfb.de'
    _VALID_URL = r'https?://tv\.dfb\.de/video/[^/]+/(?P<id>\d+)'
    _VALID_URL = r'https?://tv\.dfb\.de/video/(?P<display_id>[^/]+)/(?P<id>\d+)'

    _TEST = {
        'url': 'http://tv.dfb.de/video/highlights-des-empfangs-in-berlin/9070/',
        'url': 'http://tv.dfb.de/video/u-19-em-stimmen-zum-spiel-gegen-russland/11633/',
        # The md5 is different each time
        'info_dict': {
            'id': '9070',
            'id': '11633',
            'display_id': 'u-19-em-stimmen-zum-spiel-gegen-russland',
            'ext': 'flv',
            'title': 'Highlights des Empfangs in Berlin',
            'upload_date': '20140716',
            'title': 'U 19-EM: Stimmen zum Spiel gegen Russland',
            'upload_date': '20150714',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, video_id)
        webpage = self._download_webpage(url, display_id)
        player_info = self._download_xml(
            'http://tv.dfb.de/server/hd_video.php?play=%s' % video_id,
            video_id)
            display_id)
        video_info = player_info.find('video')

        f4m_info = self._download_xml(self._proto_relative_url(video_info.find('url').text.strip()), video_id)
        f4m_info = self._download_xml(
            self._proto_relative_url(video_info.find('url').text.strip()), display_id)
        token_el = f4m_info.find('token')
        manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] + '&hdcore=3.2.0'
        formats = self._extract_f4m_formats(manifest_url, display_id)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': video_info.find('title').text,
            'url': manifest_url,
            'ext': 'flv',
            'thumbnail': self._og_search_thumbnail(webpage),
            'upload_date': ''.join(video_info.find('time_date').text.split('.')[::-1]),
            'upload_date': unified_strdate(video_info.find('time_date').text),
            'formats': formats,
        }

@ -1,10 +1,7 @@
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    xpath_text,
    parse_duration,
)
from ..utils import parse_duration


class DHMIE(InfoExtractor):
@ -34,24 +31,14 @@ class DHMIE(InfoExtractor):
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        webpage = self._download_webpage(url, playlist_id)

        playlist_url = self._search_regex(
            r"file\s*:\s*'([^']+)'", webpage, 'playlist url')

        playlist = self._download_xml(playlist_url, video_id)

        track = playlist.find(
            './{http://xspf.org/ns/0/}trackList/{http://xspf.org/ns/0/}track')

        video_url = xpath_text(
            track, './{http://xspf.org/ns/0/}location',
            'video url', fatal=True)
        thumbnail = xpath_text(
            track, './{http://xspf.org/ns/0/}image',
            'thumbnail')
        entries = self._extract_xspf_playlist(playlist_url, playlist_id)

        title = self._search_regex(
            [r'dc:title="([^"]+)"', r'<title> »([^<]+)</title>'],
@ -63,11 +50,10 @@ class DHMIE(InfoExtractor):
            r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
            webpage, 'duration', default=None))

        return {
            'id': video_id,
            'url': video_url,
        entries[0].update({
            'title': title,
            'description': description,
            'duration': duration,
            'thumbnail': thumbnail,
        }
        })

        return self.playlist_result(entries, playlist_id)

@ -2,19 +2,19 @@ from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_iso8601,
    int_or_none,
)
from ..compat import compat_str


class DiscoveryIE(InfoExtractor):
    _VALID_URL = r'http://www\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9_\-]*)(?:\.htm)?'
    _TEST = {
    _TESTS = [{
        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
        'md5': '3c69d77d9b0d82bfd5e5932a60f26504',
        'info_dict': {
            'id': 'mission-impossible-outtakes',
            'ext': 'flv',
            'id': '20769',
            'ext': 'mp4',
            'title': 'Mission Impossible Outtakes',
            'description': ('Watch Jamie Hyneman and Adam Savage practice being'
                            ' each other -- to the point of confusing Jamie\'s dog -- and '
@ -24,22 +24,36 @@ class DiscoveryIE(InfoExtractor):
            'timestamp': 1303099200,
            'upload_date': '20110418',
        },
    }
        'params': {
            'skip_download': True,  # requires ffmpeg
        }
    }, {
        'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mythbusters-the-simpsons',
        'info_dict': {
            'id': 'mythbusters-the-simpsons',
            'title': 'MythBusters: The Simpsons',
        },
        'playlist_count': 9,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        info = self._download_json(url + '?flat=1', video_id)

        info = self._parse_json(self._search_regex(
            r'(?s)<script type="application/ld\+json">(.*?)</script>',
            webpage, 'video info'), video_id)
        video_title = info.get('playlist_title') or info.get('video_title')

        return {
            'id': video_id,
            'title': info['name'],
            'url': info['contentURL'],
            'description': info.get('description'),
            'thumbnail': info.get('thumbnailUrl'),
            'timestamp': parse_iso8601(info.get('uploadDate')),
            'duration': int_or_none(info.get('duration')),
        }
        entries = [{
            'id': compat_str(video_info['id']),
            'formats': self._extract_m3u8_formats(
                video_info['src'], video_id, ext='mp4',
                note='Download m3u8 information for video %d' % (idx + 1)),
            'title': video_info['title'],
            'description': video_info.get('description'),
            'duration': parse_duration(video_info.get('video_length')),
            'webpage_url': video_info.get('href'),
            'thumbnail': video_info.get('thumbnailURL'),
            'alt_title': video_info.get('secondary_title'),
            'timestamp': parse_iso8601(video_info.get('publishedDate')),
        } for idx, video_info in enumerate(info['playlist'])]

        return self.playlist_result(entries, video_id, video_title)

@ -1,27 +0,0 @@
from __future__ import unicode_literals

from .novamov import NovaMovIE


class DivxStageIE(NovaMovIE):
    IE_NAME = 'divxstage'
    IE_DESC = 'DivxStage'

    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}

    _HOST = 'www.divxstage.eu'

    _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
    _TITLE_REGEX = r'<div class="video_det">\s*<strong>([^<]+)</strong>'
    _DESCRIPTION_REGEX = r'<div class="video_det">\s*<strong>[^<]+</strong>\s*<p>([^<]+)</p>'

    _TEST = {
        'url': 'http://www.divxstage.eu/video/57f238e2e5e01',
        'md5': '63969f6eb26533a1968c4d325be63e72',
        'info_dict': {
            'id': '57f238e2e5e01',
            'ext': 'flv',
            'title': 'youtubedl test video',
            'description': 'This is a test video for youtubedl.',
        }
    }

@ -9,6 +9,7 @@ from ..compat import (compat_str, compat_basestring)


class DouyuTVIE(InfoExtractor):
    IE_DESC = '斗鱼'
    _VALID_URL = r'http://(?:www\.)?douyutv\.com/(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'http://www.douyutv.com/iseven',

216
youtube_dl/extractor/dramafever.py
Normal file
@ -0,0 +1,216 @@
# encoding: utf-8
from __future__ import unicode_literals

import itertools

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    clean_html,
    determine_ext,
    int_or_none,
    parse_iso8601,
)


class DramaFeverBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://www.dramafever.com/accounts/login/'
    _NETRC_MACHINE = 'dramafever'

    _CONSUMER_SECRET = 'DA59dtVXYLxajktV'

    _consumer_secret = None

    def _get_consumer_secret(self):
        mainjs = self._download_webpage(
            'http://www.dramafever.com/static/51afe95/df2014/scripts/main.js',
            None, 'Downloading main.js', fatal=False)
        if not mainjs:
            return self._CONSUMER_SECRET
        return self._search_regex(
            r"var\s+cs\s*=\s*'([^']+)'", mainjs,
            'consumer secret', default=self._CONSUMER_SECRET)

    def _real_initialize(self):
        self._login()
        self._consumer_secret = self._get_consumer_secret()

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username,
            'password': password,
        }

        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        if all(logout_pattern not in response
               for logout_pattern in ['href="/accounts/logout/"', '>Log out<']):
            error = self._html_search_regex(
                r'(?s)class="hidden-xs prompt"[^>]*>(.+?)<',
                response, 'error message', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')


class DramaFeverIE(DramaFeverBaseIE):
    IE_NAME = 'dramafever'
    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)(?:/|$)'
    _TEST = {
        'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512.1',
            'ext': 'flv',
            'title': 'Cooking with Shin 4512.1',
            'description': 'md5:a8eec7942e1664a6896fcd5e1287bfd0',
            'thumbnail': 're:^https?://.*\.jpg',
            'timestamp': 1404336058,
            'upload_date': '20140702',
            'duration': 343,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url).replace('/', '.')

        try:
            feed = self._download_json(
                'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id,
                video_id, 'Downloading episode JSON')['channel']['item']
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError):
                raise ExtractorError(
                    'Currently unavailable in your country.', expected=True)
            raise

        media_group = feed.get('media-group', {})

        formats = []
        for media_content in media_group['media-content']:
            src = media_content.get('@attributes', {}).get('url')
            if not src:
                continue
            ext = determine_ext(src)
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    src, video_id, f4m_id='hds'))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src, video_id, 'mp4', m3u8_id='hls'))
            else:
                formats.append({
                    'url': src,
                })
        self._sort_formats(formats)

        title = media_group.get('media-title')
        description = media_group.get('media-description')
        duration = int_or_none(media_group['media-content'][0].get('@attributes', {}).get('duration'))
        thumbnail = self._proto_relative_url(
            media_group.get('media-thumbnail', {}).get('@attributes', {}).get('url'))
        timestamp = parse_iso8601(feed.get('pubDate'), ' ')

        subtitles = {}
        for media_subtitle in media_group.get('media-subTitle', []):
            lang = media_subtitle.get('@attributes', {}).get('lang')
            href = media_subtitle.get('@attributes', {}).get('href')
            if not lang or not href:
                continue
            subtitles[lang] = [{
                'ext': 'ttml',
                'url': href,
            }]

        series_id, episode_number = video_id.split('.')
        episode_info = self._download_json(
            # We only need a single episode info, so restricting page size to one episode
            # and dealing with page number as with episode number
            r'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_number=%s&page_size=1'
            % (self._consumer_secret, series_id, episode_number),
            video_id, 'Downloading episode info JSON', fatal=False)
        if episode_info:
            value = episode_info.get('value')
            if value:
                subfile = value[0].get('subfile') or value[0].get('new_subfile')
                if subfile and subfile != 'http://www.dramafever.com/st/':
                    subtitles.setdefault('English', []).append({
                        'ext': 'srt',
                        'url': subfile,
                    })

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }


class DramaFeverSeriesIE(DramaFeverBaseIE):
    IE_NAME = 'dramafever:series'
    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)(?:/(?:(?!\d+(?:/|$)).+)?)?$'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512',
            'title': 'Cooking with Shin',
            'description': 'md5:84a3f26e3cdc3fb7f500211b3593b5c1',
        },
        'playlist_count': 4,
    }, {
        'url': 'http://www.dramafever.com/drama/124/IRIS/',
        'info_dict': {
            'id': '124',
            'title': 'IRIS',
            'description': 'md5:b3a30e587cf20c59bd1c01ec0ee1b862',
        },
        'playlist_count': 20,
    }]

    _PAGE_SIZE = 60  # max is 60 (see http://api.drama9.com/#get--api-4-episode-series-)

    def _real_extract(self, url):
        series_id = self._match_id(url)

        series = self._download_json(
            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
            % (self._consumer_secret, series_id),
            series_id, 'Downloading series JSON')['series'][series_id]

        title = clean_html(series['name'])
        description = clean_html(series.get('description') or series.get('description_short'))

        entries = []
        for page_num in itertools.count(1):
            episodes = self._download_json(
                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
                % (self._consumer_secret, series_id, self._PAGE_SIZE, page_num),
                series_id, 'Downloading episodes JSON page #%d' % page_num)
            for episode in episodes.get('value', []):
                episode_url = episode.get('episode_url')
                if not episode_url:
                    continue
                entries.append(self.url_result(
                    compat_urlparse.urljoin(url, episode_url),
                    'DramaFever', episode.get('guid')))
            if page_num == episodes['num_pages']:
                break

        return self.playlist_result(entries, series_id, title, description)

@ -15,7 +15,6 @@ class DRBonanzaIE(InfoExtractor):

    _TESTS = [{
        'url': 'http://www.dr.dk/bonanza/serie/portraetter/Talkshowet.htm?assetId=65517',
        'md5': 'fe330252ddea607635cf2eb2c99a0af3',
        'info_dict': {
            'id': '65517',
            'ext': 'mp4',
@ -26,6 +25,9 @@ class DRBonanzaIE(InfoExtractor):
            'upload_date': '20110120',
            'duration': 3664,
        },
        'params': {
            'skip_download': True,  # requires rtmp
        },
    }, {
        'url': 'http://www.dr.dk/bonanza/radio/serie/sport/fodbold.htm?assetId=59410',
        'md5': '6dfe039417e76795fb783c52da3de11d',
@ -93,6 +95,11 @@ class DRBonanzaIE(InfoExtractor):
                    'format_id': file['Type'].replace('Video', ''),
                    'preference': preferencemap.get(file['Type'], -10),
                })
                if format['url'].startswith('rtmp'):
                    rtmp_url = format['url']
                    format['rtmp_live'] = True  # --resume does not work
                    if '/bonanza/' in rtmp_url:
                        format['play_path'] = rtmp_url.split('/bonanza/')[1]
                formats.append(format)
            elif file['Type'] == "Thumb":
                thumbnail = file['Location']
@ -111,9 +118,6 @@ class DRBonanzaIE(InfoExtractor):
        description = '%s\n%s\n%s\n' % (
            info['Description'], info['Actors'], info['Colophon'])

        for f in formats:
            f['url'] = f['url'].replace('rtmp://vod-bonanza.gss.dr.dk/bonanza/', 'http://vodfiles.dr.dk/')
            f['url'] = f['url'].replace('mp4:bonanza', 'bonanza')
        self._sort_formats(formats)

        display_id = re.sub(r'[^\w\d-]', '', re.sub(r' ', '-', title.lower())) + '-' + asset_id

@ -36,25 +36,24 @@ class DrTuberIE(InfoExtractor):
            r'<source src="([^"]+)"', webpage, 'video URL')

        title = self._html_search_regex(
            [r'class="hd_title" style="[^"]+">([^<]+)</h1>', r'<title>([^<]+) - \d+'],
            [r'<p[^>]+class="title_substrate">([^<]+)</p>', r'<title>([^<]+) - \d+'],
            webpage, 'title')

        thumbnail = self._html_search_regex(
            r'poster="([^"]+)"',
            webpage, 'thumbnail', fatal=False)

        like_count = str_to_int(self._html_search_regex(
            r'<span id="rate_likes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
            webpage, 'like count', fatal=False))
        dislike_count = str_to_int(self._html_search_regex(
            r'<span id="rate_dislikes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
            webpage, 'like count', fatal=False))
        comment_count = str_to_int(self._html_search_regex(
            r'<span class="comments_count">([\d,\.]+)</span>',
            webpage, 'comment count', fatal=False))
        def extract_count(id_, name):
            return str_to_int(self._html_search_regex(
                r'<span[^>]+(?:class|id)="%s"[^>]*>([\d,\.]+)</span>' % id_,
                webpage, '%s count' % name, fatal=False))

        like_count = extract_count('rate_likes', 'like')
        dislike_count = extract_count('rate_dislikes', 'dislike')
        comment_count = extract_count('comments_count', 'comment')

        cats_str = self._search_regex(
            r'<span>Categories:</span><div>(.+?)</div>', webpage, 'categories', fatal=False)
            r'<div[^>]+class="categories_list">(.+?)</div>', webpage, 'categories', fatal=False)
        categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str)

        return {

@ -1,8 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor, ExtractorError
from ..utils import parse_iso8601
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    parse_iso8601,
)


class DRTVIE(InfoExtractor):
@ -60,19 +63,31 @@ class DRTVIE(InfoExtractor):
                restricted_to_denmark = asset['RestrictedToDenmark']
                spoken_subtitles = asset['Target'] == 'SpokenSubtitles'
                for link in asset['Links']:
                    target = link['Target']
                    uri = link['Uri']
                    target = link['Target']
                    format_id = target
                    preference = -1 if target == 'HDS' else -2
                    preference = None
                    if spoken_subtitles:
                        preference -= 2
                        preference = -1
                        format_id += '-spoken-subtitles'
                    formats.append({
                        'url': uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43' if target == 'HDS' else uri,
                        'format_id': format_id,
                        'ext': link['FileFormat'],
                        'preference': preference,
                    })
                    if target == 'HDS':
                        formats.extend(self._extract_f4m_formats(
                            uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
                            video_id, preference, f4m_id=format_id))
                    elif target == 'HLS':
                        formats.extend(self._extract_m3u8_formats(
                            uri, video_id, 'mp4', preference=preference,
                            m3u8_id=format_id))
                    else:
                        bitrate = link.get('Bitrate')
                        if bitrate:
                            format_id += '-%s' % bitrate
                        formats.append({
                            'url': uri,
                            'format_id': format_id,
                            'tbr': bitrate,
                            'ext': link.get('FileFormat'),
                        })
                subtitles_list = asset.get('SubtitlesList')
                if isinstance(subtitles_list, list):
                    LANGS = {

@ -9,8 +9,8 @@ from ..utils import qualities


class DumpertIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/mediabase/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
    _TEST = {
    _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
    _TESTS = [{
        'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/',
        'md5': '1b9318d7d5054e7dcb9dc7654f21d643',
        'info_dict': {
@ -20,13 +20,17 @@ class DumpertIE(InfoExtractor):
            'description': 'Niet schrikken hoor',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }
    }, {
        'url': 'http://www.dumpert.nl/embed/6675421/dc440fe7/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        url = 'https://www.dumpert.nl/mediabase/' + video_id
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'nsfw=1')
        req.add_header('Cookie', 'nsfw=1; cpc=10')
        webpage = self._download_webpage(req, video_id)

        files_base64 = self._search_regex(

@ -79,7 +79,7 @@ class EaglePlatformIE(InfoExtractor):
        age_limit = 0 if age_restriction == 'allow_all' else 18

        m3u8_data = self._download_json(
            media['sources']['secure_m3u8']['auto'],
            self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:'),
            video_id, 'Downloading m3u8 JSON')

        formats = self._extract_m3u8_formats(

@ -1,9 +1,7 @@
from __future__ import unicode_literals

from ..compat import (
    compat_urllib_parse,
)
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote


class EHowIE(InfoExtractor):
@ -26,7 +24,7 @@ class EHowIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            r'(?:file|source)=(http[^\'"&]*)', webpage, 'video URL')
        final_url = compat_urllib_parse.unquote(video_url)
        final_url = compat_urllib_parse_unquote(video_url)
        uploader = self._html_search_meta('uploader', webpage)
        title = self._og_search_title(webpage).replace(' | eHow', '')

@ -1,25 +0,0 @@
from __future__ import unicode_literals

from .tnaflix import TNAFlixIE


class EMPFlixIE(TNAFlixIE):
    _VALID_URL = r'^https?://www\.empflix\.com/videos/(?P<display_id>[0-9a-zA-Z-]+)-(?P<id>[0-9]+)\.html'

    _TITLE_REGEX = r'name="title" value="(?P<title>[^"]*)"'
    _DESCRIPTION_REGEX = r'name="description" value="([^"]*)"'
    _CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'

    _TEST = {
        'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
        'md5': 'b1bc15b6412d33902d6e5952035fcabc',
        'info_dict': {
            'id': '33051',
            'display_id': 'Amateur-Finger-Fuck',
            'ext': 'mp4',
            'title': 'Amateur Finger Fuck',
            'description': 'Amateur solo finger fucking.',
            'thumbnail': 're:https?://.*\.jpg$',
            'age_limit': 18,
        }
    }

@ -4,7 +4,10 @@ import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import ExtractorError
from ..utils import (
    ExtractorError,
    unescapeHTML
)


class EroProfileIE(InfoExtractor):
@ -68,15 +71,14 @@ class EroProfileIE(InfoExtractor):

        m = re.search(r'You must be logged in to view this video\.', webpage)
        if m:
            raise ExtractorError(
                'This video requires login. Please specify a username and password and try again.', expected=True)
            self.raise_login_required('This video requires login')

        video_id = self._search_regex(
            [r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
            webpage, 'video id', default=None)

        video_url = self._search_regex(
            r'<source src="([^"]+)', webpage, 'video url')
        video_url = unescapeHTML(self._search_regex(
            r'<source src="([^"]+)', webpage, 'video url'))
        title = self._html_search_regex(
            r'Title:</th><td>([^<]+)</td>', webpage, 'title')
        thumbnail = self._search_regex(

@ -8,7 +8,8 @@ from ..compat import compat_urllib_request
from ..utils import (
    determine_ext,
    clean_html,
    qualities,
    int_or_none,
    float_or_none,
)


@ -36,10 +37,10 @@ def _decrypt_config(key, string):


class EscapistIE(InfoExtractor):
    _VALID_URL = r'https?://?(www\.)?escapistmagazine\.com/videos/view/[^/?#]+/(?P<id>[0-9]+)-[^/?#]*(?:$|[?#])'
    _VALID_URL = r'https?://?(?:www\.)?escapistmagazine\.com/videos/view/[^/?#]+/(?P<id>[0-9]+)-[^/?#]*(?:$|[?#])'
    _TESTS = [{
        'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
        'md5': 'c6793dbda81388f4264c1ba18684a74d',
        'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
        'info_dict': {
            'id': '6618',
            'ext': 'mp4',
@ -47,10 +48,11 @@ class EscapistIE(InfoExtractor):
            'title': "Breaking Down Baldur's Gate",
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 264,
            'uploader': 'The Escapist',
        }
    }, {
        'url': 'http://www.escapistmagazine.com/videos/view/zero-punctuation/10044-Evolve-One-vs-Multiplayer',
        'md5': 'cf8842a8a46444d241f9a9980d7874f2',
        'md5': '9e8c437b0dbb0387d3bd3255ca77f6bf',
        'info_dict': {
            'id': '10044',
            'ext': 'mp4',
@ -58,6 +60,7 @@ class EscapistIE(InfoExtractor):
            'title': 'Evolve - One vs Multiplayer',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 304,
            'uploader': 'The Escapist',
        }
    }]

@ -65,35 +68,33 @@ class EscapistIE(InfoExtractor):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        imsVideo = self._parse_json(
        ims_video = self._parse_json(
            self._search_regex(
                r'imsVideo\.play\(({.+?})\);', webpage, 'imsVideo'),
            video_id)
        video_id = imsVideo['videoID']
        key = imsVideo['hash']
        video_id = ims_video['videoID']
        key = ims_video['hash']

        quality = qualities(['lq', 'hq', 'hd'])
        config_req = compat_urllib_request.Request(
            'http://www.escapistmagazine.com/videos/'
            'vidconfig.php?videoID=%s&hash=%s' % (video_id, key))
        config_req.add_header('Referer', url)
        config = self._download_webpage(config_req, video_id, 'Downloading video config')

        formats = []
        for q in ['lq', 'hq', 'hd']:
            config_req = compat_urllib_request.Request(
                'http://www.escapistmagazine.com/videos/'
                'vidconfig.php?videoID=%s&hash=%s&quality=%s' % (video_id, key, 'mp4_' + q))
            config_req.add_header('Referer', url)
            config = self._download_webpage(config_req, video_id, 'Downloading video config ' + q.upper())
            data = json.loads(_decrypt_config(key, config))

        data = json.loads(_decrypt_config(key, config))
        video_data = data['videoData']

        title = clean_html(data['videoData']['title'])
        duration = data['videoData']['duration'] / 1000
        title = clean_html(video_data['title'])
        duration = float_or_none(video_data.get('duration'), 1000)
        uploader = video_data.get('publisher')

            for i, v in enumerate(data['files']['videos']):

                formats.append({
                    'url': v,
                    'format_id': determine_ext(v) + '_' + q + str(i),
                    'quality': quality(q),
                })
        formats = [{
            'url': video['src'],
            'format_id': '%s-%sp' % (determine_ext(video['src']), video['res']),
            'height': int_or_none(video.get('res')),
        } for video in data['files']['videos']]
        self._sort_formats(formats)

        return {
            'id': video_id,
@ -102,4 +103,5 @@ class EscapistIE(InfoExtractor):
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'duration': duration,
            'uploader': uploader,
        }

55
youtube_dl/extractor/espn.py
Normal file
@ -0,0 +1,55 @@
from __future__ import unicode_literals

from .common import InfoExtractor


class ESPNIE(InfoExtractor):
    _VALID_URL = r'https?://espn\.go\.com/(?:[^/]+/)*(?P<id>[^/]+)'
    _WORKING = False
    _TESTS = [{
        'url': 'http://espn.go.com/video/clip?id=10365079',
        'info_dict': {
            'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
            'ext': 'mp4',
            'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
            'description': '',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://espn.go.com/video/iframe/twitter/?cms=espn&id=10365079',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/nba/recap?gameId=400793786',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/blog/golden-state-warriors/post/_/id/593/how-warriors-rapidly-regained-a-winning-edge',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/sports/endurance/story/_/id/12893522/dzhokhar-tsarnaev-sentenced-role-boston-marathon-bombings',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/nba/playoffs/2015/story/_/id/12887571/john-wall-washington-wizards-no-swelling-left-hand-wrist-game-5-return',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        video_id = self._search_regex(
            r'class="video-play-button"[^>]+data-id="(\d+)',
            webpage, 'video id')

        player = self._download_webpage(
            'https://espn.go.com/video/iframe/twitter/?id=%s' % video_id, video_id)

        pcode = self._search_regex(
            r'["\']pcode=([^"\']+)["\']', player, 'pcode')

        return self.url_result(
            'ooyalaexternal:espn:%s:%s' % (video_id, pcode),
            'OoyalaExternal')

74
youtube_dl/extractor/esri.py
Normal file
74
youtube_dl/extractor/esri.py
Normal file
@ -0,0 +1,74 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urlparse
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
parse_filesize,
|
||||
unified_strdate,
|
||||
)
|
||||
|
||||
|
||||
class EsriVideoIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)'
|
||||
_TEST = {
|
||||
'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications',
|
||||
'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc',
|
||||
'info_dict': {
|
||||
'id': '1124',
|
||||
'ext': 'mp4',
|
||||
'title': 'ArcGIS Online - Developing Applications',
|
||||
'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.',
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'duration': 185,
|
||||
'upload_date': '20120419',
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
formats = []
|
||||
for width, height, content in re.findall(
|
||||
r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage):
|
||||
for video_url, ext, filesize in re.findall(
|
||||
r'<a[^>]+href="([^"]+)">([^<]+) \(([^<]+)\)</a>', content):
|
||||
formats.append({
|
||||
'url': compat_urlparse.urljoin(url, video_url),
|
||||
'ext': ext.lower(),
|
||||
'format_id': '%s-%s' % (ext.lower(), height),
|
||||
'width': int(width),
|
||||
'height': int(height),
|
||||
'filesize_approx': parse_filesize(filesize),
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
title = self._html_search_meta('title', webpage, 'title')
|
||||
description = self._html_search_meta(
|
||||
'description', webpage, 'description', fatal=False)
|
||||
|
||||
thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False)
|
||||
if thumbnail:
|
||||
thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail)
|
||||
|
||||
duration = int_or_none(self._search_regex(
|
||||
[r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"],
|
||||
webpage, 'duration', fatal=False))
|
||||
|
||||
upload_date = unified_strdate(self._html_search_meta(
|
||||
'last-modified', webpage, 'upload date', fatal=None))
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': description,
|
||||
'thumbnail': thumbnail,
|
||||
'duration': duration,
|
||||
'upload_date': upload_date,
|
||||
'formats': formats
|
||||
}
|
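The `filesize_approx` values above rely on `parse_filesize()` turning the human-readable sizes scraped from the page into byte counts. A rough sketch of the idea, assuming only a handful of units (the real helper in `youtube_dl/utils.py` supports many more):

```python
import re

_UNITS = {
    'B': 1,
    'KB': 1000, 'MB': 1000 ** 2, 'GB': 1000 ** 3,
    'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3,
}

def parse_filesize_sketch(s):
    """Convert strings like '5.09 MB' into an approximate byte count."""
    if s is None:
        return None
    m = re.match(r'\s*(\d+(?:\.\d+)?)\s*(\w+)\s*$', s)
    if not m or m.group(2) not in _UNITS:
        return None
    return int(float(m.group(1)) * _UNITS[m.group(2)])

assert parse_filesize_sketch('5.09 MB') == 5090000
```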
@@ -9,7 +9,7 @@ from ..compat import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_unquote,
    compat_urllib_request,
)
from ..utils import (
@@ -17,6 +17,8 @@ from ..utils import (
    int_or_none,
    limit_length,
    urlencode_postdata,
    get_element_by_id,
    clean_html,
)


@@ -42,6 +44,7 @@ class FacebookIE(InfoExtractor):
            'id': '637842556329505',
            'ext': 'mp4',
            'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
            'uploader': 'Tennis on Facebook',
        }
    }, {
        'note': 'Video without discernible title',
@@ -50,7 +53,11 @@ class FacebookIE(InfoExtractor):
            'id': '274175099429670',
            'ext': 'mp4',
            'title': 'Facebook video #274175099429670',
        }
            'uploader': 'Asif Nawab Butt',
        },
        'expected_warnings': [
            'title'
        ]
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
@@ -133,7 +140,7 @@ class FacebookIE(InfoExtractor):
        else:
            raise ExtractorError('Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse.unquote(data['params'])
        params_raw = compat_urllib_parse_unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]

@@ -149,15 +156,16 @@ class FacebookIE(InfoExtractor):
            raise ExtractorError('Cannot find video formats')

        video_title = self._html_search_regex(
            r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
            fatal=False)
            r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
            default=None)
        if not video_title:
            video_title = self._html_search_regex(
                r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
                webpage, 'alternative title', default=None)
                webpage, 'alternative title', fatal=False)
            video_title = limit_length(video_title, 80)
        if not video_title:
            video_title = 'Facebook video #%s' % video_id
        uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))

        return {
            'id': video_id,
@@ -165,4 +173,5 @@ class FacebookIE(InfoExtractor):
            'formats': formats,
            'duration': int_or_none(video_data.get('video_duration')),
            'thumbnail': video_data.get('thumbnail_src'),
            'uploader': uploader,
        }
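`limit_length()`, applied to the scraped caption above, truncates overlong titles with an ellipsis. Roughly (a behavioural sketch, not the exact utility source):

```python
def limit_length(s, length):
    """Cut s down to at most `length` characters, ending in '...'."""
    if s is None:
        return None
    ellipses = '...'
    if len(s) > length:
        return s[:length - len(ellipses)] + ellipses
    return s

assert limit_length('a' * 100, 80) == 'a' * 77 + '...'
assert limit_length('short caption', 80) == 'short caption'
```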
@@ -6,9 +6,9 @@ from .common import InfoExtractor

class FazIE(InfoExtractor):
    IE_NAME = 'faz.net'
    _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+)\.html'
    _VALID_URL = r'https?://(?:www\.)?faz\.net/(?:[^/]+/)*.*?-(?P<id>\d+)\.html'

    _TEST = {
    _TESTS = [{
        'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
        'info_dict': {
            'id': '12610585',
@@ -16,7 +16,22 @@ class FazIE(InfoExtractor):
            'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
            'description': 'md5:1453fbf9a0d041d985a47306192ea253',
        },
    }
    }, {
        'url': 'http://www.faz.net/aktuell/politik/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/berlin-gabriel-besteht-zerreissprobe-ueber-datenspeicherung-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/aktuell/politik/-13659345.html',
        'only_matching': True,
    }, {
        'url': 'http://www.faz.net/foobarblafasel-13659345.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

@@ -10,12 +10,13 @@ from ..compat import (
    compat_urlparse,
)
from ..utils import (
    encode_dict,
    ExtractorError,
)


class FC2IE(InfoExtractor):
    _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)?content/(?P<id>[^/]+)'
    _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)*content/(?P<id>[^/]+)'
    IE_NAME = 'fc2'
    _NETRC_MACHINE = 'fc2'
    _TESTS = [{
@@ -37,6 +38,9 @@ class FC2IE(InfoExtractor):
            'password': '(snip)',
            'skip': 'requires actual password'
        }
    }, {
        'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
        'only_matching': True,
    }]

    def _login(self):
@@ -52,10 +56,7 @@ class FC2IE(InfoExtractor):
            'Submit': ' Login ',
        }

        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
        request = compat_urllib_request.Request(
            'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)

@@ -80,13 +81,13 @@ class FC2IE(InfoExtractor):

        title = self._og_search_title(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        refer = url.replace('/content/', '/a/content/')
        refer = url.replace('/content/', '/a/content/') if '/a/content/' not in url else url

        mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()

        info_url = (
            "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
            format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))

        info_webpage = self._download_webpage(
            info_url, video_id, note='Downloading info page')
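The `encode_dict()` call that replaces the inline UTF-8 dance above encapsulates exactly what the removed comment said: Python 2's `urlencode` chokes on unicode, so keys and values are encoded first. Presumably equivalent to this sketch:

```python
def encode_dict(d, encoding='utf-8'):
    """Encode all keys and values so Python 2's urlencode accepts them."""
    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())

# The login form above can then be serialized in one step:
# login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
```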
@@ -1,80 +0,0 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
)


class FiredriveIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?firedrive\.com/' + \
        '(?:file|embed)/(?P<id>[0-9a-zA-Z]+)'
    _FILE_DELETED_REGEX = r'<div class="removed_file_image">'

    _TESTS = [{
        'url': 'https://www.firedrive.com/file/FEB892FA160EBD01',
        'md5': 'd5d4252f80ebeab4dc2d5ceaed1b7970',
        'info_dict': {
            'id': 'FEB892FA160EBD01',
            'ext': 'flv',
            'title': 'bbb_theora_486kbit.flv',
            'thumbnail': 're:^http://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'http://firedrive.com/file/%s' % video_id
        webpage = self._download_webpage(url, video_id)

        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        fields = dict(re.findall(r'''(?x)<input\s+
            type="hidden"\s+
            name="([^"]+)"\s+
            value="([^"]*)"
            ''', webpage))

        post = compat_urllib_parse.urlencode(fields)
        req = compat_urllib_request.Request(url, post)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        # Apparently, this header is required for confirmation to work.
        req.add_header('Host', 'www.firedrive.com')

        webpage = self._download_webpage(req, video_id,
                                         'Downloading video page')

        title = self._search_regex(r'class="external_title_left">(.+)</div>',
                                   webpage, 'title')
        thumbnail = self._search_regex(r'image:\s?"(//[^\"]+)', webpage,
                                       'thumbnail', fatal=False)
        if thumbnail is not None:
            thumbnail = 'http:' + thumbnail

        ext = self._search_regex(r'type:\s?\'([^\']+)\',',
                                 webpage, 'extension', fatal=False)
        video_url = self._search_regex(
            r'file:\s?loadURL\(\'(http[^\']+)\'\),', webpage, 'file url')

        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'ext': ext,
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
88  youtube_dl/extractor/fivetv.py  Normal file
@@ -0,0 +1,88 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import int_or_none


class FiveTVIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    http://
                        (?:www\.)?5-tv\.ru/
                        (?:
                            (?:[^/]+/)+(?P<id>\d+)|
                            (?P<path>[^/?#]+)(?:[/?#])?
                        )
                    '''

    _TESTS = [{
        'url': 'http://5-tv.ru/news/96814/',
        'md5': 'bbff554ad415ecf5416a2f48c22d9283',
        'info_dict': {
            'id': '96814',
            'ext': 'mp4',
            'title': 'Россияне выбрали имя для общенациональной платежной системы',
            'description': 'md5:a8aa13e2b7ad36789e9f77a74b6de660',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 180,
        },
    }, {
        'url': 'http://5-tv.ru/video/1021729/',
        'info_dict': {
            'id': '1021729',
            'ext': 'mp4',
            'title': '3D принтер',
            'description': 'md5:d76c736d29ef7ec5c0cf7d7c65ffcb41',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 180,
        },
    }, {
        'url': 'http://www.5-tv.ru/glavnoe/#itemDetails',
        'info_dict': {
            'id': 'glavnoe',
            'ext': 'mp4',
            'title': 'Итоги недели с 8 по 14 июня 2015 года',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.5-tv.ru/glavnoe/broadcasts/508645/',
        'only_matching': True,
    }, {
        'url': 'http://5-tv.ru/films/1507502/',
        'only_matching': True,
    }, {
        'url': 'http://5-tv.ru/programs/broadcast/508713/',
        'only_matching': True,
    }, {
        'url': 'http://5-tv.ru/angel/',
        'only_matching': True,
    }, {
        'url': 'http://www.5-tv.ru/schedule/?iframe=true&width=900&height=450',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('path')

        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(
            r'<a[^>]+?href="([^"]+)"[^>]+?class="videoplayer"',
            webpage, 'video url')

        title = self._og_search_title(webpage, default=None) or self._search_regex(
            r'<title>([^<]+)</title>', webpage, 'title')
        duration = int_or_none(self._og_search_property(
            'video:duration', webpage, 'duration', default=None))

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
        }
@@ -1,13 +1,12 @@
from __future__ import unicode_literals

import re
import random
import json

from .common import InfoExtractor
from ..utils import (
    get_element_by_id,
    clean_html,
    determine_ext,
    ExtractorError,
)


@@ -17,66 +16,40 @@ class FKTVIE(InfoExtractor):

    _TEST = {
        'url': 'http://fernsehkritik.tv/folge-1',
        'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79',
        'info_dict': {
            'id': '00011',
            'ext': 'flv',
            'id': '1',
            'ext': 'mp4',
            'title': 'Folge 1 vom 10. April 2007',
            'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        episode = int(self._match_id(url))
        episode = self._match_id(url)

        video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%s.jpg' % episode
        start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%s/Start' % episode,
                                               episode)
        playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
                                      'playlist', flags=re.DOTALL)
        files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
        webpage = self._download_webpage(
            'http://fernsehkritik.tv/folge-%s/play' % episode, episode)
        title = clean_html(self._html_search_regex(
            '<h3>([^<]+)</h3>', webpage, 'title'))
        matches = re.search(
            r'(?s)<video(?:(?!poster)[^>])+(?:poster="([^"]+)")?[^>]*>(.*)</video>',
            webpage)
        if matches is None:
            raise ExtractorError('Unable to extract the video')

        videos = []
        for i, _ in enumerate(files, 1):
            video_id = '%04d%d' % (episode, i)
            video_url = 'http://fernsehkritik.tv/js/directme.php?file=%s%s.flv' % (episode, '' if i == 1 else '-%d' % i)
            videos.append({
                'ext': 'flv',
                'id': video_id,
                'url': video_url,
                'title': clean_html(get_element_by_id('eptitle', start_webpage)),
                'description': clean_html(get_element_by_id('contentlist', start_webpage)),
                'thumbnail': video_thumbnail
            })
        poster, sources = matches.groups()
        if poster is None:
            self.report_warning('unable to extract thumbnail')

        urls = re.findall(r'<source[^>]+src="([^"]+)"', sources)
        formats = [{
            'url': furl,
            'format_id': determine_ext(url),
        } for furl in urls]
        return {
            '_type': 'multi_video',
            'entries': videos,
            'id': 'folge-%s' % episode,
        }


class FKTVPosteckeIE(InfoExtractor):
    IE_NAME = 'fernsehkritik.tv:postecke'
    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
    _TEST = {
        'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
        'md5': '262f0adbac80317412f7e57b4808e5c4',
        'info_dict': {
            'id': '0120',
            'ext': 'flv',
            'title': 'Postecke 120',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        episode = int(mobj.group('ep'))

        server = random.randint(2, 4)
        video_id = '%04d' % episode
        video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
        video_title = 'Postecke %d' % episode
        return {
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'id': episode,
            'title': title,
            'formats': formats,
            'thumbnail': poster,
        }
@@ -30,6 +30,10 @@ class FolketingetIE(InfoExtractor):
            'upload_date': '20141120',
            'duration': 3960,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):

@@ -32,6 +32,7 @@ class FourTubeIE(InfoExtractor):
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }
@@ -1,5 +1,7 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    parse_iso8601,
@@ -8,7 +10,8 @@ from ..utils import (


class FoxNewsIE(InfoExtractor):
    _VALID_URL = r'https?://video\.foxnews\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
    IE_DESC = 'Fox News and Fox Business Video'
    _VALID_URL = r'https?://(?P<host>video\.fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
@@ -42,13 +45,19 @@ class FoxNewsIE(InfoExtractor):
            'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
            'only_matching': True,
        },
        {
            'url': 'http://video.foxbusiness.com/v/4442309889001',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        host = mobj.group('host')

        video = self._download_json(
            'http://video.foxnews.com/v/feed/video/%s.js?template=fox' % video_id, video_id)
            'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id), video_id)

        item = video['channel']['item']
        title = item['title']
@@ -5,7 +5,7 @@ from ..utils import smuggle_url


class FoxSportsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?foxsports\.com/video\?vid=(?P<id>\d+)'
    _VALID_URL = r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*(?P<id>[^/]+)'

    _TEST = {
        'url': 'http://www.foxsports.com/video?vid=432609859715',
@@ -6,18 +6,15 @@
import json

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_urlparse,
    compat_urlparse,
)
from ..compat import compat_urlparse
from ..utils import (
    clean_html,
    ExtractorError,
    int_or_none,
    float_or_none,
    parse_duration,
    determine_ext,
)
from .dailymotion import DailymotionCloudIE


class FranceTVBaseInfoExtractor(InfoExtractor):
@@ -58,12 +55,12 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
                    # See https://github.com/rg3/youtube-dl/issues/3963
                    # m3u8 urls work fine
                    continue
                video_url_parsed = compat_urllib_parse_urlparse(video_url)
                f4m_url = self._download_webpage(
                    'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path,
                    'http://hdfauth.francetv.fr/esi/TA?url=%s' % video_url,
                    video_id, 'Downloading f4m manifest token', fatal=False)
                if f4m_url:
                    formats.extend(self._extract_f4m_formats(f4m_url, video_id, 1, format_id))
                    formats.extend(self._extract_f4m_formats(
                        f4m_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, 1, format_id))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4', m3u8_id=format_id))
            elif video_url.startswith('rtmp'):
@@ -81,12 +78,17 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
                })
        self._sort_formats(formats)

        title = info['titre']
        subtitle = info.get('sous_titre')
        if subtitle:
            title += ' - %s' % subtitle

        return {
            'id': video_id,
            'title': info['titre'],
            'title': title,
            'description': clean_html(info['synopsis']),
            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
            'duration': float_or_none(info.get('real_duration'), 1000) or parse_duration(info['duree']),
            'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
            'timestamp': int_or_none(info['diffusion']['timestamp']),
            'formats': formats,
        }
@@ -131,12 +133,26 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
            'skip_download': 'HLS (reqires ffmpeg)'
        },
        'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
    }, {
        'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html',
        'md5': 'f485bda6e185e7d15dbc69b72bae993e',
        'info_dict': {
            'id': '556e03339473995ee145930c',
            'ext': 'mp4',
            'title': 'Les entreprises familiales : le secret de la réussite',
            'thumbnail': 're:^https?://.*\.jpe?g$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_title = mobj.group('title')
        webpage = self._download_webpage(url, page_title)

        dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
        if dmcloud_url:
            return self.url_result(dmcloud_url, 'DailymotionCloud')

        video_id, catalogue = self._search_regex(
            r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
        return self._extract_video(video_id, catalogue)
@@ -145,11 +161,21 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
class FranceTVIE(FranceTVBaseInfoExtractor):
    IE_NAME = 'francetv'
    IE_DESC = 'France 2, 3, 4, 5 and Ô'
    _VALID_URL = r'''(?x)https?://www\.france[2345o]\.fr/
        (?:
            emissions/.*?/(videos|emissions)/(?P<id>[^/?]+)
        |   (emissions?|jt)/(?P<key>[^/?]+)
        )'''
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:www\.)?france[2345o]\.fr/
                                (?:
                                    emissions/[^/]+/(?:videos|diffusions)|
                                    emission/[^/]+|
                                    videos|
                                    jt
                                )
                            /|
                            embed\.francetv\.fr/\?ue=
                        )
                        (?P<id>[^/?]+)
                    '''

    _TESTS = [
        # france2
@@ -193,37 +219,59 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
        },
        # france5
        {
            'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
            'md5': '78f0f4064f9074438e660785bbf2c5d9',
            'url': 'http://www.france5.fr/emissions/c-a-dire/videos/quels_sont_les_enjeux_de_cette_rentree_politique__31-08-2015_908948?onglet=tous&page=1',
            'md5': 'f6c577df3806e26471b3d21631241fd0',
            'info_dict': {
                'id': '108961659',
                'id': '123327454',
                'ext': 'flv',
                'title': 'C à dire ?!',
                'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
                'upload_date': '20140915',
                'timestamp': 1410795000,
                'title': 'C à dire ?! - Quels sont les enjeux de cette rentrée politique ?',
                'description': 'md5:4a0d5cb5dce89d353522a84462bae5a4',
                'upload_date': '20150831',
                'timestamp': 1441035120,
            },
        },
        # franceo
        {
            'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
            'md5': '52f0bfe202848b15915a2f39aaa8981b',
            'url': 'http://www.franceo.fr/jt/info-soir/18-07-2015',
            'md5': '47d5816d3b24351cdce512ad7ab31da8',
            'info_dict': {
                'id': '108634970',
                'id': '125377621',
                'ext': 'flv',
                'title': 'Infô Afrique',
                'description': 'md5:ebf346da789428841bee0fd2a935ea55',
                'upload_date': '20140915',
                'timestamp': 1410822000,
                'title': 'Infô soir',
                'description': 'md5:01b8c6915a3d93d8bbbd692651714309',
                'upload_date': '20150718',
                'timestamp': 1437241200,
                'duration': 414,
            },
        },
        {
            # francetv embed
            'url': 'http://embed.francetv.fr/?ue=8d7d3da1e3047c42ade5a5d7dfd3fc87',
            'info_dict': {
                'id': 'EV_30231',
                'ext': 'flv',
                'title': 'Alcaline, le concert avec Calogero',
                'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
                'upload_date': '20150226',
                'timestamp': 1424989860,
                'duration': 5400,
            },
        },
        {
            'url': 'http://www.france4.fr/emission/highlander/diffusion-du-17-07-2015-04h05',
            'only_matching': True,
        },
        {
            'url': 'http://www.franceo.fr/videos/125377617',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        webpage = self._download_webpage(url, mobj.group('key') or mobj.group('id'))
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_id, catalogue = self._html_search_regex(
            r'href="http://videos\.francetv\.fr/video/([^@]+@[^"]+)"',
            r'href="http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
            webpage, 'video ID').split('@')
        return self._extract_video(video_id, catalogue)
@@ -53,7 +53,7 @@ class FunnyOrDieIE(InfoExtractor):
        for bitrate in bitrates:
            for link in links:
                formats.append({
                    'url': '%s%d.%s' % (link[0], bitrate, link[1]),
                    'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),
                    'format_id': '%s-%d' % (link[1], bitrate),
                    'vbr': bitrate,
                })
@@ -5,7 +5,7 @@ import json

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_parse_unquote,
    compat_urlparse,
)
from ..utils import (
@@ -14,8 +14,8 @@ from ..utils import (


class GameSpotIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
    _TEST = {
    _VALID_URL = r'http://(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
    _TESTS = [{
        'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
        'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
        'info_dict': {
@@ -23,8 +23,16 @@ class GameSpotIE(InfoExtractor):
            'ext': 'mp4',
            'title': 'Arma 3 - Community Guide: SITREP I',
            'description': 'Check out this video where some of the basics of Arma 3 is explained.',
        }
    }
        },
    }, {
        'url': 'http://www.gamespot.com/videos/the-witcher-3-wild-hunt-xbox-one-now-playing/2300-6424837/',
        'info_dict': {
            'id': 'gs-2300-6424837',
            'ext': 'flv',
            'title': 'The Witcher 3: Wild Hunt [Xbox ONE] - Now Playing',
            'description': 'Join us as we take a look at the early hours of The Witcher 3: Wild Hunt and more.',
        },
    }]

    def _real_extract(self, url):
        page_id = self._match_id(url)
@@ -32,30 +40,42 @@ class GameSpotIE(InfoExtractor):
        data_video_json = self._search_regex(
            r'data-video=["\'](.*?)["\']', webpage, 'data video')
        data_video = json.loads(unescapeHTML(data_video_json))
        streams = data_video['videoStreams']

        # Transform the manifest url to a link to the mp4 files
        # they are used in mobile devices.
        f4m_url = data_video['videoStreams']['f4m_stream']
        f4m_path = compat_urlparse.urlparse(f4m_url).path
        QUALITIES_RE = r'((,\d+)+,?)'
        qualities = self._search_regex(QUALITIES_RE, f4m_path, 'qualities').strip(',').split(',')
        http_path = f4m_path[1:].split('/', 1)[1]
        http_template = re.sub(QUALITIES_RE, r'%s', http_path)
        http_template = http_template.replace('.csmil/manifest.f4m', '')
        http_template = compat_urlparse.urljoin(
            'http://video.gamespotcdn.com/', http_template)
        formats = []
        for q in qualities:
            formats.append({
                'url': http_template % q,
                'ext': 'mp4',
                'format_id': q,
            })
        f4m_url = streams.get('f4m_stream')
        if f4m_url is not None:
            # Transform the manifest url to a link to the mp4 files
            # they are used in mobile devices.
            f4m_path = compat_urlparse.urlparse(f4m_url).path
            QUALITIES_RE = r'((,\d+)+,?)'
            qualities = self._search_regex(QUALITIES_RE, f4m_path, 'qualities').strip(',').split(',')
            http_path = f4m_path[1:].split('/', 1)[1]
            http_template = re.sub(QUALITIES_RE, r'%s', http_path)
            http_template = http_template.replace('.csmil/manifest.f4m', '')
            http_template = compat_urlparse.urljoin(
                'http://video.gamespotcdn.com/', http_template)
            for q in qualities:
                formats.append({
                    'url': http_template % q,
                    'ext': 'mp4',
                    'format_id': q,
                })
        else:
            for quality in ['sd', 'hd']:
                # It's actually a link to a flv file
                flv_url = streams.get('f4m_{0}'.format(quality))
                if flv_url is not None:
                    formats.append({
                        'url': flv_url,
                        'ext': 'flv',
                        'format_id': quality,
                    })

        return {
            'id': data_video['guid'],
            'display_id': page_id,
            'title': compat_urllib_parse.unquote(data_video['title']),
            'title': compat_urllib_parse_unquote(data_video['title']),
            'formats': formats,
            'description': self._html_search_meta('description', webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
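The f4m-to-mp4 rewrite in the hunk above is easier to follow on a concrete manifest path. A standalone sketch; the `example.invalid` URL and the quality numbers are made up for illustration:

```python
import re
try:
    from urllib.parse import urljoin, urlparse  # Python 3
except ImportError:
    from urlparse import urljoin, urlparse  # Python 2

f4m_url = 'http://example.invalid/zen/video/file,480,720,1080,.mp4.csmil/manifest.f4m'
f4m_path = urlparse(f4m_url).path

QUALITIES_RE = r'((,\d+)+,?)'
# ',480,720,1080,' -> ['480', '720', '1080']
qualities = re.search(QUALITIES_RE, f4m_path).group(1).strip(',').split(',')

http_path = f4m_path[1:].split('/', 1)[1]  # drop the first path segment
http_template = re.sub(QUALITIES_RE, r'%s', http_path)
http_template = http_template.replace('.csmil/manifest.f4m', '')
http_template = urljoin('http://video.gamespotcdn.com/', http_template)

print([http_template % q for q in qualities])
# ['http://video.gamespotcdn.com/video/file480.mp4', ...] (one direct mp4 per quality)
```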
@@ -7,7 +7,10 @@ from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import remove_end
from ..utils import (
    remove_end,
    HEADRequest,
)


class GDCVaultIE(InfoExtractor):
@@ -73,10 +76,20 @@ class GDCVaultIE(InfoExtractor):
        return video_formats

    def _parse_flv(self, xml_description):
        video_formats = []
        formats = []
        akamai_url = xml_description.find('./metadata/akamaiHost').text
        audios = xml_description.find('./metadata/audios')
        if audios is not None:
            for audio in audios:
                formats.append({
                    'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
                    'play_path': remove_end(audio.get('url'), '.flv'),
                    'ext': 'flv',
                    'vcodec': 'none',
                    'format_id': audio.get('code'),
                })
        slide_video_path = xml_description.find('./metadata/slideVideo').text
        video_formats.append({
        formats.append({
            'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
            'play_path': remove_end(slide_video_path, '.flv'),
            'ext': 'flv',
@@ -86,7 +99,7 @@ class GDCVaultIE(InfoExtractor):
            'format_id': 'slides',
        })
        speaker_video_path = xml_description.find('./metadata/speakerVideo').text
        video_formats.append({
        formats.append({
            'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
            'play_path': remove_end(speaker_video_path, '.flv'),
            'ext': 'flv',
@@ -95,7 +108,7 @@ class GDCVaultIE(InfoExtractor):
            'preference': -1,
            'format_id': 'speaker',
        })
        return video_formats
        return formats

    def _login(self, webpage_url, display_id):
        (username, password) = self._get_login_info()
@@ -133,16 +146,18 @@ class GDCVaultIE(InfoExtractor):
            r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
            start_page, 'url', default=None)
        if direct_url:
            video_url = 'http://www.gdcvault.com/' + direct_url
            title = self._html_search_regex(
                r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
                start_page, 'title')
            video_url = 'http://www.gdcvault.com' + direct_url
            # resolve the url so that we can detect the correct extension
            head = self._request_webpage(HEADRequest(video_url), video_id)
            video_url = head.geturl()

            return {
                'id': video_id,
                'display_id': display_id,
                'url': video_url,
                'ext': 'flv',
                'title': title,
            }

@@ -168,8 +183,8 @@ class GDCVaultIE(InfoExtractor):
            # Fallback to the older format
            xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')

        xml_decription_url = xml_root + 'xml/' + xml_name
        xml_description = self._download_xml(xml_decription_url, display_id)
        xml_description_url = xml_root + 'xml/' + xml_name
        xml_description = self._download_xml(xml_description_url, display_id)

        video_title = xml_description.find('./metadata/title').text
        video_formats = self._parse_mp4(xml_description)
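`HEADRequest`, newly imported above, issues an HTTP HEAD so redirects can be followed without fetching the response body; the redirected URL then exposes the real file extension. The helper is essentially a one-method subclass (a sketch consistent with its use here; `example.com` is a placeholder):

```python
try:
    import urllib.request as urllib_request  # Python 3
except ImportError:
    import urllib2 as urllib_request  # Python 2

class HEADRequest(urllib_request.Request):
    """A Request that asks only for headers, not the response body."""
    def get_method(self):
        return 'HEAD'

# Usage mirrors the extractor: open the request, let redirects resolve,
# then read the final URL to pick the extension.
response = urllib_request.urlopen(HEADRequest('http://example.com/'))
final_url = response.geturl()
```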