Commit 4818163f06 (mirror of https://github.com/l1ving/youtube-dl, synced 2025-01-25 04:35:47 +08:00)
.github/ISSUE_TEMPLATE.md | 6 (vendored)
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.11.02*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.11.08*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.11.02**
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.11.08**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -35,7 +35,7 @@ $ youtube-dl -v <your command line>
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2016.11.02
+[debug] youtube-dl version 2016.11.08
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
.gitignore | 1 (vendored)
@@ -30,6 +30,7 @@ updates_key.pem
 *.m4v
 *.mp3
 *.3gp
+*.wav
 *.part
 *.swp
 test/testdata
ChangeLog | 31
@@ -1,6 +1,35 @@
-version <unreleased>
+version 2016.11.08
 
 Extractors
+* [tmz:article] Fix extraction (#11052)
+* [espn] Fix extraction (#11041)
+* [mitele] Fix extraction after website redesign (#10824)
+- [ard] Remove age restriction check (#11129)
+* [generic] Improve support for pornhub.com embeds (#11100)
++ [generic] Add support for redtube.com embeds (#11099)
++ [generic] Add support for drtuber.com embeds (#11098)
++ [redtube] Add support for embed URLs
++ [drtuber] Add support for embed URLs
++ [yahoo] Improve content id extraction (#11088)
+* [toutv] Relax URL regular expression (#11121)
+
+
+version 2016.11.04
+
+Core
+* [extractor/common] Tolerate malformed RESOLUTION attribute in m3u8
+  manifests (#11113)
+* [downloader/ism] Fix AVC Decoder Configuration Record
+
+Extractors
++ [fox9] Add support for fox9.com (#11110)
++ [anvato] Extract more metadata and improve formats extraction
+* [vodlocker] Improve removed videos detection (#11106)
++ [vzaar] Add support for vzaar.com (#11093)
++ [vice] Add support for uplynk preplay videos (#11101)
+* [tubitv] Fix extraction (#11061)
++ [shahid] Add support for authentication (#11091)
++ [radiocanada] Add subtitles support (#11096)
 + [generic] Add support for ISM manifests
 
 
Makefile | 2
@@ -1,7 +1,7 @@
 all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
 
 clean:
-rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish youtube_dl/extractor/lazy_extractors.py *.dump *.part* *.info.json *.mp4 *.m4a *.flv *.mp3 *.avi *.mkv *.webm *.3gp *.jpg *.png CONTRIBUTING.md.tmp ISSUE_TEMPLATE.md.tmp youtube-dl youtube-dl.exe
+rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish youtube_dl/extractor/lazy_extractors.py *.dump *.part* *.info.json *.mp4 *.m4a *.flv *.mp3 *.avi *.mkv *.webm *.3gp *.wav *.jpg *.png CONTRIBUTING.md.tmp ISSUE_TEMPLATE.md.tmp youtube-dl youtube-dl.exe
 find . -name "*.pyc" -delete
 find . -name "*.class" -delete
 
README.md
@@ -758,7 +758,7 @@ Once the video is fully downloaded, use any video player, such as [mpv](https://
 
 ### I extracted a video URL with `-g`, but it does not play on another machine / in my webbrowser.
 
-It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.
+It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies and/or HTTP headers. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl. You can also get necessary cookies and HTTP headers from JSON output obtained with `--dump-json`.
 
 It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule.
 
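The FAQ answer above can also be scripted. Below is a minimal sketch (not part of this commit) using youtube-dl's Python API to obtain the direct URL together with the HTTP headers and cookies the new wording mentions; the example video URL and the format-picking logic are assumptions for illustration only.

```python
import youtube_dl

ydl = youtube_dl.YoutubeDL({'quiet': True})
info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)

# Processed info dicts expose the chosen URL(s) under 'formats' /
# 'requested_formats'; picking the last entry is only a demonstration.
fmt = (info.get('requested_formats') or info.get('formats') or [info])[-1]
direct_url = fmt['url']                                     # what `youtube-dl -g` would print
headers = fmt.get('http_headers') or info.get('http_headers') or {}
cookies = ydl.cookiejar                                     # cookies gathered during extraction

print(direct_url)
print(headers.get('User-Agent'))                            # compare with `--dump-user-agent`
```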
docs/supportedsites.md
@@ -247,6 +247,7 @@
 - **FootyRoom**
 - **Formula1**
 - **FOX**
+- **FOX9**
 - **Foxgay**
 - **foxnews**: Fox News and Fox Business Video
 - **foxnews:article**
@@ -870,6 +871,7 @@
 - **vube**: Vube.com
 - **VuClip**
 - **VyboryMos**
+- **Vzaar**
 - **Walla**
 - **washingtonpost**
 - **washingtonpost:article**
youtube_dl/downloader/ism.py
@@ -129,7 +129,7 @@ def write_piff_header(stream, params):
 sample_entry_payload += u1616.pack(params['sampling_rate'])
 
 if fourcc == 'AACL':
-smaple_entry_box = box(b'mp4a', sample_entry_payload)
+sample_entry_box = box(b'mp4a', sample_entry_payload)
 else:
 sample_entry_payload = sample_entry_payload
 sample_entry_payload += u16.pack(0) # pre defined
@@ -149,9 +149,7 @@ def write_piff_header(stream, params):
 if fourcc in ('H264', 'AVC1'):
 sps, pps = codec_private_data.split(u32.pack(1))[1:]
 avcc_payload = u8.pack(1) # configuration version
-avcc_payload += sps[1] # avc profile indication
-avcc_payload += sps[2] # profile compatibility
-avcc_payload += sps[3] # avc level indication
+avcc_payload += sps[1:4] # avc profile indication + profile compatibility + avc level indication
 avcc_payload += u8.pack(0xfc | (params.get('nal_unit_length_field', 4) - 1)) # complete represenation (1) + reserved (11111) + length size minus one
 avcc_payload += u8.pack(1) # reserved (0) + number of sps (0000001)
 avcc_payload += u16.pack(len(sps))
@@ -160,8 +158,8 @@ def write_piff_header(stream, params):
 avcc_payload += u16.pack(len(pps))
 avcc_payload += pps
 sample_entry_payload += box(b'avcC', avcc_payload) # AVC Decoder Configuration Record
-smaple_entry_box = box(b'avc1', sample_entry_payload) # AVC Simple Entry
+sample_entry_box = box(b'avc1', sample_entry_payload) # AVC Simple Entry
-stsd_payload += smaple_entry_box
+stsd_payload += sample_entry_box
 
 stbl_payload = full_box(b'stsd', 0, 0, stsd_payload) # Sample Description Box
 
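The `sps[1:4]` change above is effectively a Python 3 compatibility fix. A short illustration with made-up SPS bytes (not repository code): indexing a bytes object on Python 3 yields an int, which cannot be concatenated to bytes, while a slice stays a bytes object on both Python 2 and 3.

```python
sps = b'\x67\x64\x00\x1f\xac'   # invented SPS bytes, for illustration only

payload = b''
payload += sps[1:4]             # slicing keeps bytes on Python 2 and 3
assert payload == b'\x64\x00\x1f'

try:
    payload += sps[1]           # Python 3: sps[1] is the int 0x64, so this raises TypeError
except TypeError as error:      # (on Python 2 indexing returns a one-byte str and this works)
    print('indexing bytes yields %r: %s' % (sps[1], error))
```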
youtube_dl/extractor/anvato.py
@@ -157,22 +157,16 @@ class AnvatoIE(InfoExtractor):
 video_data_url, video_id, transform_source=strip_jsonp,
 data=json.dumps(payload).encode('utf-8'))
 
-def _extract_anvato_videos(self, webpage, video_id):
-anvplayer_data = self._parse_json(self._html_search_regex(
-r'<script[^>]+data-anvp=\'([^\']+)\'', webpage,
-'Anvato player data'), video_id)
-
-video_id = anvplayer_data['video']
-access_key = anvplayer_data['accessKey']
-
+def _get_anvato_videos(self, access_key, video_id):
 video_data = self._get_video_json(access_key, video_id)
 
 formats = []
 for published_url in video_data['published_urls']:
 video_url = published_url['embed_url']
+media_format = published_url.get('format')
 ext = determine_ext(video_url)
 
-if ext == 'smil':
+if ext == 'smil' or media_format == 'smil':
 formats.extend(self._extract_smil_formats(video_url, video_id))
 continue
 
@@ -183,7 +177,7 @@ class AnvatoIE(InfoExtractor):
 'tbr': tbr if tbr != 0 else None,
 }
 
-if ext == 'm3u8':
+if ext == 'm3u8' or media_format in ('m3u8', 'm3u8-variant'):
 # Not using _extract_m3u8_formats here as individual media
 # playlists are also included in published_urls.
 if tbr is None:
@@ -194,7 +188,7 @@ class AnvatoIE(InfoExtractor):
 'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])),
 'ext': 'mp4',
 })
-elif ext == 'mp3':
+elif ext == 'mp3' or media_format == 'mp3':
 a_format['vcodec'] = 'none'
 else:
 a_format.update({
@@ -218,7 +212,19 @@ class AnvatoIE(InfoExtractor):
 'formats': formats,
 'title': video_data.get('def_title'),
 'description': video_data.get('def_description'),
+'tags': video_data.get('def_tags', '').split(','),
 'categories': video_data.get('categories'),
 'thumbnail': video_data.get('thumbnail'),
+'timestamp': int_or_none(video_data.get(
+'ts_published') or video_data.get('ts_added')),
+'uploader': video_data.get('mcp_id'),
+'duration': int_or_none(video_data.get('duration')),
 'subtitles': subtitles,
 }
+
+def _extract_anvato_videos(self, webpage, video_id):
+anvplayer_data = self._parse_json(self._html_search_regex(
+r'<script[^>]+data-anvp=\'([^\']+)\'', webpage,
+'Anvato player data'), video_id)
+return self._get_anvato_videos(
+anvplayer_data['accessKey'], anvplayer_data['video'])
youtube_dl/extractor/ard.py
@@ -178,8 +178,6 @@ class ARDMediathekIE(InfoExtractor):
 ('>Leider liegt eine Störung vor.', 'Video %s is unavailable'),
 ('>Der gewünschte Beitrag ist nicht mehr verfügbar.<',
 'Video %s is no longer available'),
-('Diese Sendung ist für Jugendliche unter 12 Jahren nicht geeignet. Der Clip ist deshalb nur von 20 bis 6 Uhr verfügbar.',
-'This program is only suitable for those aged 12 and older. Video %s is therefore only available between 8 pm and 6 am.'),
 )
 
 for pattern, message in ERRORS:
youtube_dl/extractor/cbslocal.py
@@ -22,6 +22,7 @@ class CBSLocalIE(AnvatoIE):
 'thumbnail': 're:^https?://.*',
 'timestamp': 1463440500,
 'upload_date': '20160516',
+'uploader': 'CBS',
 'subtitles': {
 'en': 'mincount:5',
 },
@@ -35,6 +36,7 @@ class CBSLocalIE(AnvatoIE):
 'Syndication\\Curb.tv',
 'Content\\News'
 ],
+'tags': ['CBS 2 News Evening'],
 },
 }, {
 # SendtoNews embed
youtube_dl/extractor/common.py
@@ -1280,9 +1280,10 @@ class InfoExtractor(object):
 }
 resolution = last_info.get('RESOLUTION')
 if resolution:
-width_str, height_str = resolution.split('x')
-f['width'] = int(width_str)
-f['height'] = int(height_str)
+mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
+if mobj:
+f['width'] = int(mobj.group('width'))
+f['height'] = int(mobj.group('height'))
 # Unified Streaming Platform
 mobj = re.search(
 r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
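A quick standalone check (the sample RESOLUTION values are invented) of the more tolerant parsing introduced above: values with trailing junk no longer abort m3u8 extraction, and values without a WxH pair are simply skipped.

```python
import re

for resolution in ('1920x1080', '1280X720,', '848x480@30', 'audio-only'):
    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
    if mobj:
        print(resolution, '->', int(mobj.group('width')), int(mobj.group('height')))
    else:
        print(resolution, '-> ignored')  # a malformed value no longer raises ValueError
```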
youtube_dl/extractor/drtuber.py
@@ -10,8 +10,8 @@ from ..utils import (
 
 
 class DrTuberIE(InfoExtractor):
-_VALID_URL = r'https?://(?:www\.)?drtuber\.com/video/(?P<id>\d+)/(?P<display_id>[\w-]+)'
-_TEST = {
+_VALID_URL = r'https?://(?:www\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?'
+_TESTS = [{
 'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
 'md5': '93e680cf2536ad0dfb7e74d94a89facd',
 'info_dict': {
@@ -25,20 +25,30 @@ class DrTuberIE(InfoExtractor):
 'thumbnail': 're:https?://.*\.jpg$',
 'age_limit': 18,
 }
-}
+}, {
+'url': 'http://www.drtuber.com/embed/489939',
+'only_matching': True,
+}]
+
+@staticmethod
+def _extract_urls(webpage):
+return re.findall(
+r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)',
+webpage)
 
 def _real_extract(self, url):
 mobj = re.match(self._VALID_URL, url)
 video_id = mobj.group('id')
-display_id = mobj.group('display_id')
+display_id = mobj.group('display_id') or video_id
 
-webpage = self._download_webpage(url, display_id)
+webpage = self._download_webpage(
+'http://www.drtuber.com/video/%s' % video_id, display_id)
 
 video_url = self._html_search_regex(
 r'<source src="([^"]+)"', webpage, 'video URL')
 
 title = self._html_search_regex(
-(r'class="title_watch"[^>]*><p>([^<]+)<',
+(r'class="title_watch"[^>]*><(?:p|h\d+)[^>]*>([^<]+)<',
 r'<p[^>]+class="title_substrate">([^<]+)</p>',
 r'<title>([^<]+) - \d+'),
 webpage, 'title')
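The new `_extract_urls` helper is what the generic extractor calls to discover embeds on third-party pages. A small demonstration with made-up HTML (not from the repository):

```python
import re

SAMPLE_PAGE = '''
<p>some article text</p>
<iframe src="//www.drtuber.com/embed/489939" frameborder="0"></iframe>
<iframe src="https://www.drtuber.com/embed/1740434"></iframe>
'''

urls = re.findall(
    r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)',
    SAMPLE_PAGE)
print(urls)  # ['//www.drtuber.com/embed/489939', 'https://www.drtuber.com/embed/1740434']
```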
youtube_dl/extractor/espn.py
@@ -1,38 +1,117 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import remove_end
+from ..compat import compat_str
+from ..utils import (
+determine_ext,
+int_or_none,
+unified_timestamp,
+)
 
 
 class ESPNIE(InfoExtractor):
-_VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/(?:[^/]+/)*(?P<id>[^/]+)'
+_VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/video/clip(?:\?.*?\bid=|/_/id/)(?P<id>\d+)'
 _TESTS = [{
 'url': 'http://espn.go.com/video/clip?id=10365079',
-'md5': '60e5d097a523e767d06479335d1bdc58',
 'info_dict': {
-'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
+'id': '10365079',
 'ext': 'mp4',
 'title': '30 for 30 Shorts: Judging Jewell',
-'description': None,
+'description': 'md5:39370c2e016cb4ecf498ffe75bef7f0f',
+'timestamp': 1390936111,
+'upload_date': '20140128',
 },
 'params': {
 'skip_download': True,
 },
-'add_ie': ['OoyalaExternal'],
 }, {
 # intl video, from http://www.espnfc.us/video/mls-highlights/150/video/2743663/must-see-moments-best-of-the-mls-season
 'url': 'http://espn.go.com/video/clip?id=2743663',
-'md5': 'f4ac89b59afc7e2d7dbb049523df6768',
 'info_dict': {
-'id': '50NDFkeTqRHB0nXBOK-RGdSG5YQPuxHg',
+'id': '2743663',
 'ext': 'mp4',
 'title': 'Must-See Moments: Best of the MLS season',
+'description': 'md5:4c2d7232beaea572632bec41004f0aeb',
+'timestamp': 1449446454,
+'upload_date': '20151207',
 },
 'params': {
 'skip_download': True,
 },
-'add_ie': ['OoyalaExternal'],
+'expected_warnings': ['Unable to download f4m manifest'],
 }, {
+'url': 'http://www.espn.com/video/clip?id=10365079',
+'only_matching': True,
+}, {
+'url': 'http://www.espn.com/video/clip/_/id/17989860',
+'only_matching': True,
+}]
+
+def _real_extract(self, url):
+video_id = self._match_id(url)
+
+clip = self._download_json(
+'http://api-app.espn.com/v1/video/clips/%s' % video_id,
+video_id)['videos'][0]
+
+title = clip['headline']
+
+format_urls = set()
+formats = []
+
+def traverse_source(source, base_source_id=None):
+for source_id, source in source.items():
+if isinstance(source, compat_str):
+extract_source(source, base_source_id)
+elif isinstance(source, dict):
+traverse_source(
+source,
+'%s-%s' % (base_source_id, source_id)
+if base_source_id else source_id)
+
+def extract_source(source_url, source_id=None):
+if source_url in format_urls:
+return
+format_urls.add(source_url)
+ext = determine_ext(source_url)
+if ext == 'smil':
+formats.extend(self._extract_smil_formats(
+source_url, video_id, fatal=False))
+elif ext == 'f4m':
+formats.extend(self._extract_f4m_formats(
+source_url, video_id, f4m_id=source_id, fatal=False))
+elif ext == 'm3u8':
+formats.extend(self._extract_m3u8_formats(
+source_url, video_id, 'mp4', entry_protocol='m3u8_native',
+m3u8_id=source_id, fatal=False))
+else:
+formats.append({
+'url': source_url,
+'format_id': source_id,
+})
+
+traverse_source(clip['links']['source'])
+self._sort_formats(formats)
+
+description = clip.get('caption') or clip.get('description')
+thumbnail = clip.get('thumbnail')
+duration = int_or_none(clip.get('duration'))
+timestamp = unified_timestamp(clip.get('originalPublishDate'))
+
+return {
+'id': video_id,
+'title': title,
+'description': description,
+'thumbnail': thumbnail,
+'timestamp': timestamp,
+'duration': duration,
+'formats': formats,
+}
+
+
+class ESPNArticleIE(InfoExtractor):
+_VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/(?:[^/]+/)*(?P<id>[^/]+)'
+_TESTS = [{
 'url': 'https://espn.go.com/video/iframe/twitter/?cms=espn&id=10365079',
 'only_matching': True,
 }, {
@@ -47,11 +126,12 @@ class ESPNIE(InfoExtractor):
 }, {
 'url': 'http://espn.go.com/nba/playoffs/2015/story/_/id/12887571/john-wall-washington-wizards-no-swelling-left-hand-wrist-game-5-return',
 'only_matching': True,
-}, {
-'url': 'http://www.espn.com/video/clip?id=10365079',
-'only_matching': True,
 }]
 
+@classmethod
+def suitable(cls, url):
+return False if ESPNIE.suitable(url) else super(ESPNArticleIE, cls).suitable(url)
+
 def _real_extract(self, url):
 video_id = self._match_id(url)
 
@@ -61,23 +141,5 @@ class ESPNIE(InfoExtractor):
 r'class=(["\']).*?video-play-button.*?\1[^>]+data-id=["\'](?P<id>\d+)',
 webpage, 'video id', group='id')
 
-cms = 'espn'
-if 'data-source="intl"' in webpage:
-cms = 'intl'
-player_url = 'https://espn.go.com/video/iframe/twitter/?id=%s&cms=%s' % (video_id, cms)
-player = self._download_webpage(
-player_url, video_id)
-
-pcode = self._search_regex(
-r'["\']pcode=([^"\']+)["\']', player, 'pcode')
-
-title = remove_end(
-self._og_search_title(webpage),
-'- ESPN Video').strip()
-
-return {
-'_type': 'url_transparent',
-'url': 'ooyalaexternal:%s:%s:%s' % (cms, video_id, pcode),
-'ie_key': 'OoyalaExternal',
-'title': title,
-}
+return self.url_result(
+'http://espn.go.com/video/clip?id=%s' % video_id, ESPNIE.ie_key())
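The added `ESPNIE._real_extract` walks a nested dict of source URLs with the `traverse_source`/`extract_source` pair. A toy version with invented sample data follows (the real method additionally dispatches each URL to SMIL/f4m/m3u8 handling):

```python
seen, collected = set(), []

def extract_source(source_url, source_id=None):
    if source_url in seen:          # de-duplicate URLs that appear under several keys
        return
    seen.add(source_url)
    collected.append((source_id, source_url))

def traverse_source(source, base_source_id=None):
    for source_id, value in source.items():
        if isinstance(value, str):
            extract_source(value, base_source_id)
        elif isinstance(value, dict):
            traverse_source(
                value,
                '%s-%s' % (base_source_id, source_id) if base_source_id else source_id)

traverse_source({
    'full': 'http://example.com/clip.f4m',
    'HLS': {'HD': 'http://example.com/hd.m3u8', 'SD': 'http://example.com/sd.m3u8'},
    'HDS': {'HD': 'http://example.com/clip.f4m'},   # duplicate URL, skipped
})
print(collected)
# [(None, 'http://example.com/clip.f4m'), ('HLS', 'http://example.com/hd.m3u8'), ('HLS', 'http://example.com/sd.m3u8')]
```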
youtube_dl/extractor/extractors.py
@@ -296,6 +296,7 @@ from .footyroom import FootyRoomIE
 from .formula1 import Formula1IE
 from .fourtube import FourTubeIE
 from .fox import FOXIE
+from .fox9 import FOX9IE
 from .foxgay import FoxgayIE
 from .foxnews import (
 FoxNewsIE,
@@ -1101,6 +1102,7 @@ from .vrt import VRTIE
 from .vube import VubeIE
 from .vuclip import VuClipIE
 from .vyborymos import VyboryMosIE
+from .vzaar import VzaarIE
 from .walla import WallaIE
 from .washingtonpost import (
 WashingtonPostIE,
youtube_dl/extractor/fox9.py | 43 (new file)
@@ -0,0 +1,43 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .anvato import AnvatoIE
+from ..utils import js_to_json
+
+
+class FOX9IE(AnvatoIE):
+_VALID_URL = r'https?://(?:www\.)?fox9\.com/(?:[^/]+/)+(?P<id>\d+)-story'
+_TESTS = [{
+'url': 'http://www.fox9.com/news/215123287-story',
+'md5': 'd6e1b2572c3bab8a849c9103615dd243',
+'info_dict': {
+'id': '314473',
+'ext': 'mp4',
+'title': 'Bear climbs tree in downtown Duluth',
+'description': 'md5:6a36bfb5073a411758a752455408ac90',
+'duration': 51,
+'timestamp': 1478123580,
+'upload_date': '20161102',
+'uploader': 'EPFOX',
+'categories': ['News', 'Sports'],
+'tags': ['news', 'video'],
+},
+}, {
+'url': 'http://www.fox9.com/news/investigators/214070684-story',
+'only_matching': True,
+}]
+
+def _real_extract(self, url):
+video_id = self._match_id(url)
+
+webpage = self._download_webpage(url, video_id)
+
+video_id = self._parse_json(
+self._search_regex(
+r'AnvatoPlaylist\s*\(\s*(\[.+?\])\s*\)\s*;',
+webpage, 'anvato playlist'),
+video_id, transform_source=js_to_json)[0]['video']
+
+return self._get_anvato_videos(
+'anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b',
+video_id)
youtube_dl/extractor/generic.py
@@ -47,6 +47,8 @@ from .svt import SVTIE
 from .pornhub import PornHubIE
 from .xhamster import XHamsterEmbedIE
 from .tnaflix import TNAFlixNetworkEmbedIE
+from .drtuber import DrTuberIE
+from .redtube import RedTubeIE
 from .vimeo import VimeoIE
 from .dailymotion import (
 DailymotionIE,
@@ -1981,11 +1983,6 @@ class GenericIE(InfoExtractor):
 if sportbox_urls:
 return _playlist_from_matches(sportbox_urls, ie='SportBoxEmbed')
 
-# Look for embedded PornHub player
-pornhub_url = PornHubIE._extract_url(webpage)
-if pornhub_url:
-return self.url_result(pornhub_url, 'PornHub')
-
 # Look for embedded XHamster player
 xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
 if xhamster_urls:
@@ -1996,6 +1993,21 @@ class GenericIE(InfoExtractor):
 if tnaflix_urls:
 return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())
 
+# Look for embedded PornHub player
+pornhub_urls = PornHubIE._extract_urls(webpage)
+if pornhub_urls:
+return _playlist_from_matches(pornhub_urls, ie=PornHubIE.ie_key())
+
+# Look for embedded DrTuber player
+drtuber_urls = DrTuberIE._extract_urls(webpage)
+if drtuber_urls:
+return _playlist_from_matches(drtuber_urls, ie=DrTuberIE.ie_key())
+
+# Look for embedded RedTube player
+redtube_urls = RedTubeIE._extract_urls(webpage)
+if redtube_urls:
+return _playlist_from_matches(redtube_urls, ie=RedTubeIE.ie_key())
+
 # Look for embedded Tvigle player
 mobj = re.search(
 r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
@@ -2453,8 +2465,21 @@ class GenericIE(InfoExtractor):
 entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
 elif ext == 'f4m':
 entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
-elif re.search(r'(?i)\.ism/manifest', video_url):
-entry_info_dict['formats'] = self._extract_ism_formats(video_url, video_id)
+elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
+# Just matching .ism/manifest is not enough to be reliably sure
+# whether it's actually an ISM manifest or some other streaming
+# manifest since there are various streaming URL formats
+# possible (see [1]) as well as some other shenanigans like
+# .smil/manifest URLs that actually serve an ISM (see [2]) and
+# so on.
+# Thus the most reasonable way to solve this is to delegate
+# to generic extractor in order to look into the contents of
+# the manifest itself.
+# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
+# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
+entry_info_dict = self.url_result(
+smuggle_url(video_url, {'to_generic': True}),
+GenericIE.ie_key())
 else:
 entry_info_dict['url'] = video_url
 
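The delegation above relies on `smuggle_url` from `youtube_dl.utils` to pass extra context (`{'to_generic': True}`) along with the URL. A hedged round-trip illustration with a hypothetical manifest URL:

```python
from youtube_dl.utils import smuggle_url, unsmuggle_url

manifest_url = 'https://example.com/stream.smil/manifest'  # hypothetical URL
wrapped = smuggle_url(manifest_url, {'to_generic': True})
print(wrapped)  # the original URL with the data appended in a #__youtubedl_smuggle fragment

plain_url, data = unsmuggle_url(wrapped, default={})
assert plain_url == manifest_url and data == {'to_generic': True}
```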
youtube_dl/extractor/mitele.py
@@ -1,19 +1,20 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
+import uuid
 
 from .common import InfoExtractor
 from ..compat import (
+compat_str,
 compat_urllib_parse_urlencode,
 compat_urlparse,
 )
 from ..utils import (
-get_element_by_attribute,
 int_or_none,
-remove_start,
 extract_attributes,
 determine_ext,
+smuggle_url,
+parse_duration,
 )
 
 
@@ -72,16 +73,14 @@ class MiTeleBaseIE(InfoExtractor):
 }
 
 
-class MiTeleIE(MiTeleBaseIE):
+class MiTeleIE(InfoExtractor):
 IE_DESC = 'mitele.es'
-_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/){3}(?P<id>[^/]+)/'
+_VALID_URL = r'https?://(?:www\.)?mitele\.es/programas-tv/(?:[^/]+/)(?P<id>[^/]+)/player'
 
 _TESTS = [{
-'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
-# MD5 is unstable
+'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
 'info_dict': {
-'id': '0NF1jJnxS1Wu3pHrmvFyw2',
-'display_id': 'programa-144',
+'id': '57b0dfb9c715da65618b4afa',
 'ext': 'mp4',
 'title': 'Tor, la web invisible',
 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
@@ -91,57 +90,71 @@ class MiTeleIE(MiTeleBaseIE):
 'thumbnail': 're:(?i)^https?://.*\.jpg$',
 'duration': 2913,
 },
+'add_ie': ['Ooyala'],
 }, {
 # no explicit title
-'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/temporada-6/programa-226/',
+'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
 'info_dict': {
-'id': 'eLZSwoEd1S3pVyUm8lc6F',
-'display_id': 'programa-226',
+'id': '57b0de3dc915da14058b4876',
 'ext': 'mp4',
-'title': 'Cuarto Milenio - Temporada 6 - Programa 226',
-'description': 'md5:50daf9fadefa4e62d9fc866d0c015701',
+'title': 'Cuarto Milenio Temporada 6 Programa 226',
+'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f',
 'series': 'Cuarto Milenio',
 'season': 'Temporada 6',
 'episode': 'Programa 226',
 'thumbnail': 're:(?i)^https?://.*\.jpg$',
-'duration': 7312,
+'duration': 7313,
 },
 'params': {
 'skip_download': True,
 },
+'add_ie': ['Ooyala'],
 }]
 
 def _real_extract(self, url):
-display_id = self._match_id(url)
-
-webpage = self._download_webpage(url, display_id)
-
-info = self._get_player_info(url, webpage)
-
-title = self._search_regex(
-r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>',
-webpage, 'title', default=None)
-
-mobj = re.search(r'''(?sx)
-class="Destacado-text"[^>]*>.*?<h1>\s*
-<span>(?P<series>[^<]+)</span>\s*
-<span>(?P<season>[^<]+)</span>\s*
-<span>(?P<episode>[^<]+)</span>''', webpage)
-series, season, episode = mobj.groups() if mobj else [None] * 3
-
-if not title:
-if mobj:
-title = '%s - %s - %s' % (series, season, episode)
-else:
-title = remove_start(self._search_regex(
-r'<title>([^<]+)</title>', webpage, 'title'), 'Ver online ')
-
-info.update({
-'display_id': display_id,
+video_id = self._match_id(url)
+webpage = self._download_webpage(url, video_id)
+
+gigya_url = self._search_regex(r'<gigya-api>[^>]*</gigya-api>[^>]*<script\s*src="([^"]*)">[^>]*</script>', webpage, 'gigya', default=None)
+gigya_sc = self._download_webpage(compat_urlparse.urljoin(r'http://www.mitele.es/', gigya_url), video_id, 'Downloading gigya script')
+# Get a appKey/uuid for getting the session key
+appKey_var = self._search_regex(r'value\("appGridApplicationKey",([0-9a-f]+)\)', gigya_sc, 'appKey variable')
+appKey = self._search_regex(r'var %s="([0-9a-f]+)"' % appKey_var, gigya_sc, 'appKey')
+uid = compat_str(uuid.uuid4())
+session_url = 'https://appgrid-api.cloud.accedo.tv/session?appKey=%s&uuid=%s' % (appKey, uid)
+session_json = self._download_json(session_url, video_id, 'Downloading session keys')
+sessionKey = compat_str(session_json['sessionKey'])
+
+paths_url = 'https://appgrid-api.cloud.accedo.tv/metadata/general_configuration,%20web_configuration?sessionKey=' + sessionKey
+paths = self._download_json(paths_url, video_id, 'Downloading paths JSON')
+ooyala_s = paths['general_configuration']['api_configuration']['ooyala_search']
+data_p = (
+'http://' + ooyala_s['base_url'] + ooyala_s['full_path'] + ooyala_s['provider_id'] +
+'/docs/' + video_id + '?include_titles=Series,Season&product_name=test&format=full')
+data = self._download_json(data_p, video_id, 'Downloading data JSON')
+source = data['hits']['hits'][0]['_source']
+embedCode = source['offers'][0]['embed_codes'][0]
+
+titles = source['localizable_titles'][0]
+title = titles.get('title_medium') or titles['title_long']
+episode = titles['title_sort_name']
+description = titles['summary_long']
+titles_series = source['localizable_titles_series'][0]
+series = titles_series['title_long']
+titles_season = source['localizable_titles_season'][0]
+season = titles_season['title_medium']
+duration = parse_duration(source['videos'][0]['duration'])
+
+return {
+'_type': 'url_transparent',
+# for some reason only HLS is supported
+'url': smuggle_url('ooyala:' + embedCode, {'supportedformats': 'm3u8'}),
+'id': video_id,
 'title': title,
-'description': get_element_by_attribute('class', 'text', webpage),
+'description': description,
 'series': series,
 'season': season,
 'episode': episode,
-})
-return info
+'duration': duration,
+'thumbnail': source['images'][0]['url'],
+}
youtube_dl/extractor/ooyala.py
@@ -18,7 +18,7 @@ class OoyalaBaseIE(InfoExtractor):
 _CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/'
 _AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v2/authorization/embed_code/%s/%s?'
 
-def _extract(self, content_tree_url, video_id, domain='example.org'):
+def _extract(self, content_tree_url, video_id, domain='example.org', supportedformats=None):
 content_tree = self._download_json(content_tree_url, video_id)['content_tree']
 metadata = content_tree[list(content_tree)[0]]
 embed_code = metadata['embed_code']
@@ -29,7 +29,7 @@ class OoyalaBaseIE(InfoExtractor):
 self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
 compat_urllib_parse_urlencode({
 'domain': domain,
-'supportedFormats': 'mp4,rtmp,m3u8,hds',
+'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds',
 }), video_id)
 
 cur_auth_data = auth_data['authorization_data'][embed_code]
@@ -145,8 +145,9 @@ class OoyalaIE(OoyalaBaseIE):
 url, smuggled_data = unsmuggle_url(url, {})
 embed_code = self._match_id(url)
 domain = smuggled_data.get('domain')
+supportedformats = smuggled_data.get('supportedformats')
 content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code)
-return self._extract(content_tree_url, embed_code, domain)
+return self._extract(content_tree_url, embed_code, domain, supportedformats)
 
 
 class OoyalaExternalIE(OoyalaBaseIE):
youtube_dl/extractor/pornhub.py
@@ -33,7 +33,7 @@ class PornHubIE(InfoExtractor):
 (?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)|
 (?:www\.)?thumbzilla\.com/video/
 )
-(?P<id>[0-9a-z]+)
+(?P<id>[\da-z]+)
 '''
 _TESTS = [{
 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
@@ -96,12 +96,11 @@ class PornHubIE(InfoExtractor):
 'only_matching': True,
 }]
 
-@classmethod
-def _extract_url(cls, webpage):
-mobj = re.search(
-r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/\d+)\1', webpage)
-if mobj:
-return mobj.group('url')
+@staticmethod
+def _extract_urls(webpage):
+return re.findall(
+r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.com/embed/[\da-z]+)',
+webpage)
 
 def _extract_count(self, pattern, webpage, name):
 return str_to_int(self._search_regex(
youtube_dl/extractor/radiocanada.py
@@ -125,6 +125,14 @@ class RadioCanadaIE(InfoExtractor):
 f4m_id='hds', fatal=False))
 self._sort_formats(formats)
 
+subtitles = {}
+closed_caption_url = get_meta('closedCaption') or get_meta('closedCaptionHTML5')
+if closed_caption_url:
+subtitles['fr'] = [{
+'url': closed_caption_url,
+'ext': determine_ext(closed_caption_url, 'vtt'),
+}]
+
 return {
 'id': video_id,
 'title': get_meta('Title'),
@@ -135,6 +143,7 @@ class RadioCanadaIE(InfoExtractor):
 'season_number': int_or_none('SrcSaison'),
 'episode_number': int_or_none('SrcEpisode'),
 'upload_date': unified_strdate(get_meta('Date')),
+'subtitles': subtitles,
 'formats': formats,
 }
 
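For reference, the subtitles value built above follows youtube-dl's standard shape, a mapping from language code to a list of variants, and `determine_ext` falls back to its second argument when the URL carries no usable extension. A sketch with a hypothetical caption URL:

```python
from youtube_dl.utils import determine_ext

closed_caption_url = 'https://example.com/captions/12345'  # hypothetical, no file extension
subtitles = {
    'fr': [{
        'url': closed_caption_url,
        'ext': determine_ext(closed_caption_url, 'vtt'),   # falls back to 'vtt'
    }],
}
print(subtitles)
```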
youtube_dl/extractor/redtube.py
@@ -1,5 +1,7 @@
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..utils import (
 ExtractorError,
@@ -10,8 +12,8 @@ from ..utils import (
 
 
 class RedTubeIE(InfoExtractor):
-_VALID_URL = r'https?://(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
-_TEST = {
+_VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
+_TESTS = [{
 'url': 'http://www.redtube.com/66418',
 'md5': '7b8c22b5e7098a3e1c09709df1126d2d',
 'info_dict': {
@@ -23,11 +25,21 @@ class RedTubeIE(InfoExtractor):
 'view_count': int,
 'age_limit': 18,
 }
-}
+}, {
+'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
+'only_matching': True,
+}]
+
+@staticmethod
+def _extract_urls(webpage):
+return re.findall(
+r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
+webpage)
 
 def _real_extract(self, url):
 video_id = self._match_id(url)
-webpage = self._download_webpage(url, video_id)
+webpage = self._download_webpage(
+'http://www.redtube.com/%s' % video_id, video_id)
 
 if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
 raise ExtractorError('Video %s has been removed' % video_id, expected=True)
youtube_dl/extractor/shahid.py
@@ -1,17 +1,24 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+import json
+
 from .common import InfoExtractor
+from ..compat import compat_HTTPError
 from ..utils import (
 ExtractorError,
 int_or_none,
 parse_iso8601,
 str_or_none,
+urlencode_postdata,
+clean_html,
 )
 
 
 class ShahidIE(InfoExtractor):
-_VALID_URL = r'https?://shahid\.mbc\.net/ar/episode/(?P<id>\d+)/?'
+_NETRC_MACHINE = 'shahid'
+_VALID_URL = r'https?://shahid\.mbc\.net/ar/(?P<type>episode|movie)/(?P<id>\d+)'
 _TESTS = [{
 'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html',
 'info_dict': {
@@ -27,18 +34,54 @@ class ShahidIE(InfoExtractor):
 # m3u8 download
 'skip_download': True,
 }
+}, {
+'url': 'https://shahid.mbc.net/ar/movie/151746/%D8%A7%D9%84%D9%82%D9%86%D8%A7%D8%B5%D8%A9.html',
+'only_matching': True
 }, {
 # shahid plus subscriber only
 'url': 'https://shahid.mbc.net/ar/episode/90511/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1.html',
 'only_matching': True
 }]
 
-def _call_api(self, path, video_id, note):
-data = self._download_json(
-'http://api.shahid.net/api/v1_1/' + path, video_id, note, query={
-'apiKey': 'sh@hid0nlin3',
-'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
-}).get('data', {})
+def _real_initialize(self):
+email, password = self._get_login_info()
+if email is None:
+return
+
+try:
+user_data = self._download_json(
+'https://shahid.mbc.net/wd/service/users/login',
+None, 'Logging in', data=json.dumps({
+'email': email,
+'password': password,
+'basic': 'false',
+}).encode('utf-8'), headers={
+'Content-Type': 'application/json; charset=UTF-8',
+})['user']
+except ExtractorError as e:
+if isinstance(e.cause, compat_HTTPError):
+fail_data = self._parse_json(
+e.cause.read().decode('utf-8'), None, fatal=False)
+if fail_data:
+faults = fail_data.get('faults', [])
+faults_message = ', '.join([clean_html(fault['userMessage']) for fault in faults if fault.get('userMessage')])
+if faults_message:
+raise ExtractorError(faults_message, expected=True)
+raise
+
+self._download_webpage(
+'https://shahid.mbc.net/populateContext',
+None, 'Populate Context', data=urlencode_postdata({
+'firstName': user_data['firstName'],
+'lastName': user_data['lastName'],
+'userName': user_data['email'],
+'csg_user_name': user_data['email'],
+'subscriberId': user_data['id'],
+'sessionId': user_data['sessionId'],
+}))
+
+def _get_api_data(self, response):
+data = response.get('data', {})
 
 error = data.get('error')
 if error:
@@ -49,11 +92,11 @@ class ShahidIE(InfoExtractor):
 return data
 
 def _real_extract(self, url):
-video_id = self._match_id(url)
+page_type, video_id = re.match(self._VALID_URL, url).groups()
 
-player = self._call_api(
-'Content/Episode/%s' % video_id,
-video_id, 'Downloading player JSON')
+player = self._get_api_data(self._download_json(
+'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-player.html' % video_id,
+video_id, 'Downloading player JSON'))
 
 if player.get('drm'):
 raise ExtractorError('This video is DRM protected.', expected=True)
@@ -61,9 +104,12 @@ class ShahidIE(InfoExtractor):
 formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4')
 self._sort_formats(formats)
 
-video = self._call_api(
-'episode/%s' % video_id, video_id,
-'Downloading video JSON')['episode']
+video = self._get_api_data(self._download_json(
+'http://api.shahid.net/api/v1_1/%s/%s' % (page_type, video_id),
+video_id, 'Downloading video JSON', query={
+'apiKey': 'sh@hid0nlin3',
+'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
+}))[page_type]
 
 title = video['title']
 categories = [
youtube_dl/extractor/tmz.py
@@ -32,12 +32,15 @@ class TMZArticleIE(InfoExtractor):
 _VALID_URL = r'https?://(?:www\.)?tmz\.com/\d{4}/\d{2}/\d{2}/(?P<id>[^/]+)/?'
 _TEST = {
 'url': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert',
-'md5': 'e482a414a38db73087450e3a6ce69d00',
+'md5': '3316ff838ae5bb7f642537825e1e90d2',
 'info_dict': {
 'id': '0_6snoelag',
-'ext': 'mp4',
+'ext': 'mov',
 'title': 'Bobby Brown Tells Crowd ... Bobbi Kristina is Awake',
 'description': 'Bobby Brown stunned his audience during a concert Saturday night, when he told the crowd, "Bobbi is awake. She\'s watching me."',
+'timestamp': 1429467813,
+'upload_date': '20150419',
+'uploader_id': 'batchUser',
 }
 }
 
@@ -45,12 +48,9 @@ class TMZArticleIE(InfoExtractor):
 video_id = self._match_id(url)
 
 webpage = self._download_webpage(url, video_id)
-embedded_video_info_str = self._html_search_regex(
-r'tmzVideoEmbedV2\("([^)]+)"\);', webpage, 'embedded video info')
-embedded_video_info = self._parse_json(
-embedded_video_info_str, video_id,
-transform_source=lambda s: s.replace('\\', ''))
+embedded_video_info = self._parse_json(self._html_search_regex(
+r'tmzVideoEmbed\(({.+?})\);', webpage, 'embedded video info'),
+video_id)
 
 return self.url_result(
 'http://www.tmz.com/videos/%s/' % embedded_video_info['id'])
@ -15,11 +15,11 @@ from ..utils import (
|
|||||||
class TouTvIE(InfoExtractor):
|
class TouTvIE(InfoExtractor):
|
||||||
_NETRC_MACHINE = 'toutv'
|
_NETRC_MACHINE = 'toutv'
|
||||||
IE_NAME = 'tou.tv'
|
IE_NAME = 'tou.tv'
|
||||||
_VALID_URL = r'https?://ici\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+/S[0-9]+E[0-9]+)'
|
_VALID_URL = r'https?://ici\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+(?:/S[0-9]+E[0-9]+)?)'
|
||||||
_access_token = None
|
_access_token = None
|
||||||
_claims = None
|
_claims = None
|
||||||
|
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'http://ici.tou.tv/garfield-tout-court/S2015E17',
|
'url': 'http://ici.tou.tv/garfield-tout-court/S2015E17',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '122017',
|
'id': '122017',
|
||||||
@ -33,7 +33,10 @@ class TouTvIE(InfoExtractor):
|
|||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
'skip': '404 Not Found',
|
'skip': '404 Not Found',
|
||||||
}
|
}, {
|
||||||
|
'url': 'http://ici.tou.tv/hackers',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
email, password = self._get_login_info()
|
email, password = self._get_login_info()
|
||||||
@@ -9,7 +9,6 @@ from ..utils import (
     int_or_none,
     sanitized_Request,
     urlencode_postdata,
-    parse_iso8601,
 )
 
 
@@ -19,17 +18,13 @@ class TubiTvIE(InfoExtractor):
     _NETRC_MACHINE = 'tubitv'
     _TEST = {
         'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday',
+        'md5': '43ac06be9326f41912dc64ccf7a80320',
         'info_dict': {
             'id': '283829',
             'ext': 'mp4',
             'title': 'The Comedian at The Friday',
             'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.',
-            'uploader': 'Indie Rights Films',
-            'upload_date': '20160111',
-            'timestamp': 1452555979,
-        },
-        'params': {
-            'skip_download': 'HLS download',
+            'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434',
         },
     }
 
@@ -58,19 +53,28 @@ class TubiTvIE(InfoExtractor):
         video_id = self._match_id(url)
         video_data = self._download_json(
             'http://tubitv.com/oz/videos/%s/content' % video_id, video_id)
-        title = video_data['n']
+        title = video_data['title']
 
         formats = self._extract_m3u8_formats(
-            video_data['mh'], video_id, 'mp4', 'm3u8_native')
+            self._proto_relative_url(video_data['url']),
+            video_id, 'mp4', 'm3u8_native')
         self._sort_formats(formats)
 
+        thumbnails = []
+        for thumbnail_url in video_data.get('thumbnails', []):
+            if not thumbnail_url:
+                continue
+            thumbnails.append({
+                'url': self._proto_relative_url(thumbnail_url),
+            })
+
         subtitles = {}
-        for sub in video_data.get('sb', []):
-            sub_url = sub.get('u')
+        for sub in video_data.get('subtitles', []):
+            sub_url = sub.get('url')
             if not sub_url:
                 continue
-            subtitles.setdefault(sub.get('l', 'en'), []).append({
-                'url': sub_url,
+            subtitles.setdefault(sub.get('lang', 'English'), []).append({
+                'url': self._proto_relative_url(sub_url),
             })
 
         return {
@@ -78,9 +82,8 @@ class TubiTvIE(InfoExtractor):
             'title': title,
             'formats': formats,
             'subtitles': subtitles,
-            'thumbnail': video_data.get('ph'),
-            'description': video_data.get('d'),
-            'duration': int_or_none(video_data.get('s')),
-            'timestamp': parse_iso8601(video_data.get('u')),
-            'uploader': video_data.get('on'),
+            'thumbnails': thumbnails,
+            'description': video_data.get('description'),
+            'duration': int_or_none(video_data.get('duration')),
+            'uploader_id': video_data.get('publisher_id'),
         }
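The renamed keys above imply that the /oz/videos/<id>/content endpoint now returns descriptive field names (title, url, thumbnails, subtitles, duration, publisher_id) instead of the old single-letter ones. A rough sketch of the payload shape the new code expects — the values below are invented for illustration, and 'https:' + url stands in for _proto_relative_url():

# Hypothetical response payload; the real API may carry additional fields.
video_data = {
    'title': 'The Comedian at The Friday',
    'url': '//example.com/hls/master.m3u8',           # protocol-relative HLS manifest
    'thumbnails': ['//example.com/thumb.jpg', None],   # empty entries are skipped
    'subtitles': [{'url': '//example.com/en.vtt', 'lang': 'English'}],
    'duration': 4120,
    'publisher_id': 'bc168bee0d18dd1cb3b86c68706ab434',
}

thumbnails = [{'url': 'https:' + t} for t in video_data.get('thumbnails', []) if t]
subtitles = {}
for sub in video_data.get('subtitles', []):
    if sub.get('url'):
        subtitles.setdefault(sub.get('lang', 'English'), []).append(
            {'url': 'https:' + sub['url']})
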
@@ -1,12 +1,93 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
+import time
+import hashlib
+import json
 
+from .adobepass import AdobePassIE
 from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..compat import compat_HTTPError
+from ..utils import (
+    int_or_none,
+    parse_age_limit,
+    str_or_none,
+    parse_duration,
+    ExtractorError,
+    extract_attributes,
+)
 
 
-class ViceIE(InfoExtractor):
+class ViceBaseIE(AdobePassIE):
+    def _extract_preplay_video(self, url, webpage):
+        watch_hub_data = extract_attributes(self._search_regex(
+            r'(?s)(<watch-hub\s*.+?</watch-hub>)', webpage, 'watch hub'))
+        video_id = watch_hub_data['vms-id']
+        title = watch_hub_data['video-title']
+
+        query = {}
+        is_locked = watch_hub_data.get('video-locked') == '1'
+        if is_locked:
+            resource = self._get_mvpd_resource(
+                'VICELAND', title, video_id,
+                watch_hub_data.get('video-rating'))
+            query['tvetoken'] = self._extract_mvpd_auth(url, video_id, 'VICELAND', resource)
+
+        # signature generation algorithm is reverse engineered from signatureGenerator in
+        # webpack:///../shared/~/vice-player/dist/js/vice-player.js in
+        # https://www.viceland.com/assets/common/js/web.vendor.bundle.js
+        exp = int(time.time()) + 14400
+        query.update({
+            'exp': exp,
+            'sign': hashlib.sha512(('%s:GET:%d' % (video_id, exp)).encode()).hexdigest(),
+        })
+
+        try:
+            host = 'www.viceland' if is_locked else self._PREPLAY_HOST
+            preplay = self._download_json('https://%s.com/en_us/preplay/%s' % (host, video_id), video_id, query=query)
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+                error = json.loads(e.cause.read().decode())
+                raise ExtractorError('%s said: %s' % (self.IE_NAME, error['details']), expected=True)
+            raise
+
+        video_data = preplay['video']
+        base = video_data['base']
+        uplynk_preplay_url = preplay['preplayURL']
+        episode = video_data.get('episode', {})
+        channel = video_data.get('channel', {})
+
+        subtitles = {}
+        cc_url = preplay.get('ccURL')
+        if cc_url:
+            subtitles['en'] = [{
+                'url': cc_url,
+            }]
+
+        return {
+            '_type': 'url_transparent',
+            'url': uplynk_preplay_url,
+            'id': video_id,
+            'title': title,
+            'description': base.get('body'),
+            'thumbnail': watch_hub_data.get('cover-image') or watch_hub_data.get('thumbnail'),
+            'duration': parse_duration(video_data.get('video_duration') or watch_hub_data.get('video-duration')),
+            'timestamp': int_or_none(video_data.get('created_at')),
+            'age_limit': parse_age_limit(video_data.get('video_rating')),
+            'series': video_data.get('show_title') or watch_hub_data.get('show-title'),
+            'episode_number': int_or_none(episode.get('episode_number') or watch_hub_data.get('episode')),
+            'episode_id': str_or_none(episode.get('id') or video_data.get('episode_id')),
+            'season_number': int_or_none(watch_hub_data.get('season')),
+            'season_id': str_or_none(episode.get('season_id')),
+            'uploader': channel.get('base', {}).get('title') or watch_hub_data.get('channel-title'),
+            'uploader_id': str_or_none(channel.get('id')),
+            'subtitles': subtitles,
+            'ie_key': 'UplynkPreplay',
+        }
+
+
+class ViceIE(ViceBaseIE):
     _VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?:[^/]+/)?videos?/(?P<id>[^/?#&]+)'
 
     _TESTS = [{
@@ -21,7 +102,7 @@ class ViceIE(InfoExtractor):
         'add_ie': ['Ooyala'],
     }, {
         'url': 'http://www.vice.com/video/how-to-hack-a-car',
-        'md5': '6fb2989a3fed069fb8eab3401fc2d3c9',
+        'md5': 'a7ecf64ee4fa19b916c16f4b56184ae2',
         'info_dict': {
             'id': '3jstaBeXgAs',
             'ext': 'mp4',
@@ -32,6 +113,22 @@ class ViceIE(InfoExtractor):
             'upload_date': '20140529',
         },
         'add_ie': ['Youtube'],
+    }, {
+        'url': 'https://video.vice.com/en_us/video/the-signal-from-tolva/5816510690b70e6c5fd39a56',
+        'md5': '',
+        'info_dict': {
+            'id': '5816510690b70e6c5fd39a56',
+            'ext': 'mp4',
+            'uploader': 'Waypoint',
+            'title': 'The Signal From Tölva',
+            'uploader_id': '57f7d621e05ca860fa9ccaf9',
+            'timestamp': 1477941983938,
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+        'add_ie': ['UplynkPreplay'],
     }, {
         'url': 'https://news.vice.com/video/experimenting-on-animals-inside-the-monkey-lab',
         'only_matching': True,
@@ -42,21 +139,21 @@ class ViceIE(InfoExtractor):
         'url': 'https://munchies.vice.com/en/videos/watch-the-trailer-for-our-new-series-the-pizza-show',
         'only_matching': True,
     }]
+    _PREPLAY_HOST = 'video.vice'
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        try:
-            embed_code = self._search_regex(
-                r'embedCode=([^&\'"]+)', webpage,
-                'ooyala embed code', default=None)
-            if embed_code:
-                return self.url_result('ooyala:%s' % embed_code, 'Ooyala')
-            youtube_id = self._search_regex(
-                r'data-youtube-id="([^"]+)"', webpage, 'youtube id')
-            return self.url_result(youtube_id, 'Youtube')
-        except ExtractorError:
-            raise ExtractorError('The page doesn\'t contain a video', expected=True)
+        webpage, urlh = self._download_webpage_handle(url, video_id)
+        embed_code = self._search_regex(
+            r'embedCode=([^&\'"]+)', webpage,
+            'ooyala embed code', default=None)
+        if embed_code:
+            return self.url_result('ooyala:%s' % embed_code, 'Ooyala')
+        youtube_id = self._search_regex(
+            r'data-youtube-id="([^"]+)"', webpage, 'youtube id', default=None)
+        if youtube_id:
+            return self.url_result(youtube_id, 'Youtube')
+        return self._extract_preplay_video(urlh.geturl(), webpage)
 
 
 class ViceShowIE(InfoExtractor):
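The signing step in ViceBaseIE._extract_preplay_video above boils down to: pick a Unix expiry four hours out and SHA-512 the string '<video id>:GET:<exp>'. A standalone sketch of building that query (the id comes from the video.vice.com test above; the endpoint choice mirrors _PREPLAY_HOST):

import hashlib
import time

video_id = '5816510690b70e6c5fd39a56'  # example id from the test above
exp = int(time.time()) + 14400         # signature valid for four hours

query = {
    'exp': exp,
    'sign': hashlib.sha512(('%s:GET:%d' % (video_id, exp)).encode()).hexdigest(),
}
# query is then appended to https://video.vice.com/en_us/preplay/<video_id>
# (or https://www.viceland.com/... for TVE-locked videos).
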
@@ -1,23 +1,10 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import time
-import hashlib
-import json
-
-from .adobepass import AdobePassIE
-from ..compat import compat_HTTPError
-from ..utils import (
-    int_or_none,
-    parse_age_limit,
-    str_or_none,
-    parse_duration,
-    ExtractorError,
-    extract_attributes,
-)
+from .vice import ViceBaseIE
 
 
-class VicelandIE(AdobePassIE):
+class VicelandIE(ViceBaseIE):
     _VALID_URL = r'https?://(?:www\.)?viceland\.com/[^/]+/video/[^/]+/(?P<id>[a-f0-9]+)'
     _TEST = {
         'url': 'https://www.viceland.com/en_us/video/cyberwar-trailer/57608447973ee7705f6fbd4e',
@@ -38,70 +25,9 @@ class VicelandIE(AdobePassIE):
         },
         'add_ie': ['UplynkPreplay'],
     }
+    _PREPLAY_HOST = 'www.viceland'
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
         webpage = self._download_webpage(url, video_id)
-        watch_hub_data = extract_attributes(self._search_regex(
-            r'(?s)(<watch-hub\s*.+?</watch-hub>)', webpage, 'watch hub'))
-        video_id = watch_hub_data['vms-id']
-        title = watch_hub_data['video-title']
-
-        query = {}
-        if watch_hub_data.get('video-locked') == '1':
-            resource = self._get_mvpd_resource(
-                'VICELAND', title, video_id,
-                watch_hub_data.get('video-rating'))
-            query['tvetoken'] = self._extract_mvpd_auth(url, video_id, 'VICELAND', resource)
-
-        # signature generation algorithm is reverse engineered from signatureGenerator in
-        # webpack:///../shared/~/vice-player/dist/js/vice-player.js in
-        # https://www.viceland.com/assets/common/js/web.vendor.bundle.js
-        exp = int(time.time()) + 14400
-        query.update({
-            'exp': exp,
-            'sign': hashlib.sha512(('%s:GET:%d' % (video_id, exp)).encode()).hexdigest(),
-        })
-
-        try:
-            preplay = self._download_json('https://www.viceland.com/en_us/preplay/%s' % video_id, video_id, query=query)
-        except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
-                error = json.loads(e.cause.read().decode())
-                raise ExtractorError('%s said: %s' % (self.IE_NAME, error['details']), expected=True)
-            raise
-
-        video_data = preplay['video']
-        base = video_data['base']
-        uplynk_preplay_url = preplay['preplayURL']
-        episode = video_data.get('episode', {})
-        channel = video_data.get('channel', {})
-
-        subtitles = {}
-        cc_url = preplay.get('ccURL')
-        if cc_url:
-            subtitles['en'] = [{
-                'url': cc_url,
-            }]
-
-        return {
-            '_type': 'url_transparent',
-            'url': uplynk_preplay_url,
-            'id': video_id,
-            'title': title,
-            'description': base.get('body'),
-            'thumbnail': watch_hub_data.get('cover-image') or watch_hub_data.get('thumbnail'),
-            'duration': parse_duration(video_data.get('video_duration') or watch_hub_data.get('video-duration')),
-            'timestamp': int_or_none(video_data.get('created_at')),
-            'age_limit': parse_age_limit(video_data.get('video_rating')),
-            'series': video_data.get('show_title') or watch_hub_data.get('show-title'),
-            'episode_number': int_or_none(episode.get('episode_number') or watch_hub_data.get('episode')),
-            'episode_id': str_or_none(episode.get('id') or video_data.get('episode_id')),
-            'season_number': int_or_none(watch_hub_data.get('season')),
-            'season_id': str_or_none(episode.get('season_id')),
-            'uploader': channel.get('base', {}).get('title') or watch_hub_data.get('channel-title'),
-            'uploader_id': str_or_none(channel.get('id')),
-            'subtitles': subtitles,
-            'ie_key': 'UplynkPreplay',
-        }
+        return self._extract_preplay_video(url, webpage)

@@ -31,7 +31,8 @@ class VodlockerIE(InfoExtractor):
         if any(p in webpage for p in (
                 '>THIS FILE WAS DELETED<',
                 '>File Not Found<',
-                'The file you were looking for could not be found, sorry for any inconvenience.<')):
+                'The file you were looking for could not be found, sorry for any inconvenience.<',
+                '>The file was removed')):
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)
 
         fields = self._hidden_inputs(webpage)

youtube_dl/extractor/vzaar.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    float_or_none,
+)
+
+
+class VzaarIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:(?:www|view)\.)?vzaar\.com/(?:videos/)?(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://vzaar.com/videos/1152805',
+        'md5': 'bde5ddfeb104a6c56a93a06b04901dbf',
+        'info_dict': {
+            'id': '1152805',
+            'ext': 'mp4',
+            'title': 'sample video (public)',
+        },
+    }, {
+        'url': 'https://view.vzaar.com/27272/player',
+        'md5': '3b50012ac9bbce7f445550d54e0508f2',
+        'info_dict': {
+            'id': '27272',
+            'ext': 'mp3',
+            'title': 'MP3',
+        },
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        video_data = self._download_json(
+            'http://view.vzaar.com/v2/%s/video' % video_id, video_id)
+        source_url = video_data['sourceUrl']
+
+        info = {
+            'id': video_id,
+            'title': video_data['videoTitle'],
+            'url': source_url,
+            'thumbnail': self._proto_relative_url(video_data.get('poster')),
+            'duration': float_or_none(video_data.get('videoDuration')),
+        }
+        if 'audio' in source_url:
+            info.update({
+                'vcodec': 'none',
+                'ext': 'mp3',
+            })
+        else:
+            info.update({
+                'width': int_or_none(video_data.get('width')),
+                'height': int_or_none(video_data.get('height')),
+                'ext': 'mp4',
+            })
+        return info
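The new extractor decides between audio and video purely from the source URL: anything containing 'audio' is treated as an MP3-only stream, everything else as MP4 with width/height. With this commit in place it should be exercisable through the usual API, for example:

import youtube_dl

# Both URL forms from the tests above should resolve through the new VzaarIE.
ydl = youtube_dl.YoutubeDL()
info = ydl.extract_info('https://vzaar.com/videos/1152805', download=False)
print(info.get('title'), info.get('ext'))  # expected per the test: sample video (public) mp4
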
@@ -201,6 +201,32 @@ class YahooIE(InfoExtractor):
             },
             'skip': 'redirect to https://www.yahoo.com/music',
         },
+        {
+            # yahoo://article/
+            'url': 'https://www.yahoo.com/movies/video/true-story-trailer-173000497.html',
+            'info_dict': {
+                'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
+                'ext': 'mp4',
+                'title': "'True Story' Trailer",
+                'description': 'True Story',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        },
+        {
+            # ytwnews://cavideo/
+            'url': 'https://tw.video.yahoo.com/movie-tw/單車天使-中文版預-092316541.html',
+            'info_dict': {
+                'id': 'ba133ff2-0793-3510-b636-59dfe9ff6cff',
+                'ext': 'mp4',
+                'title': '單車天使 - 中文版預',
+                'description': '中文版預',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        },
     ]
 
     def _real_extract(self, url):
@@ -269,7 +295,8 @@ class YahooIE(InfoExtractor):
                 r'"first_videoid"\s*:\s*"([^"]+)"',
                 r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
                 r'<article[^>]data-uuid=["\']([^"\']+)',
-                r'yahoo://article/view\?.*\buuid=([^&"\']+)',
+                r'<meta[^<>]+yahoo://article/view\?.*\buuid=([^&"\']+)',
+                r'<meta[^<>]+["\']ytwnews://cavideo/(?:[^/]+/)+([\da-fA-F-]+)[&"\']',
             ]
             video_id = self._search_regex(
                 CONTENT_ID_REGEXES, webpage, 'content ID')
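The two added patterns look for app-link meta tags rather than bare scheme URLs in the page body. A quick standalone check against mocked-up markup (the meta tag attributes are assumptions for illustration; the uuids come from the tests above):

import re

CONTENT_ID_REGEXES = [
    r'<meta[^<>]+yahoo://article/view\?.*\buuid=([^&"\']+)',
    r'<meta[^<>]+["\']ytwnews://cavideo/(?:[^/]+/)+([\da-fA-F-]+)[&"\']',
]

# Mocked-up meta tags for illustration only.
samples = [
    '<meta property="al:ios:url" content="yahoo://article/view?uuid=071c4013-ce30-3a93-a5b2-e0413cd4a9d1&src=web">',
    '<meta property="al:ios:url" content="ytwnews://cavideo/movie-tw/ba133ff2-0793-3510-b636-59dfe9ff6cff">',
]
for webpage in samples:
    for regex in CONTENT_ID_REGEXES:
        m = re.search(regex, webpage)
        if m:
            print(m.group(1))
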
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2016.11.02'
+__version__ = '2016.11.08'