mirror of https://github.com/l1ving/youtube-dl synced 2025-01-23 22:42:50 +08:00

Merge branch 'master' into patch-1

Author: Urgau, 2018-06-07 19:00:53 +02:00 (committed by GitHub)
commit dad5c0c1d5
12 changed files with 302 additions and 119 deletions

.github/ISSUE_TEMPLATE.md

@@ -6,8 +6,8 @@
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.06.02*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.06.02**
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.06.04*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.06.04**
### Before submitting an *issue* make sure you have:
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2018.06.02
[debug] youtube-dl version 2018.06.04
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}

ChangeLog

@@ -1,3 +1,16 @@
version 2018.06.04
Extractors
+ [camtube] Add support for camtube.co
+ [twitter:card] Extract guest token (#16609)
+ [chaturbate] Use geo verification headers
+ [bbc] Add support for bbcthree (#16612)
* [youtube] Move metadata extraction after video availability check
+ [youtube] Extract track and artist
+ [safari] Add support for new URL schema (#16614)
* [adn] Fix extraction
version 2018.06.02
Core

devscripts/gh-pages/update-copyright.py

@@ -13,7 +13,7 @@ year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
with io.open(fn, encoding='utf-8') as f:
content = f.read()
newc = re.sub(r'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', 'Copyright © 2006-' + year, content)
newc = re.sub(r'(?P<copyright>Copyright © 2011-)(?P<year>[0-9]{4})', 'Copyright © 2011-' + year, content)
if content != newc:
tmpFn = fn + '.part'
with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
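
Aside (not part of the commit): a minimal sketch of what the updated substitution does, with an assumed sample footer string; a footer still reading 'Copyright © 2006-2017' would intentionally no longer be rewritten by the new pattern.
# coding: utf-8
# Hypothetical demo of the updated copyright substitution (sample string is assumed).
import datetime
import re
year = str(datetime.datetime.now().year)
content = 'Copyright © 2011-2017'
newc = re.sub(r'(?P<copyright>Copyright © 2011-)(?P<year>[0-9]{4})',
              'Copyright © 2011-' + year, content)
print(newc)  # e.g. 'Copyright © 2011-2018'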

docs/supportedsites.md

@@ -129,6 +129,7 @@
- **Camdemy**
- **CamdemyFolder**
- **CamModels**
- **CamTube**
- **CamWithHer**
- **canalc2.tv**
- **Canalplus**: mycanal.fr and piwiplus.fr

youtube_dl/extractor/camtube.py

@@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_timestamp,
)
class CamTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|api)\.)?camtube\.co/recordings?/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://camtube.co/recording/minafay-030618-1136-chaturbate-female',
'info_dict': {
'id': '42ad3956-dd5b-445a-8313-803ea6079fac',
'display_id': 'minafay-030618-1136-chaturbate-female',
'ext': 'mp4',
'title': 'minafay-030618-1136-chaturbate-female',
'duration': 1274,
'timestamp': 1528018608,
'upload_date': '20180603',
},
'params': {
'skip_download': True,
},
}]
_API_BASE = 'https://api.camtube.co'
def _real_extract(self, url):
display_id = self._match_id(url)
token = self._download_json(
'%s/rpc/session/new' % self._API_BASE, display_id,
'Downloading session token')['token']
self._set_cookie('api.camtube.co', 'session', token)
video = self._download_json(
'%s/recordings/%s' % (self._API_BASE, display_id), display_id,
headers={'Referer': url})
video_id = video['uuid']
timestamp = unified_timestamp(video.get('createdAt'))
duration = int_or_none(video.get('duration'))
view_count = int_or_none(video.get('viewCount'))
like_count = int_or_none(video.get('likeCount'))
creator = video.get('stageName')
formats = [{
'url': '%s/recordings/%s/manifest.m3u8'
% (self._API_BASE, video_id),
'format_id': 'hls',
'ext': 'mp4',
'protocol': 'm3u8_native',
}]
return {
'id': video_id,
'display_id': display_id,
'title': display_id,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'creator': creator,
'formats': formats,
}
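
Aside (not part of the commit): a hedged sketch of exercising the new extractor through youtube-dl's embedded Python API, reusing the test URL above and the test's skip_download option.
# Illustrative sketch: drive CamTubeIE via the public YoutubeDL API (metadata only).
from __future__ import unicode_literals
import youtube_dl
with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
    info = ydl.extract_info(
        'https://camtube.co/recording/minafay-030618-1136-chaturbate-female')
    print(info.get('id'), info.get('display_id'), info.get('duration'))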

youtube_dl/extractor/extractors.py

@@ -147,6 +147,7 @@ from .camdemy import (
CamdemyFolderIE
)
from .cammodels import CamModelsIE
from .camtube import CamTubeIE
from .camwithher import CamWithHerIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
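
Aside (not part of the commit): registering CamTubeIE here is what lets URL dispatch reach it; the match itself is just the _VALID_URL pattern from the new file, e.g. this small check.
# Illustrative only: the _VALID_URL from camtube.py applied to a recording URL.
import re
_VALID_URL = r'https?://(?:(?:www|api)\.)?camtube\.co/recordings?/(?P<id>[^/?#&]+)'
m = re.match(_VALID_URL, 'https://camtube.co/recording/minafay-030618-1136-chaturbate-female')
print(m.group('id'))  # minafay-030618-1136-chaturbate-female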

youtube_dl/extractor/nexx.py

@@ -29,14 +29,13 @@ class NexxIE(InfoExtractor):
_TESTS = [{
# movie
'url': 'https://api.nexx.cloud/v3/748/videos/byid/128907',
'md5': '828cea195be04e66057b846288295ba1',
'md5': '31899fd683de49ad46f4ee67e53e83fe',
'info_dict': {
'id': '128907',
'ext': 'mp4',
'title': 'Stiftung Warentest',
'alt_title': 'Wie ein Test abläuft',
'description': 'md5:d1ddb1ef63de721132abd38639cc2fd2',
'release_year': 2013,
'creator': 'SPIEGEL TV',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 2509,
@@ -62,6 +61,7 @@ class NexxIE(InfoExtractor):
'params': {
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
# does not work via arc
'url': 'nexx:741:1269984',
@@ -71,12 +71,26 @@ class NexxIE(InfoExtractor):
'ext': 'mp4',
'title': '1 TAG ohne KLO... wortwörtlich! 😑',
'alt_title': '1 TAG ohne KLO... wortwörtlich! 😑',
'description': 'md5:4604539793c49eda9443ab5c5b1d612f',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 607,
'timestamp': 1518614955,
'upload_date': '20180214',
},
}, {
# free cdn from http://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html
'url': 'nexx:747:1533779',
'md5': '6bf6883912b82b7069fb86c2297e9893',
'info_dict': {
'id': '1533779',
'ext': 'mp4',
'title': 'Aufregung um ausgebrochene Raubtiere',
'alt_title': 'Eifel-Zoo',
'description': 'md5:f21375c91c74ad741dcb164c427999d2',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 111,
'timestamp': 1527874460,
'upload_date': '20180601',
},
}, {
'url': 'https://api.nexxcdn.com/v3/748/videos/byid/128907',
'only_matching': True,
@@ -141,6 +155,139 @@ class NexxIE(InfoExtractor):
self._handle_error(result)
return result['result']
def _extract_free_formats(self, video, video_id):
stream_data = video['streamdata']
cdn = stream_data['cdnType']
assert cdn == 'free'
hash = video['general']['hash']
ps = compat_str(stream_data['originalDomain'])
if stream_data['applyFolderHierarchy'] == 1:
s = ('%04d' % int(video_id))[::-1]
ps += '/%s/%s' % (s[0:2], s[2:4])
ps += '/%s/%s_' % (video_id, hash)
t = 'http://%s' + ps
fd = stream_data['azureFileDistribution'].split(',')
cdn_provider = stream_data['cdnProvider']
def p0(p):
return '_%s' % p if stream_data['applyAzureStructure'] == 1 else ''
formats = []
if cdn_provider == 'ak':
t += ','
for i in fd:
p = i.split(':')
t += p[1] + p0(int(p[0])) + ','
t += '.mp4.csmil/master.%s'
elif cdn_provider == 'ce':
k = t.split('/')
h = k.pop()
http_base = t = '/'.join(k)
http_base = http_base % stream_data['cdnPathHTTP']
t += '/asset.ism/manifest.%s?dcp_ver=aos4&videostream='
for i in fd:
p = i.split(':')
tbr = int(p[0])
filename = '%s%s%s.mp4' % (h, p[1], p0(tbr))
f = {
'url': http_base + '/' + filename,
'format_id': '%s-http-%d' % (cdn, tbr),
'tbr': tbr,
}
width_height = p[1].split('x')
if len(width_height) == 2:
f.update({
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
})
formats.append(f)
a = filename + ':%s' % (tbr * 1000)
t += a + ','
t = t[:-1] + '&audiostream=' + a.split(':')[0]
else:
assert False
if cdn_provider == 'ce':
formats.extend(self._extract_mpd_formats(
t % (stream_data['cdnPathDASH'], 'mpd'), video_id,
mpd_id='%s-dash' % cdn, fatal=False))
formats.extend(self._extract_m3u8_formats(
t % (stream_data['cdnPathHLS'], 'm3u8'), video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='%s-hls' % cdn, fatal=False))
return formats
def _extract_azure_formats(self, video, video_id):
stream_data = video['streamdata']
cdn = stream_data['cdnType']
assert cdn == 'azure'
azure_locator = stream_data['azureLocator']
def get_cdn_shield_base(shield_type='', static=False):
for secure in ('', 's'):
cdn_shield = stream_data.get('cdnShield%sHTTP%s' % (shield_type, secure.upper()))
if cdn_shield:
return 'http%s://%s' % (secure, cdn_shield)
else:
if 'fb' in stream_data['azureAccount']:
prefix = 'df' if static else 'f'
else:
prefix = 'd' if static else 'p'
account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', ''))
return 'http://nx-%s%02d.akamaized.net/' % (prefix, account)
language = video['general'].get('language_raw') or ''
azure_stream_base = get_cdn_shield_base()
is_ml = ',' in language
azure_manifest_url = '%s%s/%s_src%s.ism/Manifest' % (
azure_stream_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + '%s'
protection_token = try_get(
video, lambda x: x['protectiondata']['token'], compat_str)
if protection_token:
azure_manifest_url += '?hdnts=%s' % protection_token
formats = self._extract_m3u8_formats(
azure_manifest_url % '(format=m3u8-aapl)',
video_id, 'mp4', 'm3u8_native',
m3u8_id='%s-hls' % cdn, fatal=False)
formats.extend(self._extract_mpd_formats(
azure_manifest_url % '(format=mpd-time-csf)',
video_id, mpd_id='%s-dash' % cdn, fatal=False))
formats.extend(self._extract_ism_formats(
azure_manifest_url % '', video_id, ism_id='%s-mss' % cdn, fatal=False))
azure_progressive_base = get_cdn_shield_base('Prog', True)
azure_file_distribution = stream_data.get('azureFileDistribution')
if azure_file_distribution:
fds = azure_file_distribution.split(',')
if fds:
for fd in fds:
ss = fd.split(':')
if len(ss) == 2:
tbr = int_or_none(ss[0])
if tbr:
f = {
'url': '%s%s/%s_src_%s_%d.mp4' % (
azure_progressive_base, azure_locator, video_id, ss[1], tbr),
'format_id': '%s-http-%d' % (cdn, tbr),
'tbr': tbr,
}
width_height = ss[1].split('x')
if len(width_height) == 2:
f.update({
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
})
formats.append(f)
return formats
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
domain_id = mobj.group('domain_id') or mobj.group('domain_id_s')
@@ -220,72 +367,15 @@ class NexxIE(InfoExtractor):
general = video['general']
title = general['title']
stream_data = video['streamdata']
language = general.get('language_raw') or ''
cdn = video['streamdata']['cdnType']
# TODO: reverse more cdns
cdn = stream_data['cdnType']
assert cdn == 'azure'
azure_locator = stream_data['azureLocator']
def get_cdn_shield_base(shield_type='', static=False):
for secure in ('', 's'):
cdn_shield = stream_data.get('cdnShield%sHTTP%s' % (shield_type, secure.upper()))
if cdn_shield:
return 'http%s://%s' % (secure, cdn_shield)
else:
if 'fb' in stream_data['azureAccount']:
prefix = 'df' if static else 'f'
else:
prefix = 'd' if static else 'p'
account = int(stream_data['azureAccount'].replace('nexxplayplus', '').replace('nexxplayfb', ''))
return 'http://nx-%s%02d.akamaized.net/' % (prefix, account)
azure_stream_base = get_cdn_shield_base()
is_ml = ',' in language
azure_manifest_url = '%s%s/%s_src%s.ism/Manifest' % (
azure_stream_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + '%s'
protection_token = try_get(
video, lambda x: x['protectiondata']['token'], compat_str)
if protection_token:
azure_manifest_url += '?hdnts=%s' % protection_token
formats = self._extract_m3u8_formats(
azure_manifest_url % '(format=m3u8-aapl)',
video_id, 'mp4', 'm3u8_native',
m3u8_id='%s-hls' % cdn, fatal=False)
formats.extend(self._extract_mpd_formats(
azure_manifest_url % '(format=mpd-time-csf)',
video_id, mpd_id='%s-dash' % cdn, fatal=False))
formats.extend(self._extract_ism_formats(
azure_manifest_url % '', video_id, ism_id='%s-mss' % cdn, fatal=False))
azure_progressive_base = get_cdn_shield_base('Prog', True)
azure_file_distribution = stream_data.get('azureFileDistribution')
if azure_file_distribution:
fds = azure_file_distribution.split(',')
if fds:
for fd in fds:
ss = fd.split(':')
if len(ss) == 2:
tbr = int_or_none(ss[0])
if tbr:
f = {
'url': '%s%s/%s_src_%s_%d.mp4' % (
azure_progressive_base, azure_locator, video_id, ss[1], tbr),
'format_id': '%s-http-%d' % (cdn, tbr),
'tbr': tbr,
}
width_height = ss[1].split('x')
if len(width_height) == 2:
f.update({
'width': int_or_none(width_height[0]),
'height': int_or_none(width_height[1]),
})
formats.append(f)
if cdn == 'azure':
formats = self._extract_azure_formats(video, video_id)
elif cdn == 'free':
formats = self._extract_free_formats(video, video_id)
else:
# TODO: reverse more cdns
assert False
self._sort_formats(formats)
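
Aside (not part of the commit): both the new free-CDN path and the Azure path parse streamdata['azureFileDistribution'] by splitting on ',' and ':'; a standalone sketch of that parsing, with an assumed sample value, looks like this.
# Sketch only: entries are assumed to look like '<tbr>:<width>x<height>', as the
# split(',') / split(':') handling above implies.
from youtube_dl.utils import int_or_none
def parse_file_distribution(azure_file_distribution):
    parsed = []
    for entry in azure_file_distribution.split(','):
        tbr, _, resolution = entry.partition(':')
        width, _, height = resolution.partition('x')
        parsed.append({
            'tbr': int_or_none(tbr),
            'width': int_or_none(width),
            'height': int_or_none(height),
        })
    return parsed
print(parse_file_distribution('1100:426x240,2000:640x360'))  # assumed sample value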

youtube_dl/extractor/pbs.py

@@ -368,6 +368,19 @@ class PBSIE(InfoExtractor):
'title': 'Victoria - A Soldier\'s Daughter / The Green-Eyed Monster',
'description': 'md5:37efbac85e0c09b009586523ec143652',
'duration': 6292,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['HTTP Error 403: Forbidden'],
},
'url': 'http://www.pbs.org/wgbh/roadshow/watch/episode/2105-indianapolis-hour-2/',
'info_dict': {
'id': '2365936247',
'ext': 'mp4',
'title': 'Antiques Roadshow - Indianapolis, Hour 2',
'description': 'md5:524b32249db55663e7231b6b8d1671a2',
'duration': 3180,
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
@@ -437,6 +450,7 @@ class PBSIE(InfoExtractor):
r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
r"(?s)window\.PBS\.playerConfig\s*=\s*{.*?id\s*:\s*'([0-9]+)',",
r'<div[^>]+\bdata-cove-id=["\'](\d+)"', # http://www.pbs.org/wgbh/roadshow/watch/episode/2105-indianapolis-hour-2/
]
media_id = self._search_regex(
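
Aside (not part of the commit): a quick check of the data-cove-id pattern added above against a made-up snippet shaped like the page it targets.
# Hypothetical snippet; only its shape matters for the new pattern.
import re
webpage = '<div class="video-player" data-cove-id="2365936247">'
print(re.search(r'<div[^>]+\bdata-cove-id=["\'](\d+)"', webpage).group(1))  # 2365936247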

youtube_dl/extractor/rbmaradio.py

@@ -53,7 +53,7 @@ class RBMARadioIE(InfoExtractor):
'format_id': compat_str(abr),
'abr': abr,
'vcodec': 'none',
} for abr in (96, 128, 256)]
} for abr in (96, 128, 192, 256)]
self._check_formats(formats, episode_id)
description = clean_html(episode.get('longTeaser'))

youtube_dl/extractor/tv4.py

@@ -1,13 +1,12 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_iso8601,
try_get,
determine_ext,
)
@@ -78,42 +77,25 @@ class TV4IE(InfoExtractor):
title = info['title']
subtitles = {}
formats = []
# http formats are linked with unresolvable host
for kind in ('hls3', ''):
data = self._download_json(
'https://prima.tv4play.se/api/web/asset/%s/play.json' % video_id,
video_id, 'Downloading sources JSON', query={
'protocol': kind,
'videoFormat': 'MP4+WEBVTT',
})
items = try_get(data, lambda x: x['playback']['items']['item'])
if not items:
continue
if isinstance(items, dict):
items = [items]
for item in items:
manifest_url = item.get('url')
if not isinstance(manifest_url, compat_str):
continue
ext = determine_ext(manifest_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=kind, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_akamai_formats(
manifest_url, video_id, {
'hls': 'tv4play-i.akamaihd.net',
}))
elif ext == 'webvtt':
subtitles = self._merge_subtitles(
subtitles, {
'sv': [{
'url': manifest_url,
'ext': 'vtt',
}]})
manifest_url = self._download_json(
'https://playback-api.b17g.net/media/' + video_id,
video_id, query={
'service': 'tv4',
'device': 'browser',
'protocol': 'hls',
})['playbackItem']['manifestUrl']
formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False)
formats.extend(self._extract_mpd_formats(
manifest_url.replace('.m3u8', '.mpd'),
video_id, mpd_id='dash', fatal=False))
formats.extend(self._extract_f4m_formats(
manifest_url.replace('.m3u8', '.f4m'),
video_id, f4m_id='hds', fatal=False))
formats.extend(self._extract_ism_formats(
re.sub(r'\.ism/.+?\.m3u8', r'.ism/Manifest', manifest_url),
video_id, ism_id='mss', fatal=False))
if not formats and info.get('is_geo_restricted'):
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
@@ -124,7 +106,7 @@
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
# 'subtitles': subtitles,
'description': info.get('description'),
'timestamp': parse_iso8601(info.get('broadcast_date_time')),
'duration': int_or_none(info.get('duration')),
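
Aside (not part of the commit): a hedged sketch of the new playback flow, assuming the playbackItem/manifestUrl response shape used above; the asset id is hypothetical and real requests may need geo access.
# Sketch only: fetch the HLS manifest URL, then derive the other manifests as in the diff.
import json
try:
    from urllib.parse import urlencode
    from urllib.request import urlopen
except ImportError:  # Python 2
    from urllib import urlencode
    from urllib2 import urlopen
video_id = '3836590'  # hypothetical TV4 asset id
query = urlencode({'service': 'tv4', 'device': 'browser', 'protocol': 'hls'})
data = json.loads(urlopen(
    'https://playback-api.b17g.net/media/%s?%s' % (video_id, query)).read().decode('utf-8'))
manifest_url = data['playbackItem']['manifestUrl']
print(manifest_url)                           # HLS
print(manifest_url.replace('.m3u8', '.mpd'))  # DASH, derived as above
print(manifest_url.replace('.m3u8', '.f4m'))  # HDS, derived as above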

youtube_dl/extractor/twitter.py

@@ -108,6 +108,8 @@ class TwitterCardIE(TwitterBaseIE):
},
]
_API_BASE = 'https://api.twitter.com/1.1'
def _parse_media_info(self, media_info, video_id):
formats = []
for media_variant in media_info.get('variants', []):
@@ -149,7 +151,7 @@
main_script, 'bearer token')
# https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-show-id
api_data = self._download_json(
'https://api.twitter.com/1.1/statuses/show/%s.json' % video_id,
'%s/statuses/show/%s.json' % (self._API_BASE, video_id),
video_id, 'Downloading API data',
headers={
'Authorization': 'Bearer ' + bearer_token,
@@ -229,11 +231,22 @@
break
if not formats:
headers = {
'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw',
'Referer': url,
}
ct0 = self._get_cookies(url).get('ct0')
if ct0:
headers['csrf_token'] = ct0.value
guest_token = self._download_json(
'%s/guest/activate.json' % self._API_BASE, video_id,
'Downloading guest token', data=b'',
headers=headers)['guest_token']
headers['x-guest-token'] = guest_token
self._set_cookie('api.twitter.com', 'gt', guest_token)
config = self._download_json(
'https://api.twitter.com/1.1/videos/tweet/config/%s.json' % video_id,
video_id, headers={
'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAIK1zgAAAAAA2tUWuhGZ2JceoId5GwYWU5GspY4%3DUq7gzFoCZs1QfwGoVdvSac3IniczZEYXIcDyumCauIXpcAPorE',
})
'%s/videos/tweet/config/%s.json' % (self._API_BASE, video_id),
video_id, headers=headers)
track = config['track']
vmap_url = track.get('vmapUrl')
if vmap_url:
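
Aside (not part of the commit): a hedged sketch of the guest-token handshake added above, reusing the endpoints and bearer token from the diff; the tweet id is hypothetical and the Referer/csrf_token headers from the diff may also be needed in practice.
# Sketch only: activate a guest session, then reuse the token on the config call.
import json
try:
    from urllib.request import Request, urlopen
except ImportError:  # Python 2
    from urllib2 import Request, urlopen
API_BASE = 'https://api.twitter.com/1.1'
BEARER = ('Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc'
          '%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw')
req = Request('%s/guest/activate.json' % API_BASE, data=b'',  # empty POST body, as in the diff
              headers={'Authorization': BEARER})
guest_token = json.loads(urlopen(req).read().decode('utf-8'))['guest_token']
video_id = '560070183650213889'  # hypothetical tweet id
req = Request('%s/videos/tweet/config/%s.json' % (API_BASE, video_id),
              headers={'Authorization': BEARER, 'x-guest-token': guest_token})
config = json.loads(urlopen(req).read().decode('utf-8'))
print(config['track'].get('vmapUrl'))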

youtube_dl/version.py

@@ -1,3 +1,3 @@
from __future__ import unicode_literals
__version__ = '2018.06.02'
__version__ = '2018.06.04'