mirror of https://github.com/l1ving/youtube-dl synced 2025-03-12 20:49:38 +08:00

Merge remote-tracking branch 'origin/master' into myversion

Andrew Udvare authored 2018-03-20 23:50:26 -04:00
commit 478f92704d
GPG Key ID: 1AFD9AFC120C26DD (no known key found for this signature in database)
13 changed files with 326 additions and 140 deletions

.github/ISSUE_TEMPLATE.md

@@ -6,8 +6,8 @@
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.03.10*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.03.10**
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.03.14*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.03.14**
### Before submitting an *issue* make sure you have:
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2018.03.10
[debug] youtube-dl version 2018.03.14
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}

ChangeLog

@@ -1,7 +1,11 @@
version <unreleased>
version 2018.03.14
Extractors
* [soundcloud] Update client id (#15866)
+ [tennistv] Add support for tennistv.com
+ [line] Add support for tv.line.me (#9427)
* [xnxx] Fix extraction (#15817)
* [njpwworld] Fix authentication (#15815)
version 2018.03.10

docs/supportedsites.md

@@ -427,6 +427,7 @@
- **limelight**
- **limelight:channel**
- **limelight:channel_list**
- **LineTV**
- **LiTV**
- **LiveLeak**
- **LiveLeakEmbed**
@@ -829,6 +830,7 @@
- **TeleQuebecLive**
- **TeleTask**
- **Telewebion**
- **TennisTV**
- **TF1**
- **TFO**
- **TheIntercept**

test/test_utils.py

@@ -352,6 +352,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
def test_determine_ext(self):
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')

youtube_dl/extractor/bilibili.py

@@ -27,14 +27,14 @@ class BiliBiliIE(InfoExtractor):
_TESTS = [{
'url': 'http://www.bilibili.tv/video/av1074402/',
'md5': '9fa226fe2b8a9a4d5a69b4c6a183417e',
'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
'info_dict': {
'id': '1074402',
'ext': 'mp4',
'ext': 'flv',
'title': '【金坷垃】金泡沫',
'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
'duration': 308.315,
'timestamp': 1398012660,
'duration': 308.067,
'timestamp': 1398012678,
'upload_date': '20140420',
'thumbnail': r're:^https?://.+\.jpg',
'uploader': '菊子桑',
@@ -59,17 +59,38 @@ class BiliBiliIE(InfoExtractor):
'url': 'http://www.bilibili.com/video/av8903802/',
'info_dict': {
'id': '8903802',
'ext': 'mp4',
'title': '阿滴英文|英文歌分享#6 "Closer',
'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
'uploader': '阿滴英文',
'uploader_id': '65880958',
'timestamp': 1488382620,
'upload_date': '20170301',
},
'params': {
'skip_download': True, # Test metadata only
},
'playlist': [{
'info_dict': {
'id': '8903802_part1',
'ext': 'flv',
'title': '阿滴英文|英文歌分享#6 "Closer',
'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
'uploader': '阿滴英文',
'uploader_id': '65880958',
'timestamp': 1488382634,
'upload_date': '20170301',
},
'params': {
'skip_download': True, # Test metadata only
},
}, {
'info_dict': {
'id': '8903802_part2',
'ext': 'flv',
'title': '阿滴英文|英文歌分享#6 "Closer',
'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
'uploader': '阿滴英文',
'uploader_id': '65880958',
'timestamp': 1488382634,
'upload_date': '20170301',
},
'params': {
'skip_download': True, # Test metadata only
},
}]
}]
_APP_KEY = '84956560bc028eb7'
@@ -92,9 +113,13 @@ class BiliBiliIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
if 'anime/' not in url:
cid = compat_parse_qs(self._search_regex(
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
cid = self._search_regex(
r'cid(?:["\']:|=)(\d+)', webpage, 'cid',
default=None
) or compat_parse_qs(self._search_regex(
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
webpage, 'player parameters'))['cid'][0]
else:
if 'no_bangumi_tip' not in smuggled_data:
@@ -114,53 +139,66 @@ class BiliBiliIE(InfoExtractor):
self._report_error(js)
cid = js['result']['cid']
payload = 'appkey=%s&cid=%s&otype=json&quality=2&type=mp4' % (self._APP_KEY, cid)
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
headers = {
'Referer': url
}
headers.update(self.geo_verification_headers())
video_info = self._download_json(
'http://interface.bilibili.com/playurl?%s&sign=%s' % (payload, sign),
video_id, note='Downloading video info page',
headers=headers)
if 'durl' not in video_info:
self._report_error(video_info)
entries = []
for idx, durl in enumerate(video_info['durl']):
formats = [{
'url': durl['url'],
'filesize': int_or_none(durl['size']),
}]
for backup_url in durl.get('backup_url', []):
formats.append({
'url': backup_url,
# backup URLs have lower priorities
'preference': -2 if 'hd.mp4' in backup_url else -3,
RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
for num, rendition in enumerate(RENDITIONS, start=1):
payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
video_info = self._download_json(
'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
video_id, note='Downloading video info page',
headers=headers, fatal=num == len(RENDITIONS))
if not video_info:
continue
if 'durl' not in video_info:
if num < len(RENDITIONS):
continue
self._report_error(video_info)
for idx, durl in enumerate(video_info['durl']):
formats = [{
'url': durl['url'],
'filesize': int_or_none(durl['size']),
}]
for backup_url in durl.get('backup_url', []):
formats.append({
'url': backup_url,
# backup URLs have lower priorities
'preference': -2 if 'hd.mp4' in backup_url else -3,
})
for a_format in formats:
a_format.setdefault('http_headers', {}).update({
'Referer': url,
})
self._sort_formats(formats)
entries.append({
'id': '%s_part%s' % (video_id, idx),
'duration': float_or_none(durl.get('length'), 1000),
'formats': formats,
})
break
for a_format in formats:
a_format.setdefault('http_headers', {}).update({
'Referer': url,
})
self._sort_formats(formats)
entries.append({
'id': '%s_part%s' % (video_id, idx),
'duration': float_or_none(durl.get('length'), 1000),
'formats': formats,
})
title = self._html_search_regex('<h1[^>]*>([^<]+)</h1>', webpage, 'title')
title = self._html_search_regex(
('<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
group='title')
description = self._html_search_meta('description', webpage)
timestamp = unified_timestamp(self._html_search_regex(
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', default=None))
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
default=None) or self._html_search_meta(
'uploadDate', webpage, 'timestamp', default=None))
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
# TODO 'view_count' requires deobfuscating Javascript
@@ -174,13 +212,16 @@ class BiliBiliIE(InfoExtractor):
}
uploader_mobj = re.search(
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"',
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
webpage)
if uploader_mobj:
info.update({
'uploader': uploader_mobj.group('name'),
'uploader_id': uploader_mobj.group('id'),
})
if not info.get('uploader'):
info['uploader'] = self._html_search_meta(
'author', webpage, 'uploader', default=None)
for entry in entries:
entry.update(info)
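Note on the change above: the rewritten loop tries the high-quality rendition (qn=80) first and only falls back to the legacy quality=2&type=mp4 request when the API returns no usable durl; only the last attempt is fatal. A minimal sketch of the request signing, where the cid is a made-up placeholder and _BILIBILI_KEY's value, defined elsewhere in the file, is stubbed out:

import hashlib

APP_KEY = '84956560bc028eb7'  # _APP_KEY from the extractor

def sign_payload(payload, secret):
    # The playurl API expects sign = md5(query_string + app_secret), hex-encoded
    return hashlib.md5((payload + secret).encode('utf-8')).hexdigest()

# First entry of RENDITIONS; '15606' is a hypothetical cid for illustration
payload = 'appkey=%s&cid=%s&otype=json&qn=80&quality=80&type=' % (APP_KEY, '15606')
url = 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (
    payload, sign_payload(payload, 'PLACEHOLDER_SECRET'))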

youtube_dl/extractor/extractors.py

@@ -1062,6 +1062,7 @@ from .telequebec import (
)
from .teletask import TeleTaskIE
from .telewebion import TelewebionIE
from .tennistv import TennisTVIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .tfo import TFOIE

youtube_dl/extractor/heise.py

@@ -7,6 +7,7 @@ from .youtube import YoutubeIE
from ..utils import (
determine_ext,
int_or_none,
NO_DEFAULT,
parse_iso8601,
smuggle_url,
xpath_text,
@@ -16,18 +17,19 @@ from ..utils import (
class HeiseIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?heise\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\.html'
_TESTS = [{
# kaltura embed
'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html',
'md5': 'ffed432483e922e88545ad9f2f15d30e',
'info_dict': {
'id': '2404147',
'id': '1_kkrq94sm',
'ext': 'mp4',
'title': "Podcast: c't uplink 3.3 Owncloud / Tastaturen / Peilsender Smartphone",
'format_id': 'mp4_720p',
'timestamp': 1411812600,
'upload_date': '20140927',
'timestamp': 1512734959,
'upload_date': '20171208',
'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20',
'thumbnail': r're:^https?://.*/gallery/$',
}
},
'params': {
'skip_download': True,
},
}, {
# YouTube embed
'url': 'http://www.heise.de/newsticker/meldung/Netflix-In-20-Jahren-vom-Videoverleih-zum-TV-Revolutionaer-3814130.html',
@@ -46,13 +48,26 @@ class HeiseIE(InfoExtractor):
},
}, {
'url': 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html',
'md5': '4b58058b46625bdbd841fc2804df95fc',
'info_dict': {
'id': '1_ntrmio2s',
'ext': 'mp4',
'title': "nachgehakt: Wie sichert das c't-Tool Restric'tor Windows 10 ab?",
'description': 'md5:47e8ffb6c46d85c92c310a512d6db271',
'timestamp': 1512470717,
'upload_date': '20171205',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.heise.de/ct/artikel/c-t-uplink-20-8-Staubsaugerroboter-Xiaomi-Vacuum-2-AR-Brille-Meta-2-und-Android-rooten-3959893.html',
'info_dict': {
'id': '1_59mk80sf',
'ext': 'mp4',
'title': 'ct10 nachgehakt hos restrictor',
'title': "c't uplink 20.8: Staubsaugerroboter Xiaomi Vacuum 2, AR-Brille Meta 2 und Android rooten",
'description': 'md5:f50fe044d3371ec73a8f79fcebd74afc',
'timestamp': 1517567237,
'upload_date': '20180202',
},
'params': {
'skip_download': True,
@@ -72,19 +87,40 @@ class HeiseIE(InfoExtractor):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta('fulltitle', webpage, default=None)
if not title or title == "c't":
title = self._search_regex(
r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"',
webpage, 'title')
def extract_title(default=NO_DEFAULT):
title = self._html_search_meta(
('fulltitle', 'title'), webpage, default=None)
if not title or title == "c't":
title = self._search_regex(
r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"',
webpage, 'title', default=None)
if not title:
title = self._html_search_regex(
r'<h1[^>]+\bclass=["\']article_page_title[^>]+>(.+?)<',
webpage, 'title', default=default)
return title
yt_urls = YoutubeIE._extract_urls(webpage)
if yt_urls:
return self.playlist_from_matches(yt_urls, video_id, title, ie=YoutubeIE.ie_key())
title = extract_title(default=None)
description = self._og_search_description(
webpage, default=None) or self._html_search_meta(
'description', webpage)
kaltura_url = KalturaIE._extract_url(webpage)
if kaltura_url:
return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
return {
'_type': 'url_transparent',
'url': smuggle_url(kaltura_url, {'source_url': url}),
'ie_key': KalturaIE.ie_key(),
'title': title,
'description': description,
}
yt_urls = YoutubeIE._extract_urls(webpage)
if yt_urls:
return self.playlist_from_matches(
yt_urls, video_id, title, ie=YoutubeIE.ie_key())
title = extract_title()
container_id = self._search_regex(
r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"',
@@ -115,10 +151,6 @@ class HeiseIE(InfoExtractor):
})
self._sort_formats(formats)
description = self._og_search_description(
webpage, default=None) or self._html_search_meta(
'description', webpage)
return {
'id': video_id,
'title': title,
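Note on the change above: extract_title cascades through progressively weaker title sources (the fulltitle/title meta tags, the JW player's data-title attribute, the article headline) and uses youtube-dl's NO_DEFAULT sentinel so the same helper is non-fatal on the Kaltura/YouTube embed paths (extract_title(default=None)) but fatal on the final call (extract_title()). A stripped-down illustration of that sentinel pattern, not the extractor's actual code:

NO_DEFAULT = object()  # sentinel meaning "no default supplied, failure is fatal"

def first_truthy(candidates, default=NO_DEFAULT):
    # Return the first usable candidate, else the default, else raise
    for candidate in candidates:
        if candidate:
            return candidate
    if default is NO_DEFAULT:
        raise RuntimeError('unable to extract title')
    return default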

youtube_dl/extractor/instagram.py

@@ -1,6 +1,6 @@
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor
@@ -238,70 +238,58 @@ class InstagramUserIE(InfoExtractor):
}
def _entries(self, uploader_id):
query = {
'__a': 1,
}
def get_count(kind):
def get_count(suffix):
return int_or_none(try_get(
node, lambda x: x['%ss' % kind]['count']))
node, lambda x: x['edge_media_' + suffix]['count']))
for page_num in itertools.count(1):
page = self._download_json(
'https://instagram.com/%s/' % uploader_id, uploader_id,
note='Downloading page %d' % page_num,
fatal=False, query=query)
if not page:
break
nodes = try_get(page, lambda x: x['user']['media']['nodes'], list)
if not nodes:
break
max_id = None
for node in nodes:
node_id = node.get('id')
if node_id:
max_id = node_id
if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
continue
video_id = node.get('code')
if not video_id:
continue
info = self.url_result(
'https://instagram.com/p/%s/' % video_id,
ie=InstagramIE.ie_key(), video_id=video_id)
description = try_get(
node, [lambda x: x['caption'], lambda x: x['text']['id']],
compat_str)
thumbnail = node.get('thumbnail_src') or node.get('display_src')
timestamp = int_or_none(node.get('date'))
comment_count = get_count('comment')
like_count = get_count('like')
view_count = int_or_none(node.get('video_views'))
info.update({
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'comment_count': comment_count,
'like_count': like_count,
'view_count': view_count,
edges = self._download_json(
'https://www.instagram.com/graphql/query/', uploader_id, query={
'query_hash': '472f257a40c653c64c666ce877d59d2b',
'variables': json.dumps({
'id': uploader_id,
'first': 999999999,
})
})['data']['user']['edge_owner_to_timeline_media']['edges']
yield info
for edge in edges:
node = edge['node']
if not max_id:
break
if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
continue
video_id = node.get('shortcode')
if not video_id:
continue
query['max_id'] = max_id
info = self.url_result(
'https://instagram.com/p/%s/' % video_id,
ie=InstagramIE.ie_key(), video_id=video_id)
description = try_get(
node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
compat_str)
thumbnail = node.get('thumbnail_src') or node.get('display_src')
timestamp = int_or_none(node.get('taken_at_timestamp'))
comment_count = get_count('to_comment')
like_count = get_count('preview_like')
view_count = int_or_none(node.get('video_view_count'))
info.update({
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'comment_count': comment_count,
'like_count': like_count,
'view_count': view_count,
})
yield info
def _real_extract(self, url):
uploader_id = self._match_id(url)
username = self._match_id(url)
uploader_id = self._download_json(
'https://instagram.com/%s/' % username, username, query={
'__a': 1,
})['graphql']['user']['id']
return self.playlist_result(
self._entries(uploader_id), uploader_id, uploader_id)
self._entries(uploader_id), username, username)
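Note on the change above: _real_extract now resolves the username to a numeric user id via the ?__a=1 profile endpoint, and _entries fetches the timeline with a single GraphQL call instead of paging with max_id. Roughly the request it builds; the query_hash and field names come from the diff, while the user id shown is a hypothetical placeholder:

import json

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

variables = json.dumps({
    'id': '1234567',     # numeric user id from the ?__a=1 lookup (placeholder)
    'first': 999999999,  # request the entire timeline as one page
})
url = 'https://www.instagram.com/graphql/query/?' + urlencode({
    'query_hash': '472f257a40c653c64c666ce877d59d2b',
    'variables': variables,
})
# The response is unwrapped as:
#   data['user']['edge_owner_to_timeline_media']['edges'] -> [{'node': {...}}, ...]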

youtube_dl/extractor/soundcloud.py

@@ -157,7 +157,7 @@ class SoundcloudIE(InfoExtractor):
},
]
_CLIENT_ID = 'DQskPX1pntALRzMp4HSxya3Mc0AO66Ro'
_CLIENT_ID = 'LvWovRaJZlWCHql0bISuum8Bd2KX79mb'
@staticmethod
def _extract_urls(webpage):
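Note on the change above: SoundCloud rotates this token periodically, and the extractor sends it as a client_id query parameter on its API requests, so updating the constant alone restores extraction. An illustrative sketch only; the real endpoints are built elsewhere in this file:

_CLIENT_ID = 'LvWovRaJZlWCHql0bISuum8Bd2KX79mb'  # value from this diff

# Hypothetical resolve-style call, just to show where the token goes
api_url = 'https://api.soundcloud.com/resolve?url=%s&client_id=%s' % (
    'https://soundcloud.com/some-artist/some-track', _CLIENT_ID)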

youtube_dl/extractor/tennistv.py

@@ -0,0 +1,112 @@
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_timestamp,
)
class TennisTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tennistv\.com/videos/(?P<id>[-a-z0-9]+)'
_TEST = {
'url': 'https://www.tennistv.com/videos/indian-wells-2018-verdasco-fritz',
'info_dict': {
'id': 'indian-wells-2018-verdasco-fritz',
'ext': 'mp4',
'title': 'Fernando Verdasco v Taylor Fritz',
'description': 're:^After his stunning victory.{174}$',
'thumbnail': 'https://atp-prod.akamaized.net/api/images/v1/images/112831/landscape/1242/0',
'timestamp': 1521017381,
'upload_date': '20180314',
},
'params': {
'skip_download': True,
},
'skip': 'Requires email and password of a subscribed account',
}
_NETRC_MACHINE = 'tennistv'
def _login(self):
(username, password) = self._get_login_info()
if not username or not password:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
login_form = {
'Email': username,
'Password': password,
}
login_json = json.dumps(login_form).encode('utf-8')
headers = {
'content-type': 'application/json',
'Referer': 'https://www.tennistv.com/login',
'Origin': 'https://www.tennistv.com',
}
login_result = self._download_json(
'https://www.tennistv.com/api/users/v1/login', None,
note='Logging in',
errnote='Login failed (wrong password?)',
headers=headers,
data=login_json)
if login_result['error']['errorCode']:
raise ExtractorError('Login failed, %s said: %r' % (self.IE_NAME, login_result['error']['errorMessage']))
if login_result['entitlement'] != 'SUBSCRIBED':
self.report_warning('%s may not be subscribed to %s.' % (username, self.IE_NAME))
self._session_token = login_result['sessionToken']
def _real_initialize(self):
self._login()
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
internal_id = self._search_regex(r'video=([0-9]+)', webpage, 'internal video id')
headers = {
'Origin': 'https://www.tennistv.com',
'authorization': 'ATP %s' % self._session_token,
'content-type': 'application/json',
'Referer': url,
}
check_data = {
'videoID': internal_id,
'VideoUrlType': 'HLSV3',
}
check_json = json.dumps(check_data).encode('utf-8')
check_result = self._download_json(
'https://www.tennistv.com/api/users/v1/entitlementchecknondiva',
video_id, note='Checking video authorization', headers=headers, data=check_json)
formats = self._extract_m3u8_formats(check_result['contentUrl'], video_id, ext='mp4')
vdata_url = 'https://www.tennistv.com/api/channels/v1/de/none/video/%s' % video_id
vdata = self._download_json(vdata_url, video_id)
timestamp = unified_timestamp(vdata['timestamp'])
thumbnail = vdata['video']['thumbnailUrl']
description = vdata['displayText']['description']
title = vdata['video']['title']
series = vdata['tour']
venue = vdata['displayText']['venue']
round_str = vdata['seo']['round']
return {
'id': video_id,
'title': title,
'description': description,
'formats': formats,
'thumbnail': thumbnail,
'timestamp': timestamp,
'series': series,
'season': venue,
'episode': round_str,
}
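Note on the new extractor above: a rough standalone sketch of the login handshake _login performs, using only the endpoint, headers, and fields visible in the file; the credentials are placeholders and the error/entitlement handling is elided:

import json

try:
    from urllib.request import Request, urlopen  # Python 3
except ImportError:
    from urllib2 import Request, urlopen  # Python 2

payload = json.dumps({
    'Email': 'user@example.com',  # subscriber credentials (placeholders)
    'Password': 'hunter2',
}).encode('utf-8')
req = Request(
    'https://www.tennistv.com/api/users/v1/login', data=payload,
    headers={
        'content-type': 'application/json',
        'Origin': 'https://www.tennistv.com',
        'Referer': 'https://www.tennistv.com/login',
    })
result = json.loads(urlopen(req).read().decode('utf-8'))
session_token = result['sessionToken']
# Later requests authenticate with: 'authorization: ATP %s' % session_token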

youtube_dl/utils.py

@@ -1246,6 +1246,11 @@ def unified_timestamp(date_str, day_first=True):
if m:
date_str = date_str[:-len(m.group('tz'))]
# Python only supports microseconds, so remove nanoseconds
m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
if m:
date_str = m.group(1)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
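Note on the change above: datetime.strptime's %f directive parses at most six fractional digits, so a timestamp like the new test case's 2018-03-14T08:32:43.1493874+00:00 raises ValueError if passed through untruncated. The added regex keeps exactly six digits and drops the rest, which this self-contained check demonstrates:

import datetime
import re

date_str = '2018-03-14T08:32:43.1493874'  # seven fractional digits
m = re.search(
    r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$',
    date_str)
if m:
    date_str = m.group(1)  # '2018-03-14T08:32:43.149387'
print(datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%f'))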

youtube_dl/version.py

@@ -1,3 +1,3 @@
from __future__ import unicode_literals
__version__ = '2018.03.10'
__version__ = '2018.03.14'