mirror of https://github.com/l1ving/youtube-dl

[vrak] Improve and update test (closes #11452)

Sergey M․ 2017-03-03 23:58:16 +07:00
parent d16f27ca27
commit 4d058c9862

youtube_dl/extractor/vrak.py

@@ -4,65 +4,77 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
 from .brightcove import BrightcoveNewIE
+from ..utils import (
+    int_or_none,
+    parse_age_limit,
+    smuggle_url,
+    unescapeHTML,
+)
 
 
 class VrakIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?target=(?P<id>[0-9\.]+).*'
+    _VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?\btarget=(?P<id>[\d.]+)'
     _TEST = {
-        'url': 'http://www.vrak.tv/videos?target=1.2240923&filtre=emission&id=1.1806721',
-        'md5': 'c5d5ce237bca3b1e990ce1b48d1f0948',
+        'url': 'http://www.vrak.tv/videos?target=1.2306782&filtre=emission&id=1.1806721',
         'info_dict': {
-            'id': '5231040869001',
+            'id': '5345661243001',
             'ext': 'mp4',
-            'title': 'Référendums américains, animés japonais et hooligans russes',
-            'upload_date': '20161201',
-            'description': 'This video file has been uploaded automatically using Oprah. It should be updated with real description soon.',
-            'timestamp': 1480628425,
+            'title': 'Obésité, film de hockey et Roseline Filion',
+            'timestamp': 1488492126,
+            'upload_date': '20170302',
+            'uploader_id': '2890187628001',
+            'creator': 'VRAK.TV',
+            'age_limit': 8,
+            'series': 'ALT (Actualité Légèrement Tordue)',
+            'episode': 'Obésité, film de hockey et Roseline Filion',
+            'tags': list,
         },
+        'params': {
+            'skip_download': True,
+        },
     }
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2890187628001/default_default/index.html?videoId=%s'
 
     def _real_extract(self, url):
-        url_id = self._match_id(url)
-        webpage = self._download_webpage(url, url_id)
+        video_id = self._match_id(url)
 
-        result = {}
-        result['title'] = self._html_search_regex(
-            r'<h3 class="videoTitle">(.+?)</h3>', webpage, 'title')
+        webpage = self._download_webpage(url, video_id)
 
-        # Inspired from BrightcoveNewIE._extract_url()
-        entries = []
-        for account_id, player_id, _, video_id in re.findall(
-                # account_id, player_id and embed from:
-                #   <div class="video-player [...]
-                #     data-publisher-id="2890187628001"
-                #     data-player-id="VkSnGw3cx"
-                # video id is extracted from weird CMS Java/Javascript notation:
-                #   RW java.lang.String value = '5231040869001';
-                # Need to use backtrack to pin to a ref since video is in grid
-                # layout with others
-                r'''(?sx)
-                    <div[^>]+
-                        data-publisher-id=["\'](\d+)["\']
-                        [^>]*
-                        data-player-id=["\']([^"\']+)["\']
-                        [^>]*
-                        refId&quot;:&quot;([^&]+)&quot;
-                        [^>]*
-                        >.*?
-                    </div>.*?
-                    RW\ java\.lang\.String\ value\ =\ \'brightcove\.article\.\d+\.\3\'
-                    [^>]*
-                    RW\ java\.lang\.String\ value\ =\ \'(\d+)\'
-                ''', webpage):
+        title = self._html_search_regex(
+            r'<h\d\b[^>]+\bclass=["\']videoTitle["\'][^>]*>([^<]+)',
+            webpage, 'title', default=None) or self._og_search_title(webpage)
 
-            entries.append(
-                'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
-                % (account_id, player_id, 'default', video_id))
+        content = self._parse_json(
+            self._search_regex(
+                r'data-player-options-content=(["\'])(?P<content>{.+?})\1',
+                webpage, 'content', default='{}', group='content'),
+            video_id, transform_source=unescapeHTML)
 
-        if entries:
-            result = self.url_result(entries[0], BrightcoveNewIE.ie_key())
-        return result
+        ref_id = content.get('refId') or self._search_regex(
+            r'refId&quot;:&quot;([^&]+)&quot;', webpage, 'ref id')
+
+        brightcove_id = self._search_regex(
+            r'''(?x)
+                java\.lang\.String\s+value\s*=\s*["']brightcove\.article\.\d+\.%s
+                [^>]*
+                java\.lang\.String\s+value\s*=\s*["'](\d+)
+            ''' % re.escape(ref_id), webpage, 'brightcove id')
+
+        return {
+            '_type': 'url_transparent',
+            'ie_key': BrightcoveNewIE.ie_key(),
+            'url': smuggle_url(
+                self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
+                {'geo_countries': ['CA']}),
+            'id': brightcove_id,
+            'description': content.get('description'),
+            'creator': content.get('brand'),
+            'age_limit': parse_age_limit(content.get('rating')),
+            'series': content.get('showName') or content.get(
+                'episodeName'),  # this is intentional
+            'season_number': int_or_none(content.get('seasonNumber')),
+            'episode': title,
+            'episode_number': int_or_none(content.get('episodeNumber')),
+            'tags': content.get('tags', []),
+        }
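
For context, a minimal standalone sketch of the parsing step the rewritten extractor relies on: the data-player-options-content attribute carries HTML-escaped JSON, so its value is unescaped before being parsed. The attribute fragment and field values below are hypothetical stand-ins, not taken from an actual vrak.tv page; the extractor itself does the same thing with youtube-dl's unescapeHTML helper and InfoExtractor._parse_json, then interpolates the re.escape-d refId into the regex that locates the Brightcove video id.

    # Sketch of the unescape-then-parse step (assumed page structure, not part of this commit).
    import json
    import re

    try:
        from html import unescape  # Python 3
    except ImportError:  # Python 2, still supported by youtube-dl in 2017
        from HTMLParser import HTMLParser
        unescape = HTMLParser().unescape

    # Hypothetical fragment of a player <div>; real pages embed a much larger object.
    html_snippet = (
        '<div data-player-options-content="{&quot;refId&quot;:&quot;abc123&quot;,'
        '&quot;showName&quot;:&quot;ALT&quot;,&quot;rating&quot;:&quot;8+&quot;}">')

    escaped = re.search(
        r'data-player-options-content="([^"]+)"', html_snippet).group(1)
    content = json.loads(unescape(escaped))

    print(content['refId'])     # abc123, fed into the brightcove id regex
    print(content['showName'])  # ALT, used for the 'series' field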