1
0
mirror of https://github.com/l1ving/youtube-dl synced 2025-03-11 10:07:15 +08:00

Merge pull request #107 from ytdl-org/master

[pull] master from ytdl-org:master
This commit is contained in:
pull[bot] 2019-10-04 13:41:50 +00:00 committed by GitHub
commit 52f657b9f1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 48 additions and 55 deletions

View File

@ -1129,6 +1129,7 @@ from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .telequebec import (
    TeleQuebecIE,
    TeleQuebecSquatIE,
    TeleQuebecEmissionIE,
    TeleQuebecLiveIE,
)
@ -1413,7 +1414,6 @@ from .weibo import (
    WeiboMobileIE
)
from .weiqitv import WeiqiTVIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wsj import (

View File

@ -7,6 +7,7 @@ from ..utils import (
    int_or_none,
    smuggle_url,
    try_get,
    unified_timestamp,
)
@ -70,6 +71,52 @@ class TeleQuebecIE(TeleQuebecBaseIE):
        return info
class TeleQuebecSquatIE(InfoExtractor):
    _VALID_URL = r'https://squat\.telequebec\.tv/videos/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://squat.telequebec.tv/videos/9314',
        'info_dict': {
            'id': 'd59ae78112d542e793d83cc9d3a5b530',
            'ext': 'mp4',
            'title': 'Poupeflekta',
            'description': 'md5:2f0718f8d2f8fece1646ee25fb7bce75',
            'duration': 1351,
            'timestamp': 1569057600,
            'upload_date': '20190921',
            'series': 'Miraculous : Les Aventures de Ladybug et Chat Noir',
            'season': 'Saison 3',
            'season_number': 3,
            'episode_number': 57,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        # Squat pages expose their metadata through a JSON API; the media
        # itself lives on zonevideo.telequebec.tv, so playback is delegated
        # to TeleQuebecIE with a url_transparent result carrying the
        # metadata extracted here.
        video_id = self._match_id(url)
        data = self._download_json(
            'https://squat.api.telequebec.tv/v1/videos/%s' % video_id,
            video_id)

        media_id = data['sourceId']

        info = {
            '_type': 'url_transparent',
            'url': 'http://zonevideo.telequebec.tv/media/%s' % media_id,
            'ie_key': TeleQuebecIE.ie_key(),
            'id': media_id,
        }
        # API field names are in French (titre, saison, ...).
        info.update({
            'title': data.get('titre'),
            'description': data.get('description'),
            'timestamp': unified_timestamp(data.get('datePublication')),
            'series': data.get('container'),
            'season': data.get('saison'),
            'season_number': int_or_none(data.get('noSaison')),
            'episode_number': int_or_none(data.get('episode')),
        })
        return info
class TeleQuebecEmissionIE(TeleQuebecBaseIE):
    _VALID_URL = r'''(?x)
                    https?://

View File

@ -1,54 +0,0 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?wimp\.com/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.wimp.com/maru-is-exhausted/',
        'md5': 'ee21217ffd66d058e8b16be340b74883',
        'info_dict': {
            'id': 'maru-is-exhausted',
            'ext': 'mp4',
            'title': 'Maru is exhausted.',
            'description': 'md5:57e099e857c0a4ea312542b684a869b8',
        }
    }, {
        'url': 'http://www.wimp.com/clowncar/',
        'md5': '5c31ad862a90dc5b1f023956faec13fe',
        'info_dict': {
            'id': 'cG4CEr2aiSg',
            'ext': 'webm',
            'title': 'Basset hound clown car...incredible!',
            'description': '5 of my Bassets crawled in this dog loo! www.bellinghambassets.com\n\nFor licensing/usage please contact: licensing(at)jukinmediadotcom',
            'upload_date': '20140303',
            'uploader': 'Gretchen Hoey',
            'uploader_id': 'gretchenandjeff1',
        },
        'add_ie': ['Youtube'],
    }]

    def _real_extract(self, url):
        # If the page embeds a YouTube video, hand off to the YouTube
        # extractor; otherwise fall back to the page's own JWPlayer data.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        youtube_id = self._search_regex(
            (r"videoId\s*:\s*[\"']([0-9A-Za-z_-]{11})[\"']",
             r'data-id=["\']([0-9A-Za-z_-]{11})'),
            webpage, 'video URL', default=None)
        if youtube_id:
            return self.url_result(youtube_id, YoutubeIE.ie_key())

        info = self._extract_jwplayer_data(
            webpage, video_id, require_title=False)
        # Title/description come from OpenGraph tags; the JWPlayer data is
        # not required to carry a title (require_title=False above).
        info['id'] = video_id
        info['title'] = self._og_search_title(webpage)
        info['description'] = self._og_search_description(webpage)
        return info