mirror of https://github.com/l1ving/youtube-dl (synced 2025-01-23 14:57:51 +08:00)
commit c40ad8907a, parent 178ee88319
@@ -11,14 +11,14 @@ class LibsynIE(InfoExtractor):
     _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
 
     _TESTS = [{
-        'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
-        'md5': '443360ee1b58007bc3dcf09b41d093bb',
+        'url': 'http://html5-player.libsyn.com/embed/episode/id/6324076/',
+        'md5': '5b497505660690028d482f9a28431cca',
         'info_dict': {
-            'id': '3377616',
+            'id': '6324076',
             'ext': 'mp3',
-            'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
-            'description': 'md5:601cb790edd05908957dae8aaa866465',
-            'upload_date': '20150220',
+            'title': "Verily, Octopi Sing - Pandas are not smallpox",
+            'description': 'md5:0d20ad472ae296f22a0c9df23a6d78f1',
+            'upload_date': '20180303',
             'thumbnail': 're:^https?://.*',
         },
     }, {
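The refreshed test entry leans on youtube-dl's test-value conventions: an expected value prefixed with 'md5:' is compared against the MD5 of the extracted field, and a 're:' prefix means a regex match. A simplified stand-in for that check, with invented sample inputs (this is not the project's actual test helper):

import hashlib
import re

def check_field(expected, got):
    # 'md5:<hex>' compares the MD5 of the extracted value, 're:<pattern>'
    # does a regex match; anything else is compared literally.
    if expected.startswith('md5:'):
        return hashlib.md5(got.encode('utf-8')).hexdigest() == expected[len('md5:'):]
    if expected.startswith('re:'):
        return re.match(expected[len('re:'):], got) is not None
    return expected == got

print(check_field('re:^https?://.*', 'https://assets.libsyn.com/some/thumbnail.jpg'))   # True
print(check_field('md5:0d20ad472ae296f22a0c9df23a6d78f1', 'not the real description'))  # False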
@@ -28,6 +28,7 @@ class LibsynIE(InfoExtractor):
             'id': '3727166',
             'ext': 'mp3',
             'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
+            'description': 'md5:996a28b4f829ed4ffb8302a53d825704',
             'upload_date': '20150818',
             'thumbnail': 're:^https?://.*',
         }
@@ -39,25 +40,30 @@ class LibsynIE(InfoExtractor):
         url = m.group('mainurl')
         webpage = self._download_webpage(url, video_id)
 
-        formats = [{
-            'url': media_url,
-        } for media_url in set(re.findall(r'var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
+        media_url_json = self._search_regex(r'"media_url(?:_libsyn)?"\s*:\s*("(\\"|[^"])+")', webpage, 'media_url')
+        media_url = self._parse_json(media_url_json, video_id)
 
-        podcast_title = self._search_regex(
-            r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
-        episode_title = self._search_regex(
-            r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')
+        podcast_title = self._html_search_regex(
+            r'<h3\b[^>]*>([^<]+)</h3>', webpage, 'podcast title', default=None)
+        episode_title_json = self._search_regex(r'"item_title"\s*:\s*("(\\"|[^"])+")', webpage, 'episode title', default=None)
+        episode_title = self._parse_json(episode_title_json, video_id, fatal=False)
+        if episode_title is None:
+            # Fallback: scrape from page
+            episode_title = self._html_search_regex(r'<h4\b[^>]*>([^<]+)', webpage, 'episode title')
 
         title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
 
         description = self._html_search_regex(
-            r'<div id="info_text_body">(.+?)</div>', webpage,
-            'description', default=None)
-        thumbnail = self._search_regex(
-            r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
-            webpage, 'thumbnail', fatal=False)
-        release_date = unified_strdate(self._search_regex(
-            r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
+            r'<(\w+)\b[^>]*id="info_text_body"[^>]*>(?P<description>.+?)<\/\1>',
+            webpage, 'description', default=None, group='description')
+        thumbnail_json = self._search_regex(r'"thumbnail_url"\s*:\s*("(\\"|[^"])+")', webpage, 'thumbnail')
+        thumbnail = self._parse_json(thumbnail_json, video_id, fatal=False)
+        release_date_json = self._search_regex(r'"release_date"\s*:\s*("(\\"|[^"])+")', webpage, 'release date')
+        release_date = unified_strdate(self._parse_json(release_date_json, video_id, fatal=False))
+        if release_date is None:
+            # Fallback: scrape from page
+            release_date = unified_strdate(self._search_regex(
+                r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
 
         return {
             'id': video_id,
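This hunk is the heart of the change: instead of scraping `var mediaURL = "..."` assignments and `<h2>`/`<div class="episode-title">` markup, the extractor now reads JSON string fields (`media_url`, `item_title`, `thumbnail_url`, `release_date`) that the embed page exposes, decoding them with `_parse_json` and keeping an HTML fallback for the episode title. A standalone sketch of that capture-then-decode pattern (the `page` blob and the `json_field` helper below are invented for illustration, not part of the extractor):

import json
import re

# Invented stand-in for the player page: the real page embeds these fields in
# an inline JSON/JavaScript blob with escaped slashes and quotes.
page = r'''
"media_url": "https:\/\/traffic.libsyn.com\/example\/episode.mp3",
"item_title": "Pandas are not smallpox",
"thumbnail_url": "https:\/\/assets.libsyn.com\/example\/thumb.jpg"
'''

def json_field(key, html):
    # Same idea as the extractor's _search_regex + _parse_json pair: capture
    # the quoted JSON string (escaped quotes included) and decode it.
    m = re.search(r'"%s"\s*:\s*("(\\"|[^"])+")' % re.escape(key), html)
    return json.loads(m.group(1)) if m else None

print(json_field('media_url', page))      # https://traffic.libsyn.com/example/episode.mp3
print(json_field('item_title', page))     # Pandas are not smallpox
print(json_field('thumbnail_url', page))  # https://assets.libsyn.com/example/thumb.jpg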
@@ -65,5 +71,5 @@ class LibsynIE(InfoExtractor):
             'description': description,
             'thumbnail': thumbnail,
             'upload_date': release_date,
-            'formats': formats,
+            'url': media_url,
         }
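Because only a single media URL is extracted now, the returned info dict carries 'url' instead of the old 'formats' list. The date handling follows the same JSON-first, scrape-second shape as the title; here is a rough self-contained sketch of that fallback chain, with invented markup and a plain strptime standing in for youtube_dl.utils.unified_strdate:

import re
from datetime import datetime

# Toy page where the embedded JSON "release_date" field is missing, so only
# the old-style visible markup is left (both snippets are invented here).
page = '<div class="release_date">Released: Mar 03, 2018</div>'

def extract_release_date(html):
    # Prefer the embedded JSON field, as the updated extractor does
    # (simplified: no escaped-quote handling, unlike the real regex) ...
    m = re.search(r'"release_date"\s*:\s*"([^"]+)"', html)
    if not m:
        # ... and fall back to scraping the visible markup otherwise.
        m = re.search(r'<div class="release_date">Released: ([^<]+)<', html)
    if not m:
        return None
    raw = m.group(1).strip()
    # The extractor normalizes dates with unified_strdate; a strptime loop
    # over a couple of assumed formats stands in for it in this sketch.
    for fmt in ('%Y-%m-%d', '%b %d, %Y'):
        try:
            return datetime.strptime(raw, fmt).strftime('%Y%m%d')
        except ValueError:
            continue
    return None

print(extract_release_date(page))  # 20180303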