1
0
mirror of https://github.com/l1ving/youtube-dl synced 2024-12-21 16:55:36 +08:00

[puhutv] Improve extraction (closes #16269)

This commit is contained in:
Sergey M․ 2018-07-22 20:25:46 +07:00
parent 6de82b4476
commit 8fd2a7be37
No known key found for this signature in database
GPG Key ID: 2C393E0F18A9236D

View File

@ -2,53 +2,54 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
determine_ext,
parse_resolution,
str_or_none,
url_or_none,
unified_strdate,
unified_timestamp,
try_get,
url_basename,
remove_end
unified_timestamp,
url_or_none,
urljoin,
)
class PuhuTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[a-z0-9-]+)-izle'
_VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-izle'
IE_NAME = 'puhutv'
_TESTS = [
{
# A Film
'url': 'https://puhutv.com/sut-kardesler-izle',
'md5': 'a347470371d56e1585d1b2c8dab01c96',
'info_dict': {
'id': 'sut-kardesler',
'display_id': '5085',
'ext': 'mp4',
'title': 'Süt Kardeşler',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Arzu Film',
'description': 'md5:405fd024df916ca16731114eb18e511a',
'uploader_id': '43',
'upload_date': '20160729',
'timestamp': int,
},
_TESTS = [{
# film
'url': 'https://puhutv.com/sut-kardesler-izle',
'md5': 'fbd8f2d8e7681f8bcd51b592475a6ae7',
'info_dict': {
'id': '5085',
'display_id': 'sut-kardesler',
'ext': 'mp4',
'title': 'Süt Kardeşler',
'description': 'md5:405fd024df916ca16731114eb18e511a',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 4832.44,
'creator': 'Arzu Film',
'timestamp': 1469778212,
'upload_date': '20160729',
'release_year': 1976,
'view_count': int,
'tags': ['Aile', 'Komedi', 'Klasikler'],
},
{
# An episode; geo-restricted
'url': 'https://puhutv.com/jet-sosyete-1-bolum-izle',
'only_matching': True,
},
{
# Has subtitles
'url': 'https://puhutv.com/dip-1-bolum-izle',
'only_matching': True,
}
]
}, {
# episode, geo restricted, bypassable with --geo-verification-proxy
'url': 'https://puhutv.com/jet-sosyete-1-bolum-izle',
'only_matching': True,
}, {
# 4k, with subtitles
'url': 'https://puhutv.com/dip-1-bolum-izle',
'only_matching': True,
}]
_SUBTITLE_LANGS = {
'English': 'en',
'Deutsch': 'de',
@ -56,47 +57,103 @@ class PuhuTVIE(InfoExtractor):
}
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'https://puhutv.com/api/slug/%s-izle' % video_id, video_id)['data']
display_id = self._match_id(url)
display_id = compat_str(info['id'])
title = info['title']['name']
info = self._download_json(
urljoin(url, '/api/slug/%s-izle' % display_id),
display_id)['data']
video_id = compat_str(info['id'])
title = info.get('name') or info['title']['name']
if info.get('display_name'):
title = '%s %s' % (title, info.get('display_name'))
description = try_get(info, lambda x: x['title']['description'], compat_str) or info.get('description')
try:
videos = self._download_json(
'https://puhutv.com/api/assets/%s/videos' % video_id,
display_id, 'Downloading video JSON',
headers=self.geo_verification_headers())
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self.raise_geo_restricted()
raise
formats = []
for video in videos['data']['videos']:
media_url = url_or_none(video.get('url'))
if not media_url:
continue
playlist = video.get('is_playlist')
if video.get('stream_type') == 'hls' and playlist is True:
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
continue
quality = int_or_none(video.get('quality'))
f = {
'url': media_url,
'ext': 'mp4',
'height': quality
}
video_format = video.get('video_format')
if video_format == 'hls' and playlist is False:
format_id = 'hls'
f['protocol'] = 'm3u8_native'
elif video_format == 'mp4':
format_id = 'http'
else:
continue
if quality:
format_id += '-%sp' % quality
f['format_id'] = format_id
formats.append(f)
self._sort_formats(formats)
description = try_get(
info, lambda x: x['title']['description'],
compat_str) or info.get('description')
timestamp = unified_timestamp(info.get('created_at'))
upload_date = unified_strdate(info.get('created_at'))
uploader = try_get(info, lambda x: x['title']['producer']['name'], compat_str)
uploader_id = str_or_none(try_get(info, lambda x: x['title']['producer']['id']))
view_count = int_or_none(try_get(info, lambda x: x['content']['watch_count']))
duration = float_or_none(try_get(info, lambda x: x['content']['duration_in_ms']), scale=1000)
thumbnail = try_get(info, lambda x: x['content']['images']['wide']['main'], compat_str)
release_year = int_or_none(try_get(info, lambda x: x['title']['released_at']))
webpage_url = info.get('web_url')
creator = try_get(
info, lambda x: x['title']['producer']['name'], compat_str)
duration = float_or_none(
try_get(info, lambda x: x['content']['duration_in_ms'], int),
scale=1000)
view_count = try_get(info, lambda x: x['content']['watch_count'], int)
images = try_get(
info, lambda x: x['content']['images']['wide'], dict) or {}
thumbnails = []
for image_id, image_url in images.items():
if not isinstance(image_url, compat_str):
continue
if not image_url.startswith(('http', '//')):
image_url = 'https://%s' % image_url
t = parse_resolution(image_id)
t.update({
'id': image_id,
'url': image_url
})
thumbnails.append(t)
release_year = try_get(info, lambda x: x['title']['released_at'], int)
season_number = int_or_none(info.get('season_number'))
season_id = int_or_none(info.get('season_id'))
season_id = str_or_none(info.get('season_id'))
episode_number = int_or_none(info.get('episode_number'))
tags = []
for tag in try_get(info, lambda x: x['title']['genres'], list) or []:
if isinstance(tag.get('name'), compat_str):
tags.append(tag.get('name'))
thumbnails = []
thumbs_dict = try_get(info, lambda x: x['content']['images']['wide'], dict) or {}
for id, url in thumbs_dict.items():
if not url_or_none(url):
for genre in try_get(info, lambda x: x['title']['genres'], list) or []:
if not isinstance(genre, dict):
continue
thumbnails.append({
'url': 'https://%s' % url,
'id': id
})
genre_name = genre.get('name')
if genre_name and isinstance(genre_name, compat_str):
tags.append(genre_name)
subtitles = {}
for subtitle in try_get(info, lambda x: x['content']['subtitles'], list) or []:
for subtitle in try_get(
info, lambda x: x['content']['subtitles'], list) or []:
if not isinstance(subtitle, dict):
continue
lang = subtitle.get('language')
@ -107,30 +164,6 @@ class PuhuTVIE(InfoExtractor):
'url': sub_url
}]
# Some videos are geo-restricted at the request of the copyright owner and return HTTP 403
req_formats = self._download_json(
'https://puhutv.com/api/assets/%s/videos' % display_id,
video_id, 'Downloading video JSON')
formats = []
for format in req_formats['data']['videos']:
media_url = url_or_none(format.get('url'))
if not media_url:
continue
ext = format.get('video_format') or determine_ext(media_url)
quality = format.get('quality')
if format.get('stream_type') == 'hls' and format.get('is_playlist') is True:
m3u8_id = remove_end(url_basename(media_url), '.m3u8')
formats.append(self._m3u8_meta_format(media_url, ext, m3u8_id=m3u8_id))
elif ext == 'mp4' and format.get('is_playlist', False) is False:
formats.append({
'url': media_url,
'format_id': 'http-%s' % quality,
'ext': ext,
'height': quality
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
@ -140,93 +173,75 @@ class PuhuTVIE(InfoExtractor):
'season_number': season_number,
'episode_number': episode_number,
'release_year': release_year,
'upload_date': upload_date,
'timestamp': timestamp,
'uploader': uploader,
'uploader_id': uploader_id,
'creator': creator,
'view_count': view_count,
'duration': duration,
'tags': tags,
'subtitles': subtitles,
'webpage_url': webpage_url,
'thumbnail': thumbnail,
'thumbnails': thumbnails,
'formats': formats
}
class PuhuTVSerieIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[a-z0-9-]+)-detay'
_VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-detay'
IE_NAME = 'puhutv:serie'
_TESTS = [
{
'url': 'https://puhutv.com/deniz-yildizi-detay',
'info_dict': {
'title': 'Deniz Yıldızı',
'id': 'deniz-yildizi',
'uploader': 'Focus Film',
'uploader_id': 61,
},
'playlist_mincount': 234,
_TESTS = [{
'url': 'https://puhutv.com/deniz-yildizi-detay',
'info_dict': {
'title': 'Deniz Yıldızı',
'id': 'deniz-yildizi',
},
{
# a film detail page that uses the same URL pattern as a series page
'url': 'https://puhutv.com/kaybedenler-kulubu-detay',
'info_dict': {
'title': 'Kaybedenler Kulübü',
'id': 'kaybedenler-kulubu',
'uploader': 'Tolga Örnek, Murat Dörtbudak, Neslihan Dörtbudak, Kemal Kaplanoğlu',
'uploader_id': 248,
},
'playlist_mincount': 1,
},
]
'playlist_mincount': 205,
}, {
# a film detail page that uses the same URL pattern as a series page
'url': 'https://puhutv.com/kaybedenler-kulubu-detay',
'only_matching': True,
}]
def _extract_entries(self, playlist_id, seasons):
def _extract_entries(self, seasons):
for season in seasons:
season_id = season['id']
season_number = season.get('position')
pagenum = 1
season_id = season.get('id')
if not season_id:
continue
page = 1
has_more = True
while has_more is True:
season_info = self._download_json(
season = self._download_json(
'https://galadriel.puhutv.com/seasons/%s' % season_id,
playlist_id, 'Downloading season %s page %s' % (season_number, pagenum), query={
'page': pagenum,
season_id, 'Downloading page %s' % page, query={
'page': page,
'per': 40,
})
for episode in season_info.get('episodes'):
video_id = episode['slugPath'].replace('-izle', '')
yield self.url_result(
'https://puhutv.com/%s-izle' % video_id,
PuhuTVIE.ie_key(), video_id)
pagenum = pagenum + 1
has_more = season_info.get('hasMore', False)
episodes = season.get('episodes')
if isinstance(episodes, list):
for ep in episodes:
slug_path = str_or_none(ep.get('slugPath'))
if not slug_path:
continue
video_id = str_or_none(int_or_none(ep.get('id')))
yield self.url_result(
'https://puhutv.com/%s' % slug_path,
ie=PuhuTVIE.ie_key(), video_id=video_id,
video_title=ep.get('name') or ep.get('eventLabel'))
page += 1
has_more = season.get('hasMore')
def _real_extract(self, url):
playlist_id = self._match_id(url)
info = self._download_json(
'https://puhutv.com/api/slug/%s-detay' % playlist_id, playlist_id)['data']
urljoin(url, '/api/slug/%s-detay' % playlist_id),
playlist_id)['data']
title = info.get('name')
uploader = try_get(info, lambda x: x['producer']['name'], compat_str)
uploader_id = try_get(info, lambda x: x['producer']['id'])
seasons = info.get('seasons')
if seasons:
entries = self._extract_entries(playlist_id, seasons)
else:
# For films, the same URL pattern is used as for series
video_id = info['assets'][0]['slug']
return self.url_result(
'https://puhutv.com/%s-izle' % video_id,
PuhuTVIE.ie_key(), video_id)
return self.playlist_result(
self._extract_entries(seasons), playlist_id, info.get('name'))
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'uploader': uploader,
'uploader_id': uploader_id,
'entries': entries,
}
# For films, the same URL pattern is used as for series
video_id = info.get('slug') or info['assets'][0]['slug']
return self.url_result(
'https://puhutv.com/%s-izle' % video_id,
PuhuTVIE.ie_key(), video_id)