
[izlesene] extract info using api request

remitamine 2015-10-03 16:05:29 +01:00
parent 60d23e5e59
commit 7fadcf71c6
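
The change replaces webpage scraping with a single JSON request to Izlesene's playerJson endpoint. Below is a minimal standalone sketch (Python 3, independent of youtube-dl) of that flow, printing the fields the extractor now reads; the endpoint URL and field names are taken from the diff, and whether the API still responds this way today is an assumption.

# Minimal sketch of the API-based flow introduced by this commit.
# Assumption: the playerJson endpoint still returns the fields used in the diff.
import json
from urllib.request import urlopen


def fetch_player_json(video_id):
    url = 'http://panel.izlesene.com/api/playerJson/izlesene/%s' % video_id
    with urlopen(url) as resp:
        return json.loads(resp.read().decode('utf-8'))


if __name__ == '__main__':
    api_data = fetch_player_json('7599694')  # video id from the first test case
    print(api_data['videoname'])          # title (required key)
    print(api_data.get('thumbnail'))      # thumbnail URL
    print(api_data.get('videoduration'))  # raw duration value (tests now expect e.g. 95395)
    print(api_data.get('views'))          # view count
    print(api_data.get('qualitylevel'))   # '|'-separated '[quality]url' entries
    print(api_data.get('streamurl'))      # fallback single stream URL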


@@ -6,11 +6,7 @@ import re
 from .common import InfoExtractor
 from ..compat import compat_urllib_parse_unquote
 from ..utils import (
-    determine_ext,
-    float_or_none,
-    get_element_by_id,
     int_or_none,
-    parse_iso8601,
     str_to_int,
 )
@@ -28,13 +24,8 @@ class IzleseneIE(InfoExtractor):
             'id': '7599694',
             'ext': 'mp4',
             'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
-            'description': 'md5:253753e2655dde93f59f74b572454f6d',
-            'thumbnail': 're:^http://.*\.jpg',
-            'uploader_id': 'pelikzzle',
-            'timestamp': int,
-            'upload_date': '20140702',
-            'duration': 95.395,
-            'age_limit': 0,
+            'duration': 95395,
         }
     },
     {
@@ -44,13 +35,8 @@ class IzleseneIE(InfoExtractor):
             'id': '17997',
             'ext': 'mp4',
             'title': 'Tarkan Dortmund 2006 Konseri',
-            'description': 'Tarkan Dortmund 2006 Konseri',
-            'thumbnail': 're:^http://.*\.jpg',
-            'uploader_id': 'parlayankiz',
-            'timestamp': int,
-            'upload_date': '20061112',
-            'duration': 253.666,
-            'age_limit': 0,
+            'duration': 253666,
         }
     },
 ]
@@ -58,65 +44,37 @@ class IzleseneIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        url = 'http://www.izlesene.com/video/%s' % video_id
-        webpage = self._download_webpage(url, video_id)
+        api_data = self._download_json('http://panel.izlesene.com/api/playerJson/izlesene/%s' % video_id, video_id)
-        title = self._og_search_title(webpage)
-        description = self._og_search_description(webpage)
-        thumbnail = self._proto_relative_url(
-            self._og_search_thumbnail(webpage), scheme='http:')
-        uploader = self._html_search_regex(
-            r"adduserUsername\s*=\s*'([^']+)';",
-            webpage, 'uploader', fatal=False)
-        timestamp = parse_iso8601(self._html_search_meta(
-            'uploadDate', webpage, 'upload date'))
-        duration = float_or_none(self._html_search_regex(
-            r'"videoduration"\s*:\s*"([^"]+)"',
-            webpage, 'duration', fatal=False), scale=1000)
-        view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
-        comment_count = self._html_search_regex(
-            r'comment_count\s*=\s*\'([^\']+)\';',
-            webpage, 'comment_count', fatal=False)
-        content_url = self._html_search_meta(
-            'contentURL', webpage, 'content URL', fatal=False)
-        ext = determine_ext(content_url, 'mp4')
+        title = api_data['videoname']
+        thumbnail = api_data.get('thumbnail')
+        duration = int_or_none(api_data.get('videoduration'))
+        view_count = int_or_none(str_to_int(api_data.get('views')))
-        # Might be empty for some videos.
-        streams = self._html_search_regex(
-            r'"qualitylevel"\s*:\s*"([^"]+)"', webpage, 'streams', default='')
+        streams = api_data.get('qualitylevel')
         formats = []
+        # TODO: extract formats from dash manifest
         if streams:
             for stream in streams.split('|'):
                 quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
                 formats.append({
                     'format_id': '%sp' % quality if quality else 'sd',
-                    'url': compat_urllib_parse_unquote(url),
-                    'ext': ext,
+                    'url': url,
                 })
         else:
-            stream_url = self._search_regex(
-                r'"streamurl"\s*:\s*"([^"]+)"', webpage, 'stream URL')
+            stream_url = api_data.get('streamurl')
             formats.append({
                 'format_id': 'sd',
-                'url': compat_urllib_parse_unquote(stream_url),
-                'ext': ext,
+                'url': stream_url,
             })
         return {
             'id': video_id,
             'title': title,
-            'description': description,
             'thumbnail': thumbnail,
-            'uploader_id': uploader,
-            'timestamp': timestamp,
             'duration': duration,
-            'view_count': int_or_none(view_count),
-            'comment_count': int_or_none(comment_count),
-            'age_limit': self._family_friendly_search(webpage),
+            'view_count': view_count,
             'formats': formats,
         }
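
For reference, a small self-contained example of how the 'qualitylevel' string is parsed into formats by the loop above. The sample value is hypothetical, shaped after the '[quality]url' pattern the extractor splits on; real values come from the API response.

import re

# Hypothetical 'qualitylevel' value; '|'-separated '[quality]url' entries.
sample = '[360]http://example.com/v360.mp4|[720]http://example.com/v720.mp4'

formats = []
for stream in sample.split('|'):
    quality, stream_url = re.search(r'\[(\w+)\](.+)', stream).groups()
    formats.append({
        'format_id': '%sp' % quality if quality else 'sd',
        'url': stream_url,
    })

print(formats)
# [{'format_id': '360p', 'url': 'http://example.com/v360.mp4'},
#  {'format_id': '720p', 'url': 'http://example.com/v720.mp4'}]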