1
0
mirror of https://github.com/l1ving/youtube-dl synced 2024-11-24 07:32:58 +08:00
youtube-dl/youtube_dl/extractor/pornhub.py

102 lines
3.8 KiB
Python
Raw Normal View History

2014-01-07 17:25:34 +08:00
from __future__ import unicode_literals
2013-10-27 07:04:22 +08:00
import os
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
str_to_int,
2013-10-27 07:04:22 +08:00
)
from ..aes import (
aes_decrypt_text
)
2014-01-07 17:25:34 +08:00
2013-10-27 07:04:22 +08:00
class PornHubIE(InfoExtractor):
    """Extractor for pornhub.com video pages.

    Matches view_video.php?viewkey=... URLs, bypasses the age gate via a
    cookie, and returns an info dict with title, uploader, thumbnail,
    counts and the available formats.
    """
    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
    _TEST = {
        'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
        'file': '648719015.mp4',
        'md5': '882f488fa1f0026f023f33576004a2ed',
        'info_dict': {
            'uploader': 'BABES-COM',
            'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
            'age_limit': 18,
        },
    }

    def _extract_count(self, pattern, webpage, name):
        """Return the count matched by *pattern* in *webpage* as an int.

        Returns None when the pattern does not match (the regex search is
        non-fatal, so missing counts never abort extraction).
        """
        count = self._html_search_regex(
            pattern, webpage, '%s count' % name, fatal=False)
        if count:
            count = str_to_int(count)
        return count

    def _real_extract(self, url):
        """Download the video page and build the info dict for one video."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')
        url = 'http://www.' + mobj.group('url')

        # The site hides content behind an age check; pre-setting this
        # cookie skips the interstitial page.
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        video_title = self._html_search_regex(
            r'<h1 [^>]+>([^<]+)', webpage, 'title')
        video_uploader = self._html_search_regex(
            r'(?s)<div class="video-info-row">\s*From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
            webpage, 'uploader', fatal=False)
        thumbnail = self._html_search_regex(
            r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
        if thumbnail:
            thumbnail = compat_urllib_parse.unquote(thumbnail)

        view_count = self._extract_count(
            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
        like_count = self._extract_count(
            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
        dislike_count = self._extract_count(
            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
        comment_count = self._extract_count(
            r'All comments \(<var class="videoCommentCount">([\d,\.]+)</var>', webpage, 'comment')

        video_urls = list(map(
            compat_urllib_parse.unquote,
            re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
        if webpage.find('"encrypted":true') != -1:
            # Encrypted URLs are AES-decrypted using the (percent-encoded)
            # video title as the key.
            password = compat_urllib_parse.unquote_plus(
                self._html_search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
            video_urls = list(map(
                lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
                video_urls))

        formats = []
        for video_url in video_urls:
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]

            # A label like "480P-600K" is encoded in the 6th path segment;
            # renamed from `format` to avoid shadowing the builtin.
            format_id = '-'.join(path.split('/')[5].split('_')[:2])

            m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format_id)
            if m is None:
                height = None
                tbr = None
            else:
                height = int(m.group('height'))
                tbr = int(m.group('tbr'))

            formats.append({
                'url': video_url,
                'ext': extension,
                'format': format_id,
                'format_id': format_id,
                'tbr': tbr,
                'height': height,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'title': video_title,
            'thumbnail': thumbnail,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
        }