# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    HEADRequest,
    str_to_int,
    urlencode_postdata,
    urlhandle_detect_ext,
)


class HearThisAtIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
    _PLAYLIST_URL = 'https://hearthis.at/playlist.php'
    _TEST = {
        'url': 'https://hearthis.at/moofi/dr-kreep',
        'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
        'info_dict': {
            'id': '150939',
            'ext': 'wav',
            'title': 'Moofi - Dr. Kreep',
            'thumbnail': 're:^https?://.*\.jpg$',
            'timestamp': 1421564134,
            'description': 'Creepy Patch. Mutable Instruments Braids Vowel + Formant Mode.',
            'upload_date': '20150118',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
            'duration': 71,
            'categories': ['Experimental'],
        }
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        display_id = '{artist:s} - {title:s}'.format(**m.groupdict())

        webpage = self._download_webpage(url, display_id)
        track_id = self._search_regex(
            r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')

        # playlist.php responds with a JSON list of metadata for the
        # requested track IDs; we only ask for a single track
        payload = urlencode_postdata({'tracks[]': track_id})
        req = compat_urllib_request.Request(self._PLAYLIST_URL, payload)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        track = self._download_json(req, track_id, 'Downloading playlist')[0]
        title = '{artist:s} - {title:s}'.format(**track)

        categories = None
        if track.get('category'):
            categories = [track['category']]

        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # View/like/comment counts are scraped from <span> elements in the page
        meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
        view_count = str_to_int(self._search_regex(
            meta_span % 'plays_count', webpage, 'view count', fatal=False))
        like_count = str_to_int(self._search_regex(
            meta_span % 'likes_count', webpage, 'like count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            meta_span % 'comment_count', webpage, 'comment count', fatal=False))
        duration = str_to_int(self._search_regex(
            r'data-length="(\d+)', webpage, 'duration', fatal=False))
        timestamp = str_to_int(self._search_regex(
            r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))

        formats = []
        mp3_url = self._search_regex(
            r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
            webpage, 'mp3 URL', fatal=False)
        if mp3_url:
            formats.append({
                'format_id': 'mp3',
                'vcodec': 'none',
                'acodec': 'mp3',
                'url': mp3_url,
            })

        # The download link, when present, points at the originally uploaded
        # file; its extension is determined via a HEAD request
        download_path = self._search_regex(
            r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
            webpage, 'download URL', default=None)
        if download_path:
            download_url = compat_urlparse.urljoin(url, download_path)
            ext_req = HEADRequest(download_url)
            ext_handle = self._request_webpage(
                ext_req, display_id, note='Determining extension')
            ext = urlhandle_detect_ext(ext_handle)
            formats.append({
                'format_id': 'download',
                'vcodec': 'none',
                'ext': ext,
                'url': download_url,
                'preference': 2,  # Usually better quality
            })
        self._sort_formats(formats)

        return {
            'id': track_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'view_count': view_count,
            'comment_count': comment_count,
            'like_count': like_count,
            'categories': categories,
        }