from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
)


class BandcampIE(InfoExtractor):
    _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
    _TESTS = [{
        'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        'md5': 'c557841d5e50261777a6585648adf439',
        'info_dict': {
            'id': '1812978515',
            'ext': 'mp3',
            'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
            'duration': 9.8485,
        },
        '_skip': 'There is a limit of 200 free downloads / month for the test song'
    }, {
        'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
        'md5': '2b68e5851514c20efdff2afc5603b8b4',
        'info_dict': {
            'id': '2650410135',
            'ext': 'mp3',
            'title': 'Lanius (Battle)',
            'uploader': 'Ben Prunty Music',
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
        if not m_download:
            m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
            if m_trackinfo:
                json_code = m_trackinfo.group(1)
                data = json.loads(json_code)[0]
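
                # Note: the keys read below ('file', 'id', 'title', 'duration')
                # are inferred from this extractor's own usage of the trackinfo
                # blob, not from a documented Bandcamp API; 'file' maps format
                # ids such as 'mp3-128' to stream URLs.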
                formats = []
                for format_id, format_url in data['file'].items():
                    ext, abr_str = format_id.split('-', 1)
                    formats.append({
                        'format_id': format_id,
                        'url': format_url,
                        'ext': ext,
                        'vcodec': 'none',
                        'acodec': ext,
                        'abr': int(abr_str),
                    })

                self._sort_formats(formats)

                return {
                    'id': compat_str(data['id']),
                    'title': data['title'],
                    'formats': formats,
                    'duration': float(data['duration']),
                }
            else:
                raise ExtractorError('No free songs found')

        download_link = m_download.group(1)
        # The numeric id comes from the TralbumData object embedded in the page
        video_id = self._search_regex(
            r'(?ms)var TralbumData = {.*?id: (?P<id>\d+),?$',
            webpage, 'video id')

        download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
        # The track dictionary is embedded in the page's JavaScript as an
        # 'items' array; we take the first entry
        all_info = self._parse_json(self._search_regex(
            r'(?sm)items: (.*?),$', download_webpage, 'items'), video_id)
        info = all_info[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
        mp3_info = info['downloads']['mp3-320']
        # If we try to use this URL directly it says the link has expired
        initial_url = mp3_info['url']
        m_url = re.match(
            r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$',
            initial_url)
        # We build the URL we will use to get the final track URL;
        # Bandcamp builds the same URL in its download_bunde_*.js script
        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
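        # The hard-coded .rand and .vrs values above are replayed as observed
        # rather than generated, which is why the response below only gives us
        # a 'retry_url' instead of a direct 'download_url'.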
        final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
        # If we could correctly generate the .rand field the URL would be in
        # the "download_url" key; since we cannot, fall back to "retry_url"
        final_url = self._search_regex(
            r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL')

        return {
            'id': video_id,
            'title': info['title'],
            'ext': 'mp3',
            'vcodec': 'none',
            'url': final_url,
            'thumbnail': info.get('thumb_url'),
            'uploader': info.get('artist'),
        }
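

# A minimal usage sketch for the extractor above, kept as a comment so it has
# no effect at import time. It assumes youtube-dl is installed and uses only
# the public YoutubeDL entry point; the option dict shown is illustrative.
#
#     from youtube_dl import YoutubeDL
#
#     ydl = YoutubeDL({'quiet': True})
#     info = ydl.extract_info(
#         'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
#         download=False)
#     print(info['title'])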


class BandcampAlbumIE(InfoExtractor):
    IE_NAME = 'Bandcamp:album'
    # Matches either an /album/<slug> page or the root page of an artist
    # subdomain; the subdomain is used as the uploader id and, when no album
    # slug is present, as the playlist id as well.
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^?#]+)|/?(?:$|[?#]))'

    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
        'playlist': [
            {
                'md5': '39bc1eded3476e927c724321ddf116cf',
                'info_dict': {
                    'id': '1353101989',
                    'ext': 'mp3',
                    'title': 'Intro',
                }
            },
            {
                'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                'info_dict': {
                    'id': '38097443',
                    'ext': 'mp3',
                    'title': 'Kero One - Keep It Alive (Blazo remix)',
                }
            },
        ],
        'info_dict': {
            'title': 'Jazz Format Mixtape vol.1',
            'id': 'jazz-format-mixtape-vol-1',
            'uploader_id': 'blazo',
        },
        'params': {
            'playlistend': 2
        },
        'skip': 'Bandcamp imposes download limits.'
    }, {
        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
        'info_dict': {
            'title': 'Hierophany of the Open Grave',
            'uploader_id': 'nightbringer',
            'id': 'hierophany-of-the-open-grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
            'id': 'dotscale',
            'uploader_id': 'dotscale',
        },
        'playlist_mincount': 7,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uploader_id = mobj.group('subdomain')
        album_id = mobj.group('album_id')
        playlist_id = album_id or uploader_id
        webpage = self._download_webpage(url, playlist_id)
        tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
        if not tracks_paths:
            raise ExtractorError('The page doesn\'t contain any tracks')
        entries = [
            self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key())
            for t_path in tracks_paths]
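        # Each track link found above is delegated to BandcampIE via
        # url_result(), so the per-track extraction logic lives in one place.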
        title = self._search_regex(
            r'album_title\s*:\s*"(.*?)"', webpage, 'title', fatal=False)
        return {
            '_type': 'playlist',
            'uploader_id': uploader_id,
            'id': playlist_id,
            'title': title,
            'entries': entries,
        }