mirror of https://github.com/l1ving/youtube-dl synced 2024-11-26 06:13:00 +08:00

Sublime space formatting

Nick Daniels 2012-12-19 14:19:08 +00:00
parent 8a2f13c304
commit cdb3076445


@@ -23,7 +23,7 @@ class InfoExtractor(object):
    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the FileDownloader. The FileDownloader processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.
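
Aside (not part of the diff): a minimal sketch of the kind of dictionary the docstring above describes. The key names here are an assumption based on common youtube-dl usage, not something taken from this commit.

# Hypothetical extractor result; key names are illustrative only, the
# authoritative list of required fields lives elsewhere in this file.
example_info = {
    'id':       'abc123',                         # site-specific video id
    'url':      'https://example.com/video.mp4',  # real, downloadable video URL
    'title':    u'Example video title',
    'ext':      'mp4',
    'uploader': u'example_user',
}
# A list of such dictionaries is what gets handed to the FileDownloader.
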
@@ -159,7 +159,7 @@ class YoutubeIE(InfoExtractor):
        '44': '480x854',
        '45': '720x1280',
        '46': '1080x1920',
    }
    IE_NAME = u'youtube'

    def suitable(self, url):
@ -988,7 +988,7 @@ class VimeoIE(InfoExtractor):
except: except:
self._downloader.trouble(u'ERROR: unable to extract info section') self._downloader.trouble(u'ERROR: unable to extract info section')
return return
# Extract title # Extract title
video_title = config["video"]["title"] video_title = config["video"]["title"]
@@ -1211,7 +1211,7 @@ class GenericIE(InfoExtractor):
    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

    def _test_redirect(self, url):
        """Check if it is a redirect, like url shorteners, in case restart chain."""
        class HeadRequest(compat_urllib_request.Request):
@@ -1220,38 +1220,38 @@ class GenericIE(InfoExtractor):
        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HeadRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k,v) for k,v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HeadRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k,v) for k,v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
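
Aside (not part of the diff): the two handler classes above implement "resolve redirects with HEAD requests, and fall back to GET when the server answers 405". A standalone sketch of the same idea using the Python 3 standard library instead of youtube-dl's compat_* aliases:

import urllib.error
import urllib.request

def resolve_final_url(url):
    # Try a HEAD request first so no response body is downloaded.  Note that
    # urllib's default redirect handler re-issues redirected requests as GET,
    # which is exactly what HEADRedirectHandler above works around by
    # returning HeadRequest objects on every hop.
    try:
        req = urllib.request.Request(url, method="HEAD")
        with urllib.request.urlopen(req) as resp:
            return resp.geturl()
    except urllib.error.HTTPError as err:
        if err.code != 405:      # only fall back when HEAD is not allowed
            raise
        with urllib.request.urlopen(url) as resp:   # plain GET, like HTTPMethodFallback
            return resp.geturl()
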
@@ -2256,7 +2256,7 @@ class MyVideoIE(InfoExtractor):
    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
@@ -2310,10 +2310,10 @@ class ComedyCentralIE(InfoExtractor):
    """Information extractor for The Daily Show and Colbert Report """

    # urls can be abbreviations like :thedailyshow or :colbert
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                      |(https?://)?(www\.)?
                          (?P<showname>thedailyshow|colbertnation)\.com/
@@ -2321,7 +2321,7 @@ class ComedyCentralIE(InfoExtractor):
                          (?P<clip>
                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
                     $"""
    IE_NAME = u'comedycentral'

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']
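
Aside (not part of the diff): a multi-line pattern like _VALID_URL above is evidently meant to be matched with re.VERBOSE, so the embedded newlines and indentation are ignored. A simplified, self-contained illustration of that style of pattern (a sketch, not the regex from the diff), tried against URLs taken from the comments above:

import re

SKETCH_URL_RE = re.compile(r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert)
                                |(https?://)?(www\.)?
                                 (?P<showname>thedailyshow|colbertnation)\.com/
                                 watch/(?P<date>[^/]*)/(?P<tdstitle>.*))$""",
                           re.VERBOSE)

for url in (':thedailyshow',
            'http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day'):
    m = SKETCH_URL_RE.match(url)
    print(url, '->', m.groupdict() if m else None)
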
@@ -2425,7 +2425,7 @@ class ComedyCentralIE(InfoExtractor):
                return
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]

        playerUrl_raw = mMovieParams[0][0]
        self.report_player_url(epTitle)
        try:
@@ -2474,7 +2474,7 @@ class ComedyCentralIE(InfoExtractor):
            if len(turls) == 0:
                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
                continue

            if self._downloader.params.get('listformats', None):
                self._print_formats([i[0] for i in turls])
                return
@@ -2514,7 +2514,7 @@ class ComedyCentralIE(InfoExtractor):
            }

            results.append(info)

        return results
@@ -3078,7 +3078,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results

        else: # Root page
            info = {
                'id': 'Stanford OpenClassroom',
@@ -3152,7 +3152,7 @@ class MTVIE(InfoExtractor):
            self._downloader.trouble(u'ERROR: unable to extract performer')
            return
        performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
        video_title = performer + ' - ' + song_name

        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
        if mobj is None:
@@ -3581,7 +3581,7 @@ class JustinTVIE(InfoExtractor):
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
            return

        response = json.loads(webpage)
        info = []
        for clip in response:
@@ -3604,7 +3604,7 @@ class JustinTVIE(InfoExtractor):
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        api = 'http://api.justin.tv'
        video_id = mobj.group(mobj.lastindex)
        paged = False
@@ -3614,9 +3614,9 @@ class JustinTVIE(InfoExtractor):
        else:
            api += '/clip/show/%s.json'
        api = api % (video_id,)

        self.report_extraction(video_id)

        info = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
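
Aside (not part of the diff): the hunk stops right before the paging loop that uses offset and limit. A generic sketch of that offset/limit pattern against a JSON API, with a hypothetical helper and query-string layout rather than JustinTVIE's actual code, which continues past this hunk:

import json
import urllib.request

PAGE_LIMIT = 100   # stand-in for self._JUSTIN_PAGE_LIMIT

def fetch_page(api_url, offset, limit):
    # Hypothetical query-string layout; only the offset/limit idea matters here.
    page_url = '%s?offset=%d&limit=%d' % (api_url, offset, limit)
    with urllib.request.urlopen(page_url) as resp:
        return json.loads(resp.read().decode('utf-8'))

def fetch_all_clips(api_url):
    info = []
    offset = 0
    while True:
        page = fetch_page(api_url, offset, PAGE_LIMIT)
        info.extend(page)
        if len(page) < PAGE_LIMIT:   # a short page means the last one was reached
            break
        offset += PAGE_LIMIT
    return info
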