mirror of https://github.com/l1ving/youtube-dl synced 2024-12-31 02:23:06 +08:00

Clean up duplicate method report_download_webpage in InfoExtractors

commit 320e26a0af
parent a3d689cfb3
Jaime Marquínez Ferrándiz, 2013-04-24 22:02:20 +02:00


@@ -152,6 +152,10 @@ class InfoExtractor(object):
         """Report information extraction."""
         self.to_screen(u'%s: Extracting information' % id_or_name)
 
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self.to_screen(u'%s: Downloading webpage' % video_id)
+
     def report_age_confirmation(self):
         """Report attempt to confirm age."""
         self.to_screen(u'Confirming age')
@@ -691,10 +695,6 @@ class MetacafeIE(InfoExtractor):
         """Report disclaimer retrieval."""
         self.to_screen(u'Retrieving disclaimer')
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
     def _real_initialize(self):
         # Retrieve disclaimer
         request = compat_urllib_request.Request(self._DISCLAIMER)
@@ -882,10 +882,6 @@ class PhotobucketIE(InfoExtractor):
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
     def _real_extract(self, url):
         # Extract id from URL
         mobj = re.match(self._VALID_URL, url)
@@ -947,10 +943,6 @@ class YahooIE(InfoExtractor):
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
     def _real_extract(self, url, new_video=True):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
@@ -1083,10 +1075,6 @@ class VimeoIE(InfoExtractor):
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
     def _real_extract(self, url, new_video=True):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
@@ -1200,10 +1188,6 @@ class ArteTvIE(InfoExtractor):
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_download_webpage(self, video_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
     def fetch_webpage(self, url):
         request = compat_urllib_request.Request(url)
         try:
@@ -2020,10 +2004,6 @@ class DepositFilesIE(InfoExtractor):
 
     _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
 
-    def report_download_webpage(self, file_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % file_id)
-
     def _real_extract(self, url):
         file_id = url.split('/')[-1]
         # Rebuild url in english locale
@@ -3014,10 +2994,6 @@ class StanfordOpenClassroomIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
     IE_NAME = u'stanfordoc'
 
-    def report_download_webpage(self, objid):
-        """Report information extraction."""
-        self.to_screen(u'%s: Downloading webpage' % objid)
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3196,10 +3172,6 @@ class MTVIE(InfoExtractor):
 
 class YoukuIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
 
-    def report_download_webpage(self, file_id):
-        """Report webpage download."""
-        self.to_screen(u'%s: Downloading webpage' % file_id)
-
     def _gen_sid(self):
         nowTime = int(time.time() * 1000)
         random1 = random.randint(1000,1998)
@@ -3309,10 +3281,6 @@ class XNXXIE(InfoExtractor):
     VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
     VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
 
-    def report_webpage(self, video_id):
-        """Report information extraction"""
-        self.to_screen(u'%s: Downloading webpage' % video_id)
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
@@ -3320,7 +3288,7 @@ class XNXXIE(InfoExtractor):
             return
         video_id = mobj.group(1)
 
-        self.report_webpage(video_id)
+        self.report_download_webpage(video_id)
 
         # Get webpage content
         try:
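
In short, the commit applies the usual "hoist into the base class" cleanup: report_download_webpage is now defined once on InfoExtractor, and each subclass inherits it instead of re-declaring an identical copy. Below is a minimal sketch of the resulting pattern, using simplified stand-in code rather than the real youtube-dl classes (to_screen is stubbed with print, and the PhotobucketIE body is reduced to a placeholder ID extraction):

# Minimal sketch of the pattern this commit applies (hypothetical,
# simplified code -- not the actual youtube-dl implementation).

class InfoExtractor(object):
    def to_screen(self, msg):
        # Stub: the real method reports through the downloader object.
        print(msg)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen(u'%s: Downloading webpage' % video_id)


class PhotobucketIE(InfoExtractor):
    # No local report_download_webpage any more; the base-class
    # version above is inherited and used instead.
    def _real_extract(self, url):
        video_id = url.rsplit('/', 1)[-1]   # placeholder ID extraction
        self.report_download_webpage(video_id)
        # ... actual webpage download and extraction would follow ...


PhotobucketIE()._real_extract('http://example.com/files/abc123')
# prints: abc123: Downloading webpage

The only call-site change is in XNXXIE: its last hunk switches self.report_webpage(video_id) to the inherited self.report_download_webpage(video_id), so the old local helper can be removed along with the others.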