Mirror of https://github.com/l1ving/youtube-dl — synced 2024-11-21 00:42:58 +08:00

switched ytsearch to more robust Youtube Data API (fixes #307)

This commit is contained in:
Filippo Valsorda 2012-03-14 22:44:45 +01:00
parent 597e7b1805
commit afbaa80b8b

View File

@@ -2248,9 +2248,7 @@ class GenericIE(InfoExtractor):
class YoutubeSearchIE(InfoExtractor): class YoutubeSearchIE(InfoExtractor):
"""Information Extractor for YouTube search queries.""" """Information Extractor for YouTube search queries."""
_VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+' _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en' _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
_youtube_ie = None _youtube_ie = None
_max_youtube_results = 1000 _max_youtube_results = 1000
IE_NAME = u'youtube:search' IE_NAME = u'youtube:search'
@@ -2301,37 +2299,31 @@ class YoutubeSearchIE(InfoExtractor):
"""Downloads a specified number of results for a query""" """Downloads a specified number of results for a query"""
video_ids = [] video_ids = []
already_seen = set() pagenum = 0
pagenum = 1 limit = n
while True: while (50 * pagenum) < limit:
self.report_download_page(query, pagenum) self.report_download_page(query, pagenum+1)
result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum) result_url = self._API_URL % (urllib.quote_plus(query), (50*pagenum)+1)
request = urllib2.Request(result_url) request = urllib2.Request(result_url)
try: try:
page = urllib2.urlopen(request).read() data = urllib2.urlopen(request).read()
except (urllib2.URLError, httplib.HTTPException, socket.error), err: except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err))
return return
api_response = json.loads(data)['data']
# Extract video identifiers new_ids = list(video['id'] for video in api_response['items'])
for mobj in re.finditer(self._VIDEO_INDICATOR, page): video_ids += new_ids
video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
if video_id not in already_seen:
video_ids.append(video_id)
already_seen.add(video_id)
if len(video_ids) == n:
# Specified n videos reached
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
if re.search(self._MORE_PAGES_INDICATOR, page) is None: limit = min(n, api_response['totalItems'])
for id in video_ids: pagenum += 1
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
pagenum = pagenum + 1 if len(video_ids) > n:
video_ids = video_ids[:n]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
class GoogleSearchIE(InfoExtractor): class GoogleSearchIE(InfoExtractor):