1
0
mirror of https://github.com/l1ving/youtube-dl synced 2024-11-22 02:02:53 +08:00

[googlesearch] Fix start, and skip playlists (Fixes #2329)

This commit is contained in:
Philipp Hagemeister 2014-02-06 03:29:10 +01:00
parent 211e17dd43
commit ccf9114e84
2 changed files with 34 additions and 13 deletions

View File

@@ -34,6 +34,7 @@ from youtube_dl.extractor import (
     KhanAcademyIE,
     EveryonesMixtapeIE,
     RutubeChannelIE,
+    GoogleSearchIE,
     GenericIE,
 )
@@ -240,6 +241,14 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
         self.assertEqual(len(result['entries']), 3)

+    def test_GoogleSearch(self):
+        dl = FakeYDL()
+        ie = GoogleSearchIE(dl)
+        result = ie.extract('gvsearch15:python language')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], 'python language')
+        self.assertEqual(result['title'], 'python language')
+        self.assertTrue(len(result['entries']) == 15)
+
 if __name__ == '__main__':
     unittest.main()

View File

@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import re
@@ -8,32 +10,42 @@ from ..utils import (

 class GoogleSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Google Video search'
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    IE_DESC = 'Google Video search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
+    IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'

     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""

+        entries = []
         res = {
             '_type': 'playlist',
             'id': query,
-            'entries': []
+            'title': query,
         }

-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                note='Downloading result page ' + str(pagenum))
+        for pagenum in itertools.count():
+            result_url = (
+                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
+                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
+            webpage = self._download_webpage(
+                result_url, 'gvsearch:' + query,
+                note='Downloading result page ' + str(pagenum + 1))

-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
+            for hit_idx, mobj in enumerate(re.finditer(
+                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
+                # Skip playlists
+                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
+                    continue
+
+                entries.append({
                     '_type': 'url',
                     'url': mobj.group(1)
-                }
-                res['entries'].append(e)
+                })

-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
+                res['entries'] = entries[:n]
                 return res