youtube-dl/youtube_dl/extractor/googlesearch.py
Philipp Hagemeister 1cc79574fc Fix imports and general cleanup
· Import from compat what comes from compat. Yes, some names are available in utils too, but that's an implementation detail.
· Use _match_id consistently whenever possible
· Fix some outdated tests
· Use consistent valid URL (always match the whole protocol, no ^ at start required)
· Use modern test definitions
2014-12-13 12:35:45 +01:00
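
The cleanup points above refer to youtube-dl's extractor conventions: compat names are imported from ..compat (as googlesearch.py already does with compat_urllib_parse), _VALID_URL matches the whole protocol without a leading '^', IDs are pulled out with _match_id, and tests are plain _TEST dicts. A minimal sketch of those conventions, using a hypothetical ExampleIE whose URL, class name, and test values are placeholders and not part of youtube-dl:

from __future__ import unicode_literals

from .common import InfoExtractor


class ExampleIE(InfoExtractor):
    # Match the whole protocol; no '^' anchor is needed at the start.
    _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    # Modern test definition: a flat dict with 'url' and 'info_dict'.
    _TEST = {
        'url': 'http://www.example.com/watch/12345',
        'info_dict': {
            'id': '12345',
            'ext': 'mp4',
            'title': 'Example title',
        },
    }

    def _real_extract(self, url):
        # _match_id extracts the 'id' group from _VALID_URL.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': self._og_search_video_url(webpage),
        }

The googlesearch.py source follows.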

from __future__ import unicode_literals

import itertools
import re

from .common import SearchInfoExtractor
from ..compat import (
    compat_urllib_parse,
)


class GoogleSearchIE(SearchInfoExtractor):
    IE_DESC = 'Google Video search'
    _MAX_RESULTS = 1000
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'
    _TEST = {
        'url': 'gvsearch15:python language',
        'info_dict': {
            'id': 'python language',
            'title': 'python language',
        },
        'playlist_count': 15,
    }

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        entries = []
        res = {
            '_type': 'playlist',
            'id': query,
            'title': query,
        }

        for pagenum in itertools.count():
            result_url = (
                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
                % (compat_urllib_parse.quote_plus(query), pagenum * 10))

            webpage = self._download_webpage(
                result_url, 'gvsearch:' + query,
                note='Downloading result page ' + str(pagenum + 1))

            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
                # Skip playlists
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue

                entries.append({
                    '_type': 'url',
                    'url': mobj.group(1)
                })

            if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
                res['entries'] = entries[:n]
                return res
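
As _SEARCH_KEY and the _TEST above show, the extractor is addressed with pseudo-URLs of the form gvsearchN:query (up to _MAX_RESULTS = 1000 results). A minimal sketch of driving it through the public YoutubeDL API, assuming an installed youtube-dl from this tree:

from __future__ import unicode_literals

import youtube_dl

# Sketch only: list the first five Google Video search results for a query.
# The 'gvsearch5:' prefix routes the request to GoogleSearchIE above.
with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    # process=False returns the playlist dict built by _get_n_results
    # without resolving each {'_type': 'url'} entry through other extractors.
    result = ydl.extract_info(
        'gvsearch5:python language', download=False, process=False)
    for entry in result['entries']:
        print(entry['url'])

On the command line, the equivalent invocation is youtube-dl 'gvsearch5:python language'.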