Mirror of https://github.com/l1ving/youtube-dl (synced 2025-03-14 07:37:25 +08:00)

Commit ef1498dc3c: Merge remote-tracking branch 'upstream/master'

@@ -1,6 +1,6 @@
 **Please include the full output of youtube-dl when run with `-v`**.
 
-The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
 
 Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
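For reference, the verbose output this template asks for can also be produced through the embedding API rather than the CLI; a minimal sketch using the public `YoutubeDL` options (the URL is youtube-dl's own standard test video):

```python
import youtube_dl

# 'verbose': True is the API equivalent of the -v flag: it prints the debug
# header (versions, Python build, proxy map) that bug reports should include
ydl = youtube_dl.YoutubeDL({'verbose': True})
ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
```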
@@ -114,12 +114,13 @@ If you want to add support for a new site, you can follow this quick list (assum
         webpage = self._download_webpage(url, video_id)
 
         # TODO more code goes here, for example ...
-        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+        title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
 
         return {
             'id': video_id,
             'title': title,
             'description': self._og_search_description(webpage),
+            'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
             # TODO more properties (see youtube_dl/extractor/common.py)
         }
     ```
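A side note on the `(.*?)` to `(.+?)` change in the example extractor: `(.*?)` happily matches an empty `<h1></h1>`, while `(.+?)` skips it; a quick illustration with hypothetical markup:

```python
import re

webpage = '<h1></h1><h1>Real title</h1>'  # hypothetical page fragment
print(re.search(r'<h1>(.*?)</h1>', webpage).group(1))  # '' (empty match wins)
print(re.search(r'<h1>(.+?)</h1>', webpage).group(1))  # 'Real title'
```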
@@ -710,12 +710,13 @@ If you want to add support for a new site, you can follow this quick list (assum
         webpage = self._download_webpage(url, video_id)
 
         # TODO more code goes here, for example ...
-        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+        title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
 
         return {
             'id': video_id,
             'title': title,
             'description': self._og_search_description(webpage),
+            'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
             # TODO more properties (see youtube_dl/extractor/common.py)
         }
     ```

@@ -794,7 +795,7 @@ Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/i
 **Please include the full output of youtube-dl when run with `-v`**.
 
-The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
 
 Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):

@@ -53,6 +53,7 @@
 - **Bandcamp:album**
 - **bbc**: BBC
 - **bbc.co.uk**: BBC iPlayer
+- **bbc.co.uk:article**: BBC articles
 - **BeatportPro**
 - **Beeg**
 - **BehindKink**
@@ -92,6 +93,7 @@
 - **Clipsyndicate**
 - **Cloudy**
 - **Clubic**
+- **Clyp**
 - **cmt.com**
 - **CNET**
 - **CNN**
@@ -159,6 +161,7 @@
 - **facebook**
 - **faz.net**
 - **fc2**
+- **Fczenit**
 - **fernsehkritik.tv**
 - **Firstpost**
 - **FiveTV**
@@ -279,7 +282,7 @@
 - **macgamestore**: MacGameStore trailers
 - **mailru**: Видео@Mail.Ru
 - **Malemotion**
-- **MDR**
+- **MDR**: MDR.DE and KiKA
 - **media.ccc.de**
 - **metacafe**
 - **Metacritic**
@@ -514,6 +517,7 @@
 - **SSA**
 - **stanfordoc**: Stanford Open ClassRoom
 - **Steam**
+- **Stitcher**
 - **streamcloud.eu**
 - **StreamCZ**
 - **StreetVoice**
@@ -587,7 +591,8 @@
 - **twitch:stream**
 - **twitch:video**
 - **twitch:vod**
-- **TwitterCard**
+- **twitter**
+- **twitter:card**
 - **Ubu**
 - **udemy**
 - **udemy:course**

setup.py (2 lines changed)
@@ -28,7 +28,7 @@ py2exe_options = {
     "compressed": 1,
     "optimize": 2,
     "dist_dir": '.',
-    "dll_excludes": ['w9xpopen.exe'],
+    "dll_excludes": ['w9xpopen.exe', 'crypt32.dll'],
 }
 
 py2exe_console = [{

@@ -37,12 +37,16 @@ class TestInfoExtractor(unittest.TestCase):
             <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
             <meta content='application/x-shockwave-flash' property='og:video:type'>
             <meta content='Foo' property=og:foobar>
+            <meta name="og:test1" content='foo > < bar'/>
+            <meta name="og:test2" content="foo >//< bar"/>
             '''
         self.assertEqual(ie._og_search_title(html), 'Foo')
         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
         self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
         self.assertEqual(ie._og_search_video_url(html, default=None), None)
         self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
+        self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
+        self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
 
     def test_html_search_meta(self):
         ie = self.ie

@@ -13,8 +13,10 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from youtube_dl.utils import get_filesystem_encoding
 from youtube_dl.compat import (
     compat_getenv,
+    compat_etree_fromstring,
     compat_expanduser,
     compat_shlex_split,
+    compat_str,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
 )
@@ -71,5 +73,20 @@ class TestCompat(unittest.TestCase):
     def test_compat_shlex_split(self):
         self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
 
+    def test_compat_etree_fromstring(self):
+        xml = '''
+            <root foo="bar" spam="中文">
+                <normal>foo</normal>
+                <chinese>中文</chinese>
+                <foo><bar>spam</bar></foo>
+            </root>
+        '''
+        doc = compat_etree_fromstring(xml.encode('utf-8'))
+        self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
+        self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
+        self.assertTrue(isinstance(doc.find('normal').text, compat_str))
+        self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
+        self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
+
 if __name__ == '__main__':
     unittest.main()

@@ -102,7 +102,7 @@ def generator(test_case):
 
     params = get_params(test_case.get('params', {}))
     if is_playlist and 'playlist' not in test_case:
-        params.setdefault('extract_flat', True)
+        params.setdefault('extract_flat', 'in_playlist')
         params.setdefault('skip_download', True)
 
     ydl = YoutubeDL(params, auto_init=False)

@@ -28,6 +28,7 @@ from youtube_dl.extractor import (
     ThePlatformFeedIE,
     RTVEALaCartaIE,
     FunnyOrDieIE,
+    DemocracynowIE,
 )
 
@@ -346,5 +347,25 @@ class TestFunnyOrDieSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')
 
 
+class TestDemocracynowSubtitles(BaseTestSubtitles):
+    url = 'http://www.democracynow.org/shows/2015/7/3'
+    IE = DemocracynowIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+
+    def test_subtitles_in_page(self):
+        self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+
+
 if __name__ == '__main__':
     unittest.main()

@@ -68,6 +68,9 @@ from youtube_dl.utils import (
     cli_valueless_option,
     cli_bool_option,
 )
+from youtube_dl.compat import (
+    compat_etree_fromstring,
+)
 
 
 class TestUtil(unittest.TestCase):
@@ -233,6 +236,7 @@ class TestUtil(unittest.TestCase):
             unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
             '20150202')
+        self.assertEqual(unified_strdate('25-09-2014'), '20140925')
         self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
 
     def test_find_xpath_attr(self):
         testxml = '''<root>
@@ -242,7 +246,7 @@ class TestUtil(unittest.TestCase):
             <node x="b" y="d" />
             <node x="" />
         </root>'''
-        doc = xml.etree.ElementTree.fromstring(testxml)
+        doc = compat_etree_fromstring(testxml)
 
         self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
         self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
@@ -263,7 +267,7 @@ class TestUtil(unittest.TestCase):
                 <url>http://server.com/download.mp3</url>
             </media:song>
         </root>'''
-        doc = xml.etree.ElementTree.fromstring(testxml)
+        doc = compat_etree_fromstring(testxml)
         find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
         self.assertTrue(find('media:song') is not None)
         self.assertEqual(find('media:song/media:author').text, 'The Author')
@@ -275,9 +279,16 @@ class TestUtil(unittest.TestCase):
         p = xml.etree.ElementTree.SubElement(div, 'p')
         p.text = 'Foo'
         self.assertEqual(xpath_element(doc, 'div/p'), p)
+        self.assertEqual(xpath_element(doc, ['div/p']), p)
+        self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
         self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
+        self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
         self.assertTrue(xpath_element(doc, 'div/bar') is None)
+        self.assertTrue(xpath_element(doc, ['div/bar']) is None)
+        self.assertTrue(xpath_element(doc, ['div/bar', 'div/baz']) is None)
         self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
+        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
+        self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
 
     def test_xpath_text(self):
         testxml = '''<root>
@@ -285,7 +296,7 @@ class TestUtil(unittest.TestCase):
                 <p>Foo</p>
             </div>
         </root>'''
-        doc = xml.etree.ElementTree.fromstring(testxml)
+        doc = compat_etree_fromstring(testxml)
         self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
         self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
         self.assertTrue(xpath_text(doc, 'div/bar') is None)
@@ -297,7 +308,7 @@ class TestUtil(unittest.TestCase):
                 <p x="a">Foo</p>
             </div>
         </root>'''
-        doc = xml.etree.ElementTree.fromstring(testxml)
+        doc = compat_etree_fromstring(testxml)
         self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
         self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
         self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
@@ -425,6 +436,8 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
+        self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
+        self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
 
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
@@ -495,6 +508,9 @@ class TestUtil(unittest.TestCase):
             "playlist":[{"controls":{"all":null}}]
         }''')
 
         inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
         self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
 
+        inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
+        json_code = js_to_json(inp)
+        self.assertEqual(json.loads(json_code), json.loads(inp))

@@ -57,5 +57,14 @@ class TestYoutubeLists(unittest.TestCase):
         entries = result['entries']
         self.assertEqual(len(entries), 100)
 
+    def test_youtube_flat_playlist_titles(self):
+        dl = FakeYDL()
+        dl.params['extract_flat'] = True
+        ie = YoutubePlaylistIE(dl)
+        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
+        self.assertIsPlaylist(result)
+        for entry in result['entries']:
+            self.assertTrue(entry.get('title'))
+
 if __name__ == '__main__':
     unittest.main()

@@ -37,6 +37,7 @@ from .compat import (
     compat_tokenize_tokenize,
     compat_urllib_error,
     compat_urllib_request,
+    compat_urllib_request_DataHandler,
 )
 from .utils import (
     ContentTooShortError,
@@ -571,7 +572,7 @@ class YoutubeDL(object):
                                  if v is not None)
             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
 
-            outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL))
+            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
             tmpl = compat_expanduser(outtmpl)
             filename = tmpl % template_dict
             # Temporary fix for #4787
@@ -579,7 +580,7 @@ class YoutubeDL(object):
             # to workaround encoding issues with subprocess on python2 @ Windows
             if sys.version_info < (3, 0) and sys.platform == 'win32':
                 filename = encodeFilename(filename, True).decode(preferredencoding())
-            return filename
+            return sanitize_path(filename)
         except ValueError as err:
             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None
@@ -1967,8 +1968,9 @@ class YoutubeDL(object):
         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
+        data_handler = compat_urllib_request_DataHandler()
         opener = compat_urllib_request.build_opener(
-            proxy_handler, https_handler, cookie_processor, ydlh)
+            proxy_handler, https_handler, cookie_processor, ydlh, data_handler)
 
         # Delete the default user-agent header, which would otherwise apply in
         # cases where our custom HTTP handler doesn't come into play

@@ -1,7 +1,10 @@
 from __future__ import unicode_literals
 
+import binascii
 import collections
+import email
 import getpass
+import io
 import optparse
 import os
 import re
@@ -11,6 +14,7 @@ import socket
 import subprocess
 import sys
 import itertools
+import xml.etree.ElementTree
 
 
 try:
@@ -38,6 +42,11 @@ try:
 except ImportError:  # Python 2
     import urlparse as compat_urlparse
 
+try:
+    import urllib.response as compat_urllib_response
+except ImportError:  # Python 2
+    import urllib as compat_urllib_response
+
 try:
     import http.cookiejar as compat_cookiejar
 except ImportError:  # Python 2
@@ -155,6 +164,40 @@ except ImportError:  # Python 2
         string = string.replace('+', ' ')
         return compat_urllib_parse_unquote(string, encoding, errors)
 
+try:
+    from urllib.request import DataHandler as compat_urllib_request_DataHandler
+except ImportError:  # Python < 3.4
+    # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
+    class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
+        def data_open(self, req):
+            # data URLs as specified in RFC 2397.
+            #
+            # ignores POSTed data
+            #
+            # syntax:
+            # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
+            # mediatype := [ type "/" subtype ] *( ";" parameter )
+            # data      := *urlchar
+            # parameter := attribute "=" value
+            url = req.get_full_url()
+
+            scheme, data = url.split(":", 1)
+            mediatype, data = data.split(",", 1)
+
+            # even base64 encoded data URLs might be quoted so unquote in any case:
+            data = compat_urllib_parse_unquote_to_bytes(data)
+            if mediatype.endswith(";base64"):
+                data = binascii.a2b_base64(data)
+                mediatype = mediatype[:-7]
+
+            if not mediatype:
+                mediatype = "text/plain;charset=US-ASCII"
+
+            headers = email.message_from_string(
+                "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+
+            return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
+
 try:
     compat_basestring = basestring  # Python 2
 except NameError:
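The backport mirrors Python 3.4's `urllib.request.DataHandler`; a minimal sketch of what the handler enables once installed in an opener (the same wiring `YoutubeDL` now performs), with a throwaway base64 payload:

```python
import base64

from youtube_dl.compat import (
    compat_urllib_request,
    compat_urllib_request_DataHandler,
)

# an opener with the data handler can fetch RFC 2397 data: URLs directly
opener = compat_urllib_request.build_opener(compat_urllib_request_DataHandler())
url = 'data:text/plain;base64,' + base64.b64encode(b'hello').decode('ascii')
print(opener.open(url).read())  # b'hello'
```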
@@ -170,6 +213,43 @@
 except ImportError:  # Python 2.6
     from xml.parsers.expat import ExpatError as compat_xml_parse_error
 
+if sys.version_info[0] >= 3:
+    compat_etree_fromstring = xml.etree.ElementTree.fromstring
+else:
+    # python 2.x tries to encode unicode strings with ascii (see the
+    # XMLParser._fixtext method)
+    etree = xml.etree.ElementTree
+
+    try:
+        _etree_iter = etree.Element.iter
+    except AttributeError:  # Python <=2.6
+        def _etree_iter(root):
+            for el in root.findall('*'):
+                yield el
+                for sub in _etree_iter(el):
+                    yield sub
+
+    # on 2.6 XML doesn't have a parser argument, function copied from CPython
+    # 2.7 source
+    def _XML(text, parser=None):
+        if not parser:
+            parser = etree.XMLParser(target=etree.TreeBuilder())
+        parser.feed(text)
+        return parser.close()
+
+    def _element_factory(*args, **kwargs):
+        el = etree.Element(*args, **kwargs)
+        for k, v in el.items():
+            if isinstance(v, bytes):
+                el.set(k, v.decode('utf-8'))
+        return el
+
+    def compat_etree_fromstring(text):
+        doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
+        for el in _etree_iter(doc):
+            if el.text is not None and isinstance(el.text, bytes):
+                el.text = el.text.decode('utf-8')
+        return doc
+
 try:
     from urllib.parse import parse_qs as compat_parse_qs
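A quick sketch of the guarantee this shim provides (the same property the new `test_compat_etree_fromstring` asserts): attribute values and element text come back as text, not bytes, on Python 2 as well as 3:

```python
from youtube_dl.compat import compat_etree_fromstring, compat_str

doc = compat_etree_fromstring('<root foo="bar"><chinese>中文</chinese></root>'.encode('utf-8'))
assert isinstance(doc.attrib['foo'], compat_str)         # unicode even on Python 2
assert isinstance(doc.find('chinese').text, compat_str)  # likewise for element text
```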
@@ -465,6 +545,7 @@ __all__ = [
     'compat_chr',
     'compat_cookiejar',
     'compat_cookies',
+    'compat_etree_fromstring',
     'compat_expanduser',
     'compat_get_terminal_size',
     'compat_getenv',
@@ -489,6 +570,8 @@ __all__ = [
     'compat_urllib_parse_unquote_to_bytes',
     'compat_urllib_parse_urlparse',
     'compat_urllib_request',
+    'compat_urllib_request_DataHandler',
+    'compat_urllib_response',
     'compat_urlparse',
     'compat_urlretrieve',
     'compat_xml_parse_error',

@@ -5,12 +5,13 @@ import io
 import itertools
 import os
 import time
-import xml.etree.ElementTree as etree
 
 from .fragment import FragmentFD
 from ..compat import (
+    compat_etree_fromstring,
     compat_urlparse,
     compat_urllib_error,
+    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     encodeFilename,
@@ -285,9 +286,11 @@ class F4mFD(FragmentFD):
         man_url = info_dict['url']
         requested_bitrate = info_dict.get('tbr')
         self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
-        manifest = self.ydl.urlopen(man_url).read()
+        urlh = self.ydl.urlopen(man_url)
+        man_url = urlh.geturl()
+        manifest = urlh.read()
 
-        doc = etree.fromstring(manifest)
+        doc = compat_etree_fromstring(manifest)
         formats = [(int(f.attrib.get('bitrate', -1)), f)
                    for f in self._get_unencrypted_media(doc)]
         if requested_bitrate is None:
@@ -329,20 +332,25 @@ class F4mFD(FragmentFD):
         if not live:
             write_metadata_tag(dest_stream, metadata)
 
+        base_url_parsed = compat_urllib_parse_urlparse(base_url)
+
         self._start_frag_download(ctx)
 
         frags_filenames = []
         while fragments_list:
             seg_i, frag_i = fragments_list.pop(0)
             name = 'Seg%d-Frag%d' % (seg_i, frag_i)
-            url = base_url + name
+            query = []
+            if base_url_parsed.query:
+                query.append(base_url_parsed.query)
             if akamai_pv:
-                url += '?' + akamai_pv.strip(';')
+                query.append(akamai_pv.strip(';'))
             if info_dict.get('extra_param_to_segment_url'):
-                url += info_dict.get('extra_param_to_segment_url')
+                query.append(info_dict['extra_param_to_segment_url'])
+            url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
             frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
             try:
-                success = ctx['dl'].download(frag_filename, {'url': url})
+                success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
                 if not success:
                     return False
                 (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
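The move from string concatenation to `_replace` on the parsed base URL is what keeps an existing query string on the manifest URL attached to every fragment request; a small sketch with hypothetical URLs:

```python
from youtube_dl.compat import compat_urllib_parse_urlparse

base = compat_urllib_parse_urlparse('http://example.com/hds/stream/?hdcore=3.1.0')  # hypothetical
query = [base.query, 'extra=param']  # pre-existing query plus an extra segment parameter
fragment_url = base._replace(path=base.path + 'Seg1-Frag1', query='&'.join(query)).geturl()
print(fragment_url)
# http://example.com/hds/stream/Seg1-Frag1?hdcore=3.1.0&extra=param
```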
@@ -30,7 +30,7 @@ class HlsFD(FileDownloader):
 
         args = [ffpp.executable, '-y']
 
-        if info_dict['http_headers']:
+        if info_dict['http_headers'] and re.match(r'^https?://', url):
             # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
             # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
             args += [

@@ -45,6 +45,7 @@ from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbc import (
     BBCCoUkIE,
+    BBCCoUkArticleIE,
     BBCIE,
 )
 from .beeg import BeegIE
@@ -89,6 +90,7 @@ from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
 from .cloudy import CloudyIE
 from .clubic import ClubicIE
+from .clyp import ClypIE
 from .cmt import CMTIE
 from .cnet import CNETIE
 from .cnn import (
@@ -122,6 +124,7 @@ from .dbtv import DBTVIE
 from .dcn import DCNIE
 from .dctp import DctpTvIE
 from .deezer import DeezerPlaylistIE
+from .democracynow import DemocracynowIE
 from .dfb import DFBIE
 from .dhm import DHMIE
 from .dotsub import DotsubIE
@@ -209,7 +212,10 @@ from .gfycat import GfycatIE
 from .giantbomb import GiantBombIE
 from .giga import GigaIE
 from .glide import GlideIE
-from .globo import GloboIE
+from .globo import (
+    GloboIE,
+    GloboArticleIE,
+)
 from .godtube import GodTubeIE
 from .goldenmoustache import GoldenMoustacheIE
 from .golem import GolemIE
@@ -586,6 +592,7 @@ from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE, SpiegelArticleIE
 from .spiegeltv import SpiegeltvIE
 from .spike import SpikeIE
+from .stitcher import StitcherIE
 from .sport5 import Sport5IE
 from .sportbox import (
     SportBoxIE,
@@ -690,7 +697,7 @@ from .twitch import (
     TwitchBookmarksIE,
     TwitchStreamIE,
 )
-from .twitter import TwitterCardIE
+from .twitter import TwitterCardIE, TwitterIE
 from .ubu import UbuIE
 from .udemy import (
     UdemyIE,
@@ -717,7 +724,6 @@ from .vh1 import VH1IE
 from .vice import ViceIE
 from .viddler import ViddlerIE
 from .videodetective import VideoDetectiveIE
-from .videolecturesnet import VideoLecturesNetIE
 from .videofyme import VideofyMeIE
 from .videomega import VideoMegaIE
 from .videopremium import VideoPremiumIE
@@ -728,6 +734,7 @@ from .vidto import VidtoIE
 from .vidzi import VidziIE
 from .vier import VierIE, VierVideosIE
 from .viewster import ViewsterIE
+from .viidea import ViideaIE
 from .vimeo import (
     VimeoIE,
     VimeoAlbumIE,

@@ -12,7 +12,7 @@ from ..utils import (
 
 class ABCIE(InfoExtractor):
     IE_NAME = 'abc.net.au'
-    _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
+    _VALID_URL = r'http://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
@@ -36,6 +36,18 @@ class ABCIE(InfoExtractor):
             'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
         },
         'add_ie': ['Youtube'],
+    }, {
+        'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
+        'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
+        'info_dict': {
+            'id': '6880080',
+            'ext': 'mp3',
+            'title': 'NAB lifts interest rates, following Westpac and CBA',
+            'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
+        },
+    }, {
+        'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -43,7 +55,7 @@ class ABCIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
 
         mobj = re.search(
-            r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
+            r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
             webpage)
         if mobj is None:
             raise ExtractorError('Unable to extract video urls')
@@ -60,11 +72,13 @@ class ABCIE(InfoExtractor):
 
         formats = [{
             'url': url_info['url'],
+            'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
             'width': int_or_none(url_info.get('width')),
             'height': int_or_none(url_info.get('height')),
             'tbr': int_or_none(url_info.get('bitrate')),
+            'filesize': int_or_none(url_info.get('filesize')),
         } for url_info in urls_info]
 
         self._sort_formats(formats)
 
         return {

@@ -183,7 +183,7 @@ class AdultSwimIE(InfoExtractor):
                     media_url = file_el.text
                     if determine_ext(media_url) == 'm3u8':
                         formats.extend(self._extract_m3u8_formats(
-                            media_url, segment_title, 'mp4', 'm3u8_native', preference=0, m3u8_id='hls'))
+                            media_url, segment_title, 'mp4', preference=0, m3u8_id='hls'))
                     else:
                         formats.append({
                             'format_id': '%s_%s' % (bitrate, ftype),

@@ -26,8 +26,8 @@ class AnitubeIE(InfoExtractor):
         video_id = mobj.group('id')
 
         webpage = self._download_webpage(url, video_id)
-        key = self._html_search_regex(
-            r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key')
+        key = self._search_regex(
+            r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key')
 
         config_xml = self._download_xml(
             'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)

@@ -14,8 +14,8 @@ from ..utils import (
     parse_duration,
     unified_strdate,
     xpath_text,
-    parse_xml,
 )
+from ..compat import compat_etree_fromstring
 
 
 class ARDMediathekIE(InfoExtractor):
@@ -161,7 +161,7 @@ class ARDMediathekIE(InfoExtractor):
             raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)
 
         if re.search(r'[\?&]rss($|[=&])', url):
-            doc = parse_xml(webpage)
+            doc = compat_etree_fromstring(webpage.encode('utf-8'))
             if doc.tag == 'rss':
                 return GenericIE()._extract_rss(url, video_id, doc)
 

@@ -10,6 +10,8 @@ from ..compat import (
 )
 from ..utils import (
     ExtractorError,
+    float_or_none,
+    int_or_none,
 )
 
 
@@ -52,11 +54,11 @@ class BandcampIE(InfoExtractor):
                     ext, abr_str = format_id.split('-', 1)
                     formats.append({
                         'format_id': format_id,
-                        'url': format_url,
+                        'url': self._proto_relative_url(format_url, 'http:'),
                         'ext': ext,
                         'vcodec': 'none',
                         'acodec': ext,
-                        'abr': int(abr_str),
+                        'abr': int_or_none(abr_str),
                     })
 
             self._sort_formats(formats)
@@ -65,7 +67,7 @@ class BandcampIE(InfoExtractor):
                 'id': compat_str(data['id']),
                 'title': data['title'],
                 'formats': formats,
-                'duration': float(data['duration']),
+                'duration': float_or_none(data.get('duration')),
             }
         else:
             raise ExtractorError('No free songs found')

@@ -2,7 +2,6 @@
 from __future__ import unicode_literals
 
 import re
-import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..utils import (
@@ -14,13 +13,16 @@ from ..utils import (
     remove_end,
     unescapeHTML,
 )
-from ..compat import compat_HTTPError
+from ..compat import (
+    compat_etree_fromstring,
+    compat_HTTPError,
+)
 
 
 class BBCCoUkIE(InfoExtractor):
     IE_NAME = 'bbc.co.uk'
     IE_DESC = 'BBC iPlayer'
-    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
+    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:programmes/(?!articles/)|iplayer(?:/[^/]+)?/(?:episode/|playlist/))|music/clips[/#])(?P<id>[\da-z]{8})'
 
     _MEDIASELECTOR_URLS = [
         # Provides HQ HLS streams with even better quality that pc mediaset but fails
@@ -344,7 +346,7 @@ class BBCCoUkIE(InfoExtractor):
                 url, programme_id, 'Downloading media selection XML')
         except ExtractorError as ee:
             if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
-                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
+                media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8'))
             else:
                 raise
         return self._process_media_selector(media_selection, programme_id)
@@ -421,7 +423,7 @@ class BBCCoUkIE(InfoExtractor):
                 continue
             title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
             description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
-            description = description_el.text if description_el else None
+            description = description_el.text if description_el is not None else None
 
             def get_programme_id(item):
                 def get_from_attributes(item):
@@ -625,6 +627,7 @@ class BBCIE(BBCCoUkIE):
             'id': 'p02xycnp',
             'ext': 'mp4',
             'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
+            'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
             'duration': 140,
         },
         'params': {
@@ -651,7 +654,7 @@ class BBCIE(BBCCoUkIE):
 
     @classmethod
     def suitable(cls, url):
-        return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url)
+        return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url)
 
     def _extract_from_media_meta(self, media_meta, video_id):
         # Direct links to media in media metadata (e.g.
@@ -902,3 +905,33 @@ class BBCIE(BBCCoUkIE):
             })
 
         return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
+
+
+class BBCCoUkArticleIE(InfoExtractor):
+    _VALID_URL = 'http://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
+    IE_NAME = 'bbc.co.uk:article'
+    IE_DESC = 'BBC articles'
+
+    _TEST = {
+        'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
+        'info_dict': {
+            'id': '3jNQLTMrPlYGTBn0WV6M2MS',
+            'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
+            'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
+        },
+        'playlist_count': 4,
+        'add_ie': ['BBCCoUk'],
+    }
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage).strip()
+
+        entries = [self.url_result(programme_url) for programme_url in re.findall(
+            r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
+
+        return self.playlist_result(entries, playlist_id, title, description)

@@ -33,6 +33,8 @@ class BeegIE(InfoExtractor):
 
         formats = []
         for format_id, video_url in video.items():
+            if not video_url:
+                continue
             height = self._search_regex(
                 r'^(\d+)[pP]$', format_id, 'height', default=None)
             if not height:

@@ -4,9 +4,11 @@ from __future__ import unicode_literals
 import re
 import itertools
 import json
-import xml.etree.ElementTree as ET
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_etree_fromstring,
+)
 from ..utils import (
     int_or_none,
     unified_strdate,
@@ -88,7 +90,7 @@ class BiliBiliIE(InfoExtractor):
             except ValueError:
                 pass
 
-        lq_doc = ET.fromstring(lq_page)
+        lq_doc = compat_etree_fromstring(lq_page)
         lq_durls = lq_doc.findall('./durl')
 
         hq_doc = self._download_xml(

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 
 import re
 import json
-import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..compat import (
+    compat_etree_fromstring,
     compat_parse_qs,
     compat_str,
     compat_urllib_parse,
@@ -119,7 +119,7 @@ class BrightcoveIE(InfoExtractor):
         object_str = fix_xml_ampersands(object_str)
 
         try:
-            object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
+            object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
         except compat_xml_parse_error:
             return
 

@@ -4,38 +4,53 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..utils import parse_duration
 
 
 class Canalc2IE(InfoExtractor):
     IE_NAME = 'canalc2.tv'
-    _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?canalc2\.tv/video/(?P<id>\d+)'
 
     _TEST = {
-        'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
+        'url': 'http://www.canalc2.tv/video/12163',
         'md5': '060158428b650f896c542dfbb3d6487f',
         'info_dict': {
             'id': '12163',
-            'ext': 'mp4',
-            'title': 'Terrasses du Numérique'
+            'ext': 'flv',
+            'title': 'Terrasses du Numérique',
+            'duration': 122,
         },
+        'params': {
+            'skip_download': True,  # Requires rtmpdump
+        }
     }
 
     def _real_extract(self, url):
-        video_id = re.match(self._VALID_URL, url).group('id')
-        # We need to set the voir field for getting the file name
-        url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        file_name = self._search_regex(
-            r"so\.addVariable\('file','(.*?)'\);",
-            webpage, 'file name')
-        video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name
+        video_url = self._search_regex(
+            r'jwplayer\((["\'])Player\1\)\.setup\({[^}]*file\s*:\s*(["\'])(?P<file>.+?)\2',
+            webpage, 'video_url', group='file')
+        formats = [{'url': video_url}]
+        if video_url.startswith('rtmp://'):
+            rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
+            formats[0].update({
+                'url': rtmp.group('url'),
+                'ext': 'flv',
+                'app': rtmp.group('app'),
+                'play_path': rtmp.group('play_path'),
+                'page_url': url,
+            })
 
         title = self._html_search_regex(
-            r'class="evenement8">(.*?)</a>', webpage, 'title')
+            r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title')
+        duration = parse_duration(self._search_regex(
+            r'id=["\']video_duree["\'][^>]*>([^<]+)',
+            webpage, 'duration', fatal=False))
 
         return {
             'id': video_id,
-            'ext': 'mp4',
-            'url': video_url,
             'title': title,
+            'duration': duration,
+            'formats': formats,
         }

youtube_dl/extractor/clyp.py (new file, 57 lines)
@@ -0,0 +1,57 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    parse_iso8601,
+)
+
+
+class ClypIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)'
+    _TEST = {
+        'url': 'https://clyp.it/ojz2wfah',
+        'md5': '1d4961036c41247ecfdcc439c0cddcbb',
+        'info_dict': {
+            'id': 'ojz2wfah',
+            'ext': 'mp3',
+            'title': 'Krisson80 - bits wip wip',
+            'description': '#Krisson80BitsWipWip #chiptune\n#wip',
+            'duration': 263.21,
+            'timestamp': 1443515251,
+            'upload_date': '20150929',
+        },
+    }
+
+    def _real_extract(self, url):
+        audio_id = self._match_id(url)
+
+        metadata = self._download_json(
+            'https://api.clyp.it/%s' % audio_id, audio_id)
+
+        formats = []
+        for secure in ('', 'Secure'):
+            for ext in ('Ogg', 'Mp3'):
+                format_id = '%s%s' % (secure, ext)
+                format_url = metadata.get('%sUrl' % format_id)
+                if format_url:
+                    formats.append({
+                        'url': format_url,
+                        'format_id': format_id,
+                        'vcodec': 'none',
+                    })
+        self._sort_formats(formats)
+
+        title = metadata['Title']
+        description = metadata.get('Description')
+        duration = float_or_none(metadata.get('Duration'))
+        timestamp = parse_iso8601(metadata.get('DateCreated'))
+
+        return {
+            'id': audio_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'timestamp': timestamp,
+            'formats': formats,
+        }
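Once registered in `extractor/__init__.py`, the new extractor is reachable through the normal embedding API; a minimal sketch using the URL from the test above (requires network access):

```python
import youtube_dl

ydl = youtube_dl.YoutubeDL({'quiet': True})
info = ydl.extract_info('https://clyp.it/ojz2wfah', download=False)
print(info['id'], info['title'])  # ojz2wfah Krisson80 - bits wip wip
```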
@@ -10,13 +10,11 @@ import re
 import socket
 import sys
 import time
-import xml.etree.ElementTree
 
 from ..compat import (
     compat_cookiejar,
     compat_cookies,
     compat_getpass,
     compat_HTTPError,
     compat_http_client,
     compat_urllib_error,
     compat_urllib_parse,
@@ -24,6 +22,7 @@ from ..compat import (
     compat_urllib_request,
     compat_urlparse,
     compat_str,
+    compat_etree_fromstring,
 )
 from ..utils import (
     NO_DEFAULT,
@@ -172,6 +171,7 @@ class InfoExtractor(object):
     view_count:     How many users have watched the video on the platform.
     like_count:     Number of positive ratings of the video
     dislike_count:  Number of negative ratings of the video
+    repost_count:   Number of reposts of the video
     average_rating: Average rating give by users, the scale used depends on the webpage
     comment_count:  Number of comments on the video
     comments:       A list of comments, each with one or more of the following
@@ -310,11 +310,11 @@ class InfoExtractor(object):
     @classmethod
     def ie_key(cls):
         """A string for getting the InfoExtractor with get_info_extractor"""
-        return cls.__name__[:-2]
+        return compat_str(cls.__name__[:-2])
 
     @property
     def IE_NAME(self):
-        return type(self).__name__[:-2]
+        return compat_str(type(self).__name__[:-2])
 
     def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
         """ Returns the response handle """
@@ -461,7 +461,7 @@ class InfoExtractor(object):
             return xml_string
         if transform_source:
             xml_string = transform_source(xml_string)
-        return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
+        return compat_etree_fromstring(xml_string.encode('utf-8'))
 
     def _download_json(self, url_or_request, video_id,
                        note='Downloading JSON metadata',
@@ -645,7 +645,7 @@ class InfoExtractor(object):
     # Helper functions for extracting OpenGraph info
     @staticmethod
     def _og_regexes(prop):
-        content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\'|\s*([^\s"\'=<>`]+?))'
+        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
         property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
                        % {'prop': re.escape(prop)})
     template = r'<meta[^>]+?%s[^>]+?%s'
@@ -841,7 +841,7 @@ class InfoExtractor(object):
             self._request_webpage(url, video_id, 'Checking %s URL' % item)
             return True
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError):
+            if isinstance(e.cause, compat_urllib_error.URLError):
                 self.to_screen(
                     '%s: %s URL is invalid, skipping' % (video_id, item))
                 return False
@@ -943,13 +943,15 @@ class InfoExtractor(object):
             if re.match(r'^https?://', u)
             else compat_urlparse.urljoin(m3u8_url, u))
 
-        m3u8_doc = self._download_webpage(
+        res = self._download_webpage_handle(
             m3u8_url, video_id,
             note=note or 'Downloading m3u8 information',
             errnote=errnote or 'Failed to download m3u8 information',
             fatal=fatal)
-        if m3u8_doc is False:
-            return m3u8_doc
+        if res is False:
+            return res
+        m3u8_doc, urlh = res
+        m3u8_url = urlh.geturl()
         last_info = None
         last_media = None
         kv_rex = re.compile(
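Taking `urlh.geturl()` matters when the m3u8 request was redirected: relative variant playlists must be joined against the final URL, not the requested one. A small illustration with hypothetical URLs:

```python
from youtube_dl.compat import compat_urlparse

requested = 'http://example.com/master.m3u8'         # hypothetical original URL
final = 'http://cdn42.example.com/live/master.m3u8'  # hypothetical URL after a redirect
print(compat_urlparse.urljoin(requested, 'media_b1200.m3u8'))  # wrong host if redirected
print(compat_urlparse.urljoin(final, 'media_b1200.m3u8'))      # correct variant URL
```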
@@ -5,12 +5,12 @@ import re
 import json
 import base64
 import zlib
-import xml.etree.ElementTree
 
 from hashlib import sha1
 from math import pow, sqrt, floor
 from .common import InfoExtractor
 from ..compat import (
+    compat_etree_fromstring,
     compat_urllib_parse,
     compat_urllib_parse_unquote,
     compat_urllib_request,
@@ -21,6 +21,7 @@ from ..utils import (
     bytes_to_intlist,
     intlist_to_bytes,
     int_or_none,
+    lowercase_escape,
     remove_end,
     unified_strdate,
     urlencode_postdata,
@@ -32,6 +33,26 @@ from ..aes import (
 
 
 class CrunchyrollBaseIE(InfoExtractor):
+    _NETRC_MACHINE = 'crunchyroll'
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+        self.report_login()
+        login_url = 'https://www.crunchyroll.com/?a=formhandler'
+        data = urlencode_postdata({
+            'formname': 'RpcApiUser_Login',
+            'name': username,
+            'password': password,
+        })
+        login_request = compat_urllib_request.Request(login_url, data)
+        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        self._download_webpage(login_request, None, False, 'Wrong login info')
+
+    def _real_initialize(self):
+        self._login()
+
     def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
         request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
                    else compat_urllib_request.Request(url_or_request))
@@ -46,10 +67,22 @@ class CrunchyrollBaseIE(InfoExtractor):
         return super(CrunchyrollBaseIE, self)._download_webpage(
             request, video_id, note, errnote, fatal, tries, timeout, encoding)
 
+    @staticmethod
+    def _add_skip_wall(url):
+        parsed_url = compat_urlparse.urlparse(url)
+        qs = compat_urlparse.parse_qs(parsed_url.query)
+        # Always force skip_wall to bypass maturity wall, namely 18+ confirmation message:
+        # > This content may be inappropriate for some people.
+        # > Are you sure you want to continue?
+        # since it's not disabled by default in crunchyroll account's settings.
+        # See https://github.com/rg3/youtube-dl/issues/7202.
+        qs['skip_wall'] = ['1']
+        return compat_urlparse.urlunparse(
+            parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+
 
 class CrunchyrollIE(CrunchyrollBaseIE):
     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
-    _NETRC_MACHINE = 'crunchyroll'
     _TESTS = [{
         'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
         'info_dict': {
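Since `_add_skip_wall` is a static method, its effect is easy to check in isolation; a minimal sketch (the URL is taken from the new playlist test further below):

```python
from youtube_dl.extractor.crunchyroll import CrunchyrollIE

# appends skip_wall=1 so the 18+ confirmation page is bypassed
print(CrunchyrollIE._add_skip_wall('http://www.crunchyroll.com/ladies-versus-butlers'))
# http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1
```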
@ -72,7 +105,7 @@ class CrunchyrollIE(CrunchyrollBaseIE):
|
||||
'id': '589804',
|
||||
'ext': 'flv',
|
||||
'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
|
||||
'description': 'md5:fe2743efedb49d279552926d0bd0cd9e',
|
||||
'description': 'md5:2fbc01f90b87e8e9137296f37b461c12',
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'uploader': 'Danny Choo Network',
|
||||
'upload_date': '20120213',
|
||||
@ -81,10 +114,13 @@ class CrunchyrollIE(CrunchyrollBaseIE):
|
||||
# rtmp
|
||||
'skip_download': True,
|
||||
},
|
||||
|
||||
}, {
|
||||
'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# geo-restricted (US), 18+ maturity wall, non-premium available
|
||||
'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_FORMAT_IDS = {
|
||||
@ -94,24 +130,6 @@ class CrunchyrollIE(CrunchyrollBaseIE):
|
||||
'1080': ('80', '108'),
|
||||
}
|
||||
|
||||
def _login(self):
|
||||
(username, password) = self._get_login_info()
|
||||
if username is None:
|
||||
return
|
||||
self.report_login()
|
||||
login_url = 'https://www.crunchyroll.com/?a=formhandler'
|
||||
data = urlencode_postdata({
|
||||
'formname': 'RpcApiUser_Login',
|
||||
'name': username,
|
||||
'password': password,
|
||||
})
|
||||
login_request = compat_urllib_request.Request(login_url, data)
|
||||
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
self._download_webpage(login_request, None, False, 'Wrong login info')
|
||||
|
||||
def _real_initialize(self):
|
||||
self._login()
|
||||
|
||||
def _decrypt_subtitles(self, data, iv, id):
|
||||
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
|
||||
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
|
||||
@ -217,7 +235,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
return output
|
||||
|
||||
def _extract_subtitles(self, subtitle):
|
||||
sub_root = xml.etree.ElementTree.fromstring(subtitle)
|
||||
sub_root = compat_etree_fromstring(subtitle)
|
||||
return [{
|
||||
'ext': 'srt',
|
||||
'data': self._convert_subtitles_to_srt(sub_root),
|
||||
@ -228,7 +246,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
|
||||
def _get_subtitles(self, video_id, webpage):
|
||||
subtitles = {}
|
||||
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
|
||||
for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
|
||||
sub_page = self._download_webpage(
|
||||
'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
|
||||
video_id, note='Downloading subtitles for ' + sub_name)
|
||||
@ -254,7 +272,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
else:
|
||||
webpage_url = 'http://www.' + mobj.group('url')
|
||||
|
||||
webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
|
||||
webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage')
|
||||
note_m = self._html_search_regex(
|
||||
r'<div class="showmedia-trailer-notice">(.+?)</div>',
|
||||
webpage, 'trailer-notice', default='')
|
||||
@ -270,11 +288,15 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
if 'To view this, please log in to verify you are 18 or older.' in webpage:
|
||||
self.raise_login_required()
|
||||
|
||||
video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
|
||||
video_title = self._html_search_regex(
|
||||
r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
|
||||
webpage, 'video_title')
|
||||
video_title = re.sub(r' {2,}', ' ', video_title)
|
||||
video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
|
||||
if not video_description:
|
||||
video_description = None
|
||||
video_description = self._html_search_regex(
|
||||
r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id,
|
||||
webpage, 'description', default=None)
|
||||
if video_description:
|
||||
video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
|
||||
video_upload_date = self._html_search_regex(
|
||||
[r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
|
||||
webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
|
||||
@ -352,7 +374,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
|
||||
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
|
||||
IE_NAME = "crunchyroll:playlist"
|
||||
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
|
||||
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
|
||||
@ -361,12 +383,25 @@ class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
|
||||
'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
|
||||
},
|
||||
'playlist_count': 13,
|
||||
}, {
|
||||
# geo-restricted (US), 18+ maturity wall, non-premium available
|
||||
'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
|
||||
'info_dict': {
|
||||
'id': 'cosplay-complex-ova',
|
||||
'title': 'Cosplay Complex OVA'
|
||||
},
|
||||
'playlist_count': 3,
|
||||
'skip': 'Georestricted',
|
||||
}, {
|
||||
# geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14
|
||||
'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
show_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, show_id)
|
||||
webpage = self._download_webpage(self._add_skip_wall(url), show_id)
|
||||
title = self._html_search_regex(
|
||||
r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
|
||||
webpage, 'title')
|
||||
|
@ -96,6 +96,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
'uploader': 'HotWaves1012',
|
||||
'age_limit': 18,
|
||||
}
|
||||
},
|
||||
# geo-restricted, player v5
|
||||
{
|
||||
'url': 'http://www.dailymotion.com/video/xhza0o',
|
||||
'only_matching': True,
|
||||
}
|
||||
]
|
||||
|
||||
@ -124,6 +129,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
if player_v5:
|
||||
player = self._parse_json(player_v5, video_id)
|
||||
metadata = player['metadata']
|
||||
|
||||
self._check_error(metadata)
|
||||
|
||||
formats = []
|
||||
for quality, media_list in metadata['qualities'].items():
|
||||
for media in media_list:
|
||||
@ -133,9 +141,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
type_ = media.get('type')
|
||||
if type_ == 'application/vnd.lumberjack.manifest':
|
||||
continue
|
||||
if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
media_url, video_id, 'mp4', m3u8_id='hls'))
|
||||
ext = determine_ext(media_url)
|
||||
if type_ == 'application/x-mpegURL' or ext == 'm3u8':
|
||||
m3u8_formats = self._extract_m3u8_formats(
|
||||
media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
|
||||
if m3u8_formats:
|
||||
formats.extend(m3u8_formats)
|
||||
elif type_ == 'application/f4m' or ext == 'f4m':
|
||||
f4m_formats = self._extract_f4m_formats(
|
||||
media_url, video_id, preference=-1, f4m_id='hds', fatal=False)
|
||||
if f4m_formats:
|
||||
formats.extend(f4m_formats)
|
||||
else:
|
||||
f = {
|
||||
'url': media_url,
|
||||
@ -201,9 +217,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
'video info', flags=re.MULTILINE),
|
||||
video_id)
|
||||
|
||||
if info.get('error') is not None:
|
||||
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
|
||||
raise ExtractorError(msg, expected=True)
|
||||
self._check_error(info)
|
||||
|
||||
formats = []
|
||||
for (key, format_id) in self._FORMATS:
|
||||
@ -246,6 +260,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
'duration': info['duration']
|
||||
}
|
||||
|
||||
def _check_error(self, info):
|
||||
if info.get('error') is not None:
|
||||
raise ExtractorError(
|
||||
'%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True)
|
||||
|
||||
def _get_subtitles(self, video_id, webpage):
|
||||
try:
|
||||
sub_list = self._download_webpage(
|
||||
|
88
youtube_dl/extractor/democracynow.py
Normal file
88
youtube_dl/extractor/democracynow.py
Normal file
@ -0,0 +1,88 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import os.path

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
url_basename,
remove_start,
)


class DemocracynowIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?democracynow.org/(?P<id>[^\?]*)'
IE_NAME = 'democracynow'
_TESTS = [{
'url': 'http://www.democracynow.org/shows/2015/7/3',
'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
'info_dict': {
'id': '2015-0703-001',
'ext': 'mp4',
'title': 'July 03, 2015 - Democracy Now!',
'description': 'A daily independent global news hour with Amy Goodman & Juan González "What to the Slave is 4th of July?": James Earl Jones Reads Frederick Douglass\u2019 Historic Speech : "This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag : "We Shall Overcome": Remembering Folk Icon, Activist Pete Seeger in His Own Words & Songs',
},
}, {
'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree',
'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
'info_dict': {
'id': '2015-0703-001',
'ext': 'mp4',
'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag',
'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21',
},
}]

def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
description = self._og_search_description(webpage)

json_data = self._parse_json(self._search_regex(
r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'),
display_id)
video_id = None
formats = []

default_lang = 'en'

subtitles = {}

def add_subtitle_item(lang, info_dict):
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append(info_dict)

# chapter_file are not subtitles
if 'caption_file' in json_data:
add_subtitle_item(default_lang, {
'url': compat_urlparse.urljoin(url, json_data['caption_file']),
})

for subtitle_item in json_data.get('captions', []):
lang = subtitle_item.get('language', '').lower() or default_lang
add_subtitle_item(lang, {
'url': compat_urlparse.urljoin(url, subtitle_item['url']),
})

for key in ('file', 'audio', 'video'):
media_url = json_data.get(key, '')
if not media_url:
continue
media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
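# illustrative: a media URL ending in 'dn2015-0703-001.mp4' yields video_id '2015-0703-001' (inferred from the test ids above)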
formats.append({
'url': media_url,
})

self._sort_formats(formats)

return {
'id': video_id or display_id,
'title': json_data['title'],
'description': description,
'subtitles': subtitles,
'formats': formats,
}

@ -87,7 +87,7 @@ class EaglePlatformIE(InfoExtractor):
m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
formats = self._extract_m3u8_formats(
m3u8_url, video_id,
'mp4', entry_protocol='m3u8_native')
'mp4', entry_protocol='m3u8_native', m3u8_id='hls')

mp4_url = self._get_video_url(
# Secure mp4 URL is constructed according to Player.prototype.mp4 from

@ -1,39 +1,92 @@
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .brightcove import BrightcoveIE
from ..utils import ExtractorError
from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
int_or_none,
parse_iso8601,
)


class EitbIE(InfoExtractor):
IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
_VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)'

_TEST = {
'add_ie': ['Brightcove'],
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
'id': '2743577154001',
'id': '4090227752001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
# All videos from eitb has this description in the brightcove info
'description': '.',
'uploader': 'Euskal Telebista',
'description': 'Programa de reportajes de actualidad.',
'duration': 3996.76,
'timestamp': 1381789200,
'upload_date': '20131014',
'tags': list,
},
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
chapter_id = mobj.group('chapter_id')
webpage = self._download_webpage(url, chapter_id)
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
if bc_url is None:
raise ExtractorError('Could not extract the Brightcove url')
# The BrightcoveExperience object doesn't contain the video id, we set
# it manually
bc_url += '&%40videoPlayer={0}'.format(chapter_id)
return self.url_result(bc_url, BrightcoveIE.ie_key())
video_id = self._match_id(url)

video = self._download_json(
'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id,
video_id, 'Downloading video JSON')

media = video['web_media'][0]

formats = []
for rendition in media['RENDITIONS']:
video_url = rendition.get('PMD_URL')
if not video_url:
continue
tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000)
format_id = 'http'
if tbr:
format_id += '-%d' % int(tbr)
formats.append({
'url': rendition['PMD_URL'],
'format_id': format_id,
'width': int_or_none(rendition.get('FRAME_WIDTH')),
'height': int_or_none(rendition.get('FRAME_HEIGHT')),
'tbr': tbr,
})

hls_url = media.get('HLS_SURL')
if hls_url:
request = compat_urllib_request.Request(
'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/',
headers={'Referer': url})
token_data = self._download_json(
request, video_id, 'Downloading auth token', fatal=False)
if token_data:
token = token_data.get('token')
if token:
m3u8_formats = self._extract_m3u8_formats(
'%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False)
if m3u8_formats:
formats.extend(m3u8_formats)

hds_url = media.get('HDS_SURL')
if hds_url:
f4m_formats = self._extract_f4m_formats(
'%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'),
video_id, f4m_id='hds', fatal=False)
if f4m_formats:
formats.extend(f4m_formats)

self._sort_formats(formats)

return {
'id': video_id,
'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'],
'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'),
'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'),
'duration': float_or_none(media.get('LENGTH'), 1000),
'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '),
'tags': media.get('TAGS'),
'formats': formats,
}

@ -3,23 +3,20 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_request,
)
from ..compat import compat_urllib_request
from ..utils import (
qualities,
int_or_none,
str_to_int,
)


class ExtremeTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)'
_VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)'
_TESTS = [{
'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'md5': '344d0c6d50e2f16b06e49ca011d8ac69',
'info_dict': {
'id': '652431',
'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
@ -29,12 +26,16 @@ class ExtremeTubeIE(InfoExtractor):
}, {
'url': 'http://www.extremetube.com/gay/video/abcde-1234',
'only_matching': True,
}, {
'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick',
'only_matching': True,
}, {
'url': 'http://www.extremetube.com/video/652431',
'only_matching': True,
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = 'http://www.' + mobj.group('url')
video_id = self._match_id(url)

req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
@ -49,20 +50,36 @@ class ExtremeTubeIE(InfoExtractor):
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))

flash_vars = compat_parse_qs(self._search_regex(
r'<param[^>]+?name="flashvars"[^>]+?value="([^"]+)"', webpage, 'flash vars'))
flash_vars = self._parse_json(
self._search_regex(
r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flash vars'),
video_id)

formats = []
quality = qualities(['180p', '240p', '360p', '480p', '720p', '1080p'])
for k, vals in flash_vars.items():
m = re.match(r'quality_(?P<quality>[0-9]+p)$', k)
if m is not None:
formats.append({
'format_id': m.group('quality'),
'quality': quality(m.group('quality')),
'url': vals[0],
for quality_key, video_url in flash_vars.items():
height = int_or_none(self._search_regex(
r'quality_(\d+)[pP]$', quality_key, 'height', default=None))
if not height:
continue
f = {
'url': video_url,
}
mobj = re.search(
r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url)
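# illustrative: a URL containing '/720P_1500K_12345' yields height 720 and bitrate 1500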
if mobj:
height = int(mobj.group('height'))
bitrate = int(mobj.group('bitrate'))
f.update({
'format_id': '%dp-%dk' % (height, bitrate),
'height': height,
'tbr': bitrate,
})

else:
f.update({
'format_id': '%dp' % height,
'height': height,
})
formats.append(f)
self._sort_formats(formats)

return {

@ -14,7 +14,6 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
int_or_none,
limit_length,
urlencode_postdata,
get_element_by_id,
@ -142,16 +141,20 @@ class FacebookIE(InfoExtractor):
data = dict(json.loads(m.group(1)))
params_raw = compat_urllib_parse_unquote(data['params'])
params = json.loads(params_raw)
video_data = params['video_data'][0]

formats = []
for quality in ['sd', 'hd']:
src = video_data.get('%s_src' % quality)
if src is not None:
formats.append({
'format_id': quality,
'url': src,
})
for format_id, f in params['video_data'].items():
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'preference': -10 if format_id == 'progressive' else 0,
})
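# the -10 preference above downranks 'progressive' entries; presumably these direct URLs are slower or rate-limited compared to the other sources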
if not formats:
raise ExtractorError('Cannot find video formats')

@ -171,7 +174,5 @@ class FacebookIE(InfoExtractor):
'id': video_id,
'title': video_title,
'formats': formats,
'duration': int_or_none(video_data.get('video_duration')),
'thumbnail': video_data.get('thumbnail_src'),
'uploader': uploader,
}

@ -83,6 +83,14 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
if subtitle:
title += ' - %s' % subtitle

subtitles = {}
subtitles_list = [{
'url': subformat['url'],
'ext': subformat.get('format'),
} for subformat in info.get('subtitles', []) if subformat.get('url')]
if subtitles_list:
subtitles['fr'] = subtitles_list

return {
'id': video_id,
'title': title,
@ -91,20 +99,27 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
'subtitles': subtitles,
}


class PluzzIE(FranceTVBaseInfoExtractor):
IE_NAME = 'pluzz.francetv.fr'
_VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'
_VALID_URL = r'https?://(?:m\.)?pluzz\.francetv\.fr/videos/(?P<id>.+?)\.html'

# Can't use tests, videos expire in 7 days

def _real_extract(self, url):
title = re.match(self._VALID_URL, url).group(1)
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r'data-diffusion="(\d+)"', webpage, 'ID')
display_id = self._match_id(url)

webpage = self._download_webpage(url, display_id)

video_id = self._html_search_meta(
'id_video', webpage, 'video id', default=None)
if not video_id:
video_id = self._search_regex(
r'data-diffusion=["\'](\d+)', webpage, 'video id')

return self._extract_video(video_id, 'Pluzz')


@ -120,6 +135,9 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
'title': 'Soir 3',
'upload_date': '20130826',
'timestamp': 1377548400,
'subtitles': {
'fr': 'mincount:2',
},
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',

@ -9,6 +9,7 @@ import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_urllib_parse_unquote,
compat_urllib_request,
compat_urlparse,
@ -21,7 +22,6 @@ from ..utils import (
HEADRequest,
is_html,
orderedSet,
parse_xml,
smuggle_url,
unescapeHTML,
unified_strdate,
@ -141,6 +141,7 @@ class GenericIE(InfoExtractor):
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
@ -1237,7 +1238,7 @@ class GenericIE(InfoExtractor):

# Is it an RSS feed, a SMIL file or a XSPF playlist?
try:
doc = parse_xml(webpage)
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):

@ -14,79 +14,58 @@ from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
str_or_none,
)


class GloboIE(InfoExtractor):
_VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)'
_VALID_URL = '(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})'

_API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
_SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s'

_VIDEOID_REGEXES = [
r'\bdata-video-id="(\d+)"',
r'\bdata-player-videosids="(\d+)"',
r'<div[^>]+\bid="(\d+)"',
]

_RESIGN_EXPIRATION = 86400

_TESTS = [
{
'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
'md5': '03ebf41cb7ade43581608b7d9b71fab0',
'info_dict': {
'id': '3654973',
'ext': 'mp4',
'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
'duration': 251.585,
'uploader': 'SporTV',
'uploader_id': 698,
'like_count': int,
}
_TESTS = [{
'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
'info_dict': {
'id': '3607726',
'ext': 'mp4',
'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
'duration': 103.204,
'uploader': 'Globo.com',
'uploader_id': '265',
},
{
'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
'info_dict': {
'id': '3607726',
'ext': 'mp4',
'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
'duration': 103.204,
'uploader': 'Globo.com',
'uploader_id': 265,
'like_count': int,
}
}, {
'url': 'http://globoplay.globo.com/v/4581987/',
'md5': 'f36a1ecd6a50da1577eee6dd17f67eff',
'info_dict': {
'id': '4581987',
'ext': 'mp4',
'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP',
'duration': 137.973,
'uploader': 'Rede Globo',
'uploader_id': '196',
},
{
'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
'info_dict': {
'id': '3652183',
'ext': 'mp4',
'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
'duration': 110.711,
'uploader': 'Rede Globo',
'uploader_id': 196,
'like_count': int,
}
},
{
'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',
'md5': 'c1defca721ce25b2354e927d3e4b3dec',
'info_dict': {
'id': '3928201',
'ext': 'mp4',
'title': 'Ator e diretor argentino, Ricado Darín fala sobre utopias e suas perdas',
'duration': 1472.906,
'uploader': 'Canal Brasil',
'uploader_id': 705,
'like_count': int,
}
},
]
}, {
'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html',
'only_matching': True,
}, {
'url': 'http://globosatplay.globo.com/globonews/v/4472924/',
'only_matching': True,
}, {
'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/',
'only_matching': True,
}, {
'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',
'only_matching': True,
}, {
'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html',
'only_matching': True,
}]

class MD5():
class MD5:
HEX_FORMAT_LOWERCASE = 0
HEX_FORMAT_UPPERCASE = 1
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
@ -353,9 +332,6 @@ class GloboIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)

webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')

video = self._download_json(
self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]

@ -364,7 +340,7 @@ class GloboIE(InfoExtractor):
formats = []
for resource in video['resources']:
resource_id = resource.get('_id')
if not resource_id:
if not resource_id or resource_id.endswith('manifest'):
continue

security = self._download_json(
@ -393,20 +369,23 @@ class GloboIE(InfoExtractor):
resource_url = resource['url']
signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash')
if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(signed_url, resource_id, 'mp4'))
m3u8_formats = self._extract_m3u8_formats(
signed_url, resource_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
if m3u8_formats:
formats.extend(m3u8_formats)
else:
formats.append({
'url': signed_url,
'format_id': resource_id,
'height': resource.get('height'),
'format_id': 'http-%s' % resource_id,
'height': int_or_none(resource.get('height')),
})

self._sort_formats(formats)

duration = float_or_none(video.get('duration'), 1000)
like_count = int_or_none(video.get('likes'))
uploader = video.get('channel')
uploader_id = video.get('channel_id')
uploader_id = str_or_none(video.get('channel_id'))

return {
'id': video_id,
@ -414,6 +393,46 @@ class GloboIE(InfoExtractor):
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'like_count': like_count,
'formats': formats
}


class GloboArticleIE(InfoExtractor):
_VALID_URL = 'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/]+)\.html'

_VIDEOID_REGEXES = [
r'\bdata-video-id=["\'](\d{7,})',
r'\bdata-player-videosids=["\'](\d{7,})',
r'\bvideosIDs\s*:\s*["\'](\d{7,})',
r'\bdata-id=["\'](\d{7,})',
r'<div[^>]+\bid=["\'](\d{7,})',
]

_TESTS = [{
'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
'info_dict': {
'id': '3652183',
'ext': 'mp4',
'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
'duration': 110.711,
'uploader': 'Rede Globo',
'uploader_id': '196',
}
}, {
'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html',
'only_matching': True,
}, {
'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html',
'only_matching': True,
}]

@classmethod
def suitable(cls, url):
return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)
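# i.e. article pages are handled here only when GloboIE does not already claim the URL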

def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
return self.url_result('globo:%s' % video_id, 'Globo')

@ -61,7 +61,7 @@ class GooglePlusIE(InfoExtractor):
'width': int(width),
'height': int(height),
} for width, height, video_url in re.findall(
r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent.com.*?)"', webpage)]
self._sort_formats(formats)

return {

@ -4,8 +4,8 @@ import re
import json

from .common import InfoExtractor
from ..compat import (
compat_urlparse,
from ..utils import (
qualities,
)


@ -30,24 +30,33 @@ class ImdbIE(InfoExtractor):
descr = self._html_search_regex(
r'(?s)<span itemprop="description">(.*?)</span>',
webpage, 'description', fatal=False)
available_formats = re.findall(
r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
flags=re.MULTILINE)
player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id
player_page = self._download_webpage(
player_url, video_id, 'Downloading player page')
# the player page contains the info for the default format, we have to
# fetch other pages for the rest of the formats
extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page)
format_pages = [
self._download_webpage(
f_url, video_id, 'Downloading info for %s format' % f_name)
for f_url, f_name in extra_formats]
format_pages.append(player_page)

quality = qualities(['SD', '480p', '720p'])
formats = []
for f_id, f_path in available_formats:
f_path = f_path.strip()
format_page = self._download_webpage(
compat_urlparse.urljoin(url, f_path),
'Downloading info for %s format' % f_id)
for format_page in format_pages:
json_data = self._search_regex(
r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
format_page, 'json data', flags=re.DOTALL)
info = json.loads(json_data)
format_info = info['videoPlayerObject']['video']
f_id = format_info['ffname']
formats.append({
'format_id': f_id,
'url': format_info['videoInfoList'][0]['videoUrl'],
'quality': quality(f_id),
})
self._sort_formats(formats)

return {
'id': video_id,

@ -205,9 +205,9 @@ class IqiyiIE(InfoExtractor):

def get_enc_key(self, swf_url, video_id):
# TODO: automatic key extraction
# last update at 2015-10-10 for Zombie::bite
# '7239670519b6ac209a0bee4ef0446a6b24894b8ac2751506e42116212a0d0272e505'[2:66][1::2]
enc_key = '97596c0abee04ab49ba25564161ad225'
# last update at 2015-10-22 for Zombie::bite
# '7223c67061dbea1259d0ceb44f44b6d62288f4f80c972170de5201d2321060270e05'[2:66][0::2]
enc_key = '2c76de15dcb44bd28ff0927d50d31620'
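# the slice expression in the comment above records how the key was carved out of the player's obfuscated constant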
return enc_key

def _real_extract(self, url):

@ -9,13 +9,14 @@ from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
compat_ord,
)
from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
int_or_none,
encode_data_uri,
)


@ -25,15 +26,16 @@ class LetvIE(InfoExtractor):

_TESTS = [{
'url': 'http://www.letv.com/ptv/vplay/22005890.html',
'md5': 'cab23bd68d5a8db9be31c9a222c1e8df',
'md5': 'edadcfe5406976f42f9f266057ee5e40',
'info_dict': {
'id': '22005890',
'ext': 'mp4',
'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家',
'timestamp': 1424747397,
'upload_date': '20150224',
'description': 'md5:a9cb175fd753e2962176b7beca21a47c',
}
},
'params': {
'hls_prefer_native': True,
},
}, {
'url': 'http://www.letv.com/ptv/vplay/1415246.html',
'info_dict': {
@ -42,16 +44,22 @@ class LetvIE(InfoExtractor):
'title': '美人天下01',
'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda',
},
'params': {
'hls_prefer_native': True,
},
}, {
'note': 'This video is available only in Mainland China, thus a proxy is needed',
'url': 'http://www.letv.com/ptv/vplay/1118082.html',
'md5': 'f80936fbe20fb2f58648e81386ff7927',
'md5': '2424c74948a62e5f31988438979c5ad1',
'info_dict': {
'id': '1118082',
'ext': 'mp4',
'title': '与龙共舞 完整版',
'description': 'md5:7506a5eeb1722bb9d4068f85024e3986',
},
'params': {
'hls_prefer_native': True,
},
'skip': 'Only available in China',
}]

@ -74,6 +82,27 @@ class LetvIE(InfoExtractor):
_loc3_ = self.ror(_loc3_, _loc2_ % 17)
return _loc3_

# see M3U8Encryption class in KLetvPlayer.swf
@staticmethod
def decrypt_m3u8(encrypted_data):
if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
return encrypted_data
encrypted_data = encrypted_data[5:]

_loc4_ = bytearray()
while encrypted_data:
b = compat_ord(encrypted_data[0])
_loc4_.extend([b // 16, b & 0x0f])
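# split each byte into its high and low 4-bit nibbles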
encrypted_data = encrypted_data[1:]
idx = len(_loc4_) - 11
_loc4_ = _loc4_[idx:] + _loc4_[:idx]
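# rotate the nibble sequence so that the last 11 nibbles move to the front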
_loc7_ = bytearray()
while _loc4_:
_loc7_.append(_loc4_[0] * 16 + _loc4_[1])
_loc4_ = _loc4_[2:]

return bytes(_loc7_)

def _real_extract(self, url):
media_id = self._match_id(url)
page = self._download_webpage(url, media_id)
@ -115,23 +144,28 @@ class LetvIE(InfoExtractor):
for format_id in formats:
if format_id in dispatch:
media_url = playurl['domain'][0] + dispatch[format_id][0]

# Mimic what flvxz.com do
url_parts = list(compat_urlparse.urlparse(media_url))
qs = dict(compat_urlparse.parse_qs(url_parts[4]))
qs.update({
'platid': '14',
'splatid': '1401',
'tss': 'no',
'retry': 1
media_url += '&' + compat_urllib_parse.urlencode({
'm3v': 1,
'format': 1,
'expect': 3,
'rateid': format_id,
})
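# these parameters appear to switch the dispatcher into returning JSON metadata (a node list) instead of redirecting to the stream, matching the JSON download below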
url_parts[4] = compat_urllib_parse.urlencode(qs)
media_url = compat_urlparse.urlunparse(url_parts)

nodes_data = self._download_json(
media_url, media_id,
'Download JSON metadata for format %s' % format_id)

req = self._request_webpage(
nodes_data['nodelist'][0]['location'], media_id,
note='Downloading m3u8 information for format %s' % format_id)

m3u8_data = self.decrypt_m3u8(req.read())

url_info_dict = {
'url': media_url,
'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'),
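# embed the decrypted playlist as a data: URI so the m3u8 downloader can consume it without re-fetching the encrypted version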
'ext': determine_ext(dispatch[format_id][1]),
'format_id': format_id,
'protocol': 'm3u8',
}

if format_id[-1:] == 'p':

@ -82,6 +82,11 @@ class LyndaBaseIE(InfoExtractor):
expected=True)
raise ExtractorError('Unable to log in')

def _logout(self):
self._download_webpage(
'http://www.lynda.com/ajax/logout.aspx', None,
'Logging out', 'Unable to log out', fatal=False)


class LyndaIE(LyndaBaseIE):
IE_NAME = 'lynda'
@ -108,50 +113,47 @@ class LyndaIE(LyndaBaseIE):
def _real_extract(self, url):
video_id = self._match_id(url)

page = self._download_webpage(
video = self._download_json(
'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
video_id, 'Downloading video JSON')
video_json = json.loads(page)

if 'Status' in video_json:
if 'Status' in video:
raise ExtractorError(
'lynda returned error: %s' % video_json['Message'], expected=True)
'lynda returned error: %s' % video['Message'], expected=True)

if video_json['HasAccess'] is False:
if video.get('HasAccess') is False:
self.raise_login_required('Video %s is only available for members' % video_id)

video_id = compat_str(video_json['ID'])
duration = video_json['DurationInSeconds']
title = video_json['Title']
video_id = compat_str(video.get('ID') or video_id)
duration = int_or_none(video.get('DurationInSeconds'))
title = video['Title']

formats = []

fmts = video_json.get('Formats')
fmts = video.get('Formats')
if fmts:
formats.extend([
{
'url': fmt['Url'],
'ext': fmt['Extension'],
'width': fmt['Width'],
'height': fmt['Height'],
'filesize': fmt['FileSize'],
'format_id': str(fmt['Resolution'])
} for fmt in fmts])
formats.extend([{
'url': f['Url'],
'ext': f.get('Extension'),
'width': int_or_none(f.get('Width')),
'height': int_or_none(f.get('Height')),
'filesize': int_or_none(f.get('FileSize')),
'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None,
} for f in fmts if f.get('Url')])

prioritized_streams = video_json.get('PrioritizedStreams')
prioritized_streams = video.get('PrioritizedStreams')
if prioritized_streams:
formats.extend([
{
for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
formats.extend([{
'url': video_url,
'width': int_or_none(format_id),
'format_id': format_id,
} for format_id, video_url in prioritized_streams['0'].items()
])
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
} for format_id, video_url in prioritized_stream.items()])

self._check_formats(formats, video_id)
self._sort_formats(formats)

subtitles = self.extract_subtitles(video_id, page)
subtitles = self.extract_subtitles(video_id)

return {
'id': video_id,
@ -182,7 +184,7 @@ class LyndaIE(LyndaBaseIE):
if srt:
return srt

def _get_subtitles(self, video_id, webpage):
def _get_subtitles(self, video_id):
url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
subs = self._download_json(url, None, False)
if subs:
@ -204,12 +206,13 @@ class LyndaCourseIE(LyndaBaseIE):
course_path = mobj.group('coursepath')
course_id = mobj.group('courseid')

page = self._download_webpage(
course = self._download_json(
'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
course_id, 'Downloading course JSON')
course_json = json.loads(page)

if 'Status' in course_json and course_json['Status'] == 'NotFound':
self._logout()

if course.get('Status') == 'NotFound':
raise ExtractorError(
'Course %s does not exist' % course_id, expected=True)

@ -219,12 +222,13 @@ class LyndaCourseIE(LyndaBaseIE):
# Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
# by single video API anymore

for chapter in course_json['Chapters']:
for video in chapter['Videos']:
if video['HasAccess'] is False:
for chapter in course['Chapters']:
for video in chapter.get('Videos', []):
if video.get('HasAccess') is False:
unaccessible_videos += 1
continue
videos.append(video['ID'])
if video.get('ID'):
videos.append(video['ID'])

if unaccessible_videos > 0:
self._downloader.report_warning(
@ -237,6 +241,6 @@ class LyndaCourseIE(LyndaBaseIE):
'Lynda')
for video_id in videos]

course_title = course_json['Title']
course_title = course.get('Title')

return self.playlist_result(entries, course_id, course_title)

@ -1,64 +1,169 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
determine_ext,
int_or_none,
parse_duration,
parse_iso8601,
xpath_text,
)


class MDRIE(InfoExtractor):
_VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'
IE_DESC = 'MDR.DE and KiKA'
_VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'

# No tests, MDR regularly deletes its videos
_TEST = {
_TESTS = [{
# MDR regularly deletes its videos
'url': 'http://www.mdr.de/fakt/video189002.html',
'only_matching': True,
}
}, {
# audio
'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html',
'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa',
'info_dict': {
'id': '1312272',
'ext': 'mp3',
'title': 'Feuilleton vom 30. Oktober 2015',
'duration': 250,
'uploader': 'MITTELDEUTSCHER RUNDFUNK',
},
}, {
'url': 'http://www.kika.de/baumhaus/videos/video19636.html',
'md5': '4930515e36b06c111213e80d1e4aad0e',
'info_dict': {
'id': '19636',
'ext': 'mp4',
'title': 'Baumhaus vom 30. Oktober 2015',
'duration': 134,
'uploader': 'KIKA',
},
}, {
'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html',
'md5': '5fe9c4dd7d71e3b238f04b8fdd588357',
'info_dict': {
'id': '8182',
'ext': 'mp4',
'title': 'Beutolomäus und der geheime Weihnachtswunsch',
'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd',
'timestamp': 1419047100,
'upload_date': '20141220',
'duration': 4628,
'uploader': 'KIKA',
},
}, {
'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html',
'only_matching': True,
}, {
'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html',
'only_matching': True,
}]

def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('video_id')
domain = m.group('domain')
video_id = self._match_id(url)

# determine title and media streams from webpage
html = self._download_webpage(url, video_id)
webpage = self._download_webpage(url, video_id)

title = self._html_search_regex(r'<h[12]>(.*?)</h[12]>', html, 'title')
xmlurl = self._search_regex(
r'dataURL:\'(/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, 'XML URL')
data_url = self._search_regex(
r'dataURL\s*:\s*(["\'])(?P<url>/.+/(?:video|audio)[0-9]+-avCustom\.xml)\1',
webpage, 'data url', group='url')

doc = self._download_xml(
compat_urlparse.urljoin(url, data_url), video_id)

title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True)

doc = self._download_xml(domain + xmlurl, video_id)
formats = []
for a in doc.findall('./assets/asset'):
url_el = a.find('./progressiveDownloadUrl')
if url_el is None:
continue
abr = int(a.find('bitrateAudio').text) // 1000
media_type = a.find('mediaType').text
format = {
'abr': abr,
'filesize': int(a.find('fileSize').text),
'url': url_el.text,
}
processed_urls = []
for asset in doc.findall('./assets/asset'):
for source in (
'progressiveDownload',
'dynamicHttpStreamingRedirector',
'adaptiveHttpStreamingRedirector'):
url_el = asset.find('./%sUrl' % source)
if url_el is None:
continue

video_url = url_el.text
if video_url in processed_urls:
continue

processed_urls.append(video_url)
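# the same URL can be exposed by more than one source type; keep only its first occurrence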

vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)

ext = determine_ext(url_el.text)
if ext == 'm3u8':
url_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
preference=0, m3u8_id='HLS', fatal=False)
elif ext == 'f4m':
url_formats = self._extract_f4m_formats(
video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
preference=0, f4m_id='HDS', fatal=False)
else:
media_type = xpath_text(asset, './mediaType', 'media type', default='MP4')
vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)
filesize = int_or_none(xpath_text(asset, './fileSize', 'file size'))

f = {
'url': video_url,
'format_id': '%s-%d' % (media_type, vbr or abr),
'filesize': filesize,
'abr': abr,
'preference': 1,
}

if vbr:
width = int_or_none(xpath_text(asset, './frameWidth', 'width'))
height = int_or_none(xpath_text(asset, './frameHeight', 'height'))
f.update({
'vbr': vbr,
'width': width,
'height': height,
})

url_formats = [f]

if not url_formats:
continue

if not vbr:
for f in url_formats:
abr = f.get('tbr') or abr
if 'tbr' in f:
del f['tbr']
f.update({
'abr': abr,
'vcodec': 'none',
})

formats.extend(url_formats)

vbr_el = a.find('bitrateVideo')
if vbr_el is None:
format.update({
'vcodec': 'none',
'format_id': '%s-%d' % (media_type, abr),
})
else:
vbr = int(vbr_el.text) // 1000
format.update({
'vbr': vbr,
'width': int(a.find('frameWidth').text),
'height': int(a.find('frameHeight').text),
'format_id': '%s-%d' % (media_type, vbr),
})
formats.append(format)
self._sort_formats(formats)

description = xpath_text(doc, './broadcast/broadcastDescription', 'description')
timestamp = parse_iso8601(
xpath_text(
doc, [
'./broadcast/broadcastDate',
'./broadcast/broadcastStartDate',
'./broadcast/broadcastEndDate'],
'timestamp', default=None))
duration = parse_duration(xpath_text(doc, './duration', 'duration'))
uploader = xpath_text(doc, './rights', 'uploader')

return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'duration': duration,
'uploader': uploader,
'formats': formats,
}

@ -4,6 +4,7 @@ from __future__ import unicode_literals
import random

from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
xpath_text,
int_or_none,
@ -51,6 +52,8 @@ class MioMioIE(InfoExtractor):
mioplayer_path = self._search_regex(
r'src="(/mioplayer/[^"]+)"', webpage, 'ref_path')

http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}
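# assumption: the sina.php config endpoint rejects requests without the player page as Referer, hence the header added below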

xml_config = self._search_regex(
r'flashvars="type=(?:sina|video)&(.+?)&',
webpage, 'xml config')
@ -60,14 +63,12 @@ class MioMioIE(InfoExtractor):
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)),
video_id)

# the following xml contains the actual configuration information on the video file(s)
vid_config = self._download_xml(
vid_config_request = compat_urllib_request.Request(
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
video_id)
headers=http_headers)

http_headers = {
'Referer': 'http://www.miomio.tv%s' % mioplayer_path,
}
# the following xml contains the actual configuration information on the video file(s)
vid_config = self._download_xml(vid_config_request, video_id)

if not int_or_none(xpath_text(vid_config, 'timelength')):
raise ExtractorError('Unable to load videos!', expected=True)

@ -86,7 +86,7 @@ class MITIE(TechTVMITIE):
webpage = self._download_webpage(url, page_title)
embed_url = self._search_regex(
r'<iframe .*?src="(.+?)"', webpage, 'embed url')
return self.url_result(embed_url, ie='TechTVMIT')
return self.url_result(embed_url)


class OCWMITIE(InfoExtractor):

@ -1,7 +1,10 @@
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
encode_dict,
get_element_by_attribute,
@ -15,7 +18,7 @@ class MiTeleIE(InfoExtractor):

_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
'md5': 'ace7635b2a0b286aaa37d3ff192d2a8a',
'md5': '0ff1a13aebb35d9bc14081ff633dd324',
'info_dict': {
'id': '0NF1jJnxS1Wu3pHrmvFyw2',
'display_id': 'programa-144',
@ -34,6 +37,7 @@ class MiTeleIE(InfoExtractor):

config_url = self._search_regex(
r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url')
config_url = compat_urlparse.urljoin(url, config_url)

config = self._download_json(
config_url, display_id, 'Downloading config JSON')
@ -56,7 +60,7 @@ class MiTeleIE(InfoExtractor):
'sta': '0',
}
media = self._download_json(
'%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data)).encode('utf-8')),
'%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))),
display_id, 'Downloading %s JSON' % location['loc'])
file_ = media.get('file')
if not file_:

@ -17,7 +17,7 @@ from ..utils import (

class MonikerIE(InfoExtractor):
IE_DESC = 'allmyvideos.net and vidspot.net'
_VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'
_VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?:(?:2|v)/v-)?(?P<id>[a-zA-Z0-9_-]+)'

_TESTS = [{
'url': 'http://allmyvideos.net/jih3nce3x6wn',
@ -46,6 +46,18 @@ class MonikerIE(InfoExtractor):
}, {
'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
'only_matching': True,
}, {
'url': 'http://vidspot.net/2/v-ywDf99',
'md5': '5f8254ce12df30479428b0152fb8e7ba',
'info_dict': {
'id': 'ywDf99',
'ext': 'mp4',
'title': 'IL FAIT LE MALIN EN PORSHE CAYENNE ( mais pas pour longtemps)',
'description': 'IL FAIT LE MALIN EN PORSHE CAYENNE.',
},
}, {
'url': 'http://allmyvideos.net/v/v-HXZm5t',
'only_matching': True,
}]

def _real_extract(self, url):
@ -64,18 +76,30 @@ class MonikerIE(InfoExtractor):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error), expected=True)

fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
data = dict(fields)
builtin_url = self._search_regex(
r'<iframe[^>]+src=(["\'])(?P<url>.+?/builtin-.+?)\1',
orig_webpage, 'builtin URL', default=None, group='url')
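# some pages embed a '/builtin-' iframe player; when it is absent, the legacy hidden-form POST flow below is used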

post = compat_urllib_parse.urlencode(data)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
req = compat_urllib_request.Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
if builtin_url:
req = compat_urllib_request.Request(builtin_url)
req.add_header('Referer', url)
webpage = self._download_webpage(req, video_id, 'Downloading builtin page')
title = self._og_search_title(orig_webpage).strip()
description = self._og_search_description(orig_webpage).strip()
else:
fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
data = dict(fields)

title = os.path.splitext(data['fname'])[0]
post = compat_urllib_parse.urlencode(data)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
req = compat_urllib_request.Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')

title = os.path.splitext(data['fname'])[0]
description = None

# Could be several links with different quality
links = re.findall(r'"file" : "?(.+?)",', webpage)
@ -89,5 +113,6 @@ class MonikerIE(InfoExtractor):
return {
'id': video_id,
'title': title,
'description': description,
'formats': formats,
}

@ -1,80 +1,42 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
clean_html,
compat_urllib_request,
)


class MovieClipsIE(InfoExtractor):
_VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
_VALID_URL = r'https?://(?:www.)?movieclips\.com/videos/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597?autoPlay=true&playlistId=5',
'info_dict': {
'id': 'Wy7ZU',
'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
'id': 'pKIGmG83AqD9',
'display_id': 'warcraft-trailer-1-561180739597',
'ext': 'mp4',
'title': 'My Week with Marilyn - Do You Love Me?',
'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
'title': 'Warcraft Trailer 1',
'description': 'Watch Trailer 1 from Warcraft (2016). Legendary’s WARCRAFT is a 3D epic adventure of world-colliding conflict based.',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# rtmp download
'skip_download': True,
}
'add_ie': ['ThePlatform'],
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
show_id = display_id or video_id
display_id = self._match_id(url)

config = self._download_xml(
'http://config.movieclips.com/player/config/%s' % video_id,
show_id, 'Downloading player config')

if config.find('./country-region').text == 'false':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)

properties = config.find('./video/properties')
smil_file = properties.attrib['smil_file']

smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
base_url = smil.find('./head/meta').attrib['base']

formats = []
for video in smil.findall('./body/switch/video'):
vbr = int(video.attrib['system-bitrate']) / 1000
src = video.attrib['src']
formats.append({
'url': base_url,
'play_path': src,
'ext': src.split(':')[0],
'vbr': vbr,
'format_id': '%dk' % vbr,
})

self._sort_formats(formats)

title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
description = clean_html(compat_str(properties.attrib['clip_description']))
thumbnail = properties.attrib['image']
categories = properties.attrib['clip_categories'].split(',')
req = compat_urllib_request.Request(url)
# it doesn't work if it thinks the browser is too old
req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/43.0 (Chrome)')
webpage = self._download_webpage(req, display_id)
theplatform_link = self._html_search_regex(r'src="(http://player.theplatform.com/p/.*?)"', webpage, 'theplatform link')
title = self._html_search_regex(r'<title[^>]*>([^>]+)-\s*\d+\s*|\s*Movieclips.com</title>', webpage, 'title')
description = self._html_search_meta('description', webpage)

return {
'id': video_id,
'display_id': display_id,
'_type': 'url_transparent',
'url': theplatform_link,
'title': title,
'display_id': display_id,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'formats': formats,
}

@ -14,7 +14,8 @@ from ..utils import (

class NDRBaseIE(InfoExtractor):
def _real_extract(self, url):
display_id = self._match_id(url)
mobj = re.match(self._VALID_URL, url)
display_id = next(group for group in mobj.groups() if group)
webpage = self._download_webpage(url, display_id)
return self._extract_embed(webpage, display_id)

@ -22,7 +23,7 @@ class NDRBaseIE(InfoExtractor):
class NDRIE(NDRBaseIE):
IE_NAME = 'ndr'
IE_DESC = 'NDR.de - Norddeutscher Rundfunk'
_VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[^/?#]+),[\da-z]+\.html'
_VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html'
_TESTS = [{
# httpVideo, same content id
'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
@ -77,6 +78,9 @@ class NDRIE(NDRBaseIE):
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html',
'only_matching': True,
}]

def _extract_embed(self, webpage, display_id):
@ -101,7 +105,7 @@ class NDRIE(NDRBaseIE):
class NJoyIE(NDRBaseIE):
IE_NAME = 'njoy'
IE_DESC = 'N-JOY'
_VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[^/?#]+),[\da-z]+\.html'
_VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html'
_TESTS = [{
# httpVideo, same content id
'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
@ -136,6 +140,9 @@ class NJoyIE(NDRBaseIE):
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html',
'only_matching': True,
}]

def _extract_embed(self, webpage, display_id):
@ -231,7 +238,7 @@ class NDREmbedBaseIE(InfoExtractor):

class NDREmbedIE(NDREmbedBaseIE):
IE_NAME = 'ndr:embed'
_VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
_VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
_TESTS = [{
'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html',
'md5': '8b9306142fe65bbdefb5ce24edb6b0a9',
@ -325,7 +332,7 @@ class NDREmbedIE(NDREmbedBaseIE):

class NJoyEmbedIE(NDREmbedBaseIE):
IE_NAME = 'njoy:embed'
_VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'
_VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'
_TESTS = [{
# httpVideo
'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html',

@ -13,7 +13,7 @@ from ..utils import (


class OdnoklassnikiIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
@ -66,6 +66,9 @@ class OdnoklassnikiIE(InfoExtractor):
}, {
'url': 'http://www.ok.ru/video/20648036891',
'only_matching': True,
}, {
'url': 'http://www.ok.ru/videoembed/20648036891',
'only_matching': True,
}]

def _real_extract(self, url):

@ -8,6 +8,7 @@ from ..utils import (
    ExtractorError,
    determine_ext,
    int_or_none,
    strip_jsonp,
    unified_strdate,
    US_RATINGS,
)
@ -153,6 +154,22 @@ class PBSIE(InfoExtractor):
            'params': {
                'skip_download': True, # requires ffmpeg
            },
        },
        {
            # Frontline video embedded via flp2012.js
            'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists',
            'info_dict': {
                'id': '2070868960',
                'display_id': 'the-atomic-artists',
                'ext': 'mp4',
                'title': 'FRONTLINE - The Atomic Artists',
                'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
                'duration': 723,
                'thumbnail': 're:^https?://.*\.jpg$',
            },
            'params': {
                'skip_download': True, # requires ffmpeg
            },
        }
    ]
    _ERRORS = {
@ -191,9 +208,30 @@ class PBSIE(InfoExtractor):
            if media_id:
                return media_id, presumptive_id, upload_date

            url = self._search_regex(
                r'(?s)<iframe[^>]+?(?:[a-z-]+?=["\'].*?["\'][^>]+?)*?\bsrc=["\']([^\'"]+partnerplayer[^\'"]+)["\']',
                webpage, 'player URL')
            # Frontline video embedded via flp
            video_id = self._search_regex(
                r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
            if video_id:
                # pkg_id calculation is reverse engineered from
                # http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js
                prg_id = self._search_regex(
                    r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:]
                if 'q' in prg_id:
                    prg_id = prg_id.split('q')[1]
                prg_id = int(prg_id, 16)
                getdir = self._download_json(
                    'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
                    presumptive_id, 'Downloading getdir JSON',
                    transform_source=strip_jsonp)
                return getdir['mid'], presumptive_id, upload_date

            for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage):
                url = self._search_regex(
                    r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe,
                    'player URL', default=None, group='url')
                if url:
                    break

        mobj = re.match(self._VALID_URL, url)

        player_id = mobj.group('player_id')

@ -12,7 +12,8 @@ from ..utils import parse_iso8601
class PeriscopeIE(InfoExtractor):
    IE_DESC = 'Periscope'
    _VALID_URL = r'https?://(?:www\.)?periscope\.tv/w/(?P<id>[^/?#]+)'
    _TEST = {
    # Alive example URLs can be found here http://onperiscope.com/
    _TESTS = [{
        'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==',
        'md5': '65b57957972e503fcbbaeed8f4fa04ca',
        'info_dict': {
@ -25,11 +26,15 @@ class PeriscopeIE(InfoExtractor):
            'uploader_id': '1465763',
        },
        'skip': 'Expires in 24 hours',
    }
    }, {
        'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv',
        'only_matching': True,
    }]

    def _call_api(self, method, token):
    def _call_api(self, method, value):
        attribute = 'token' if len(value) > 13 else 'broadcast_id'
        return self._download_json(
            'https://api.periscope.tv/api/v2/%s?token=%s' % (method, token), token)
            'https://api.periscope.tv/api/v2/%s?%s=%s' % (method, attribute, value), value)

    def _real_extract(self, url):
        token = self._match_id(url)

@ -20,7 +20,7 @@ from ..utils import (
class ProSiebenSat1IE(InfoExtractor):
    IE_NAME = 'prosiebensat1'
    IE_DESC = 'ProSiebenSat.1 Digital'
    _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at)|ran\.de|fem\.com)/(?P<id>.+)'
    _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)'

    _TESTS = [
        {

@ -9,8 +9,8 @@ from ..utils import (


class RTBFIE(InfoExtractor):
    _VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)'
    _TEST = {
    _VALID_URL = r'https?://(?:www\.)?rtbf\.be/(?:video/[^?]+\?.*\bid=|ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274',
        'md5': '799f334ddf2c0a582ba80c44655be570',
        'info_dict': {
@ -19,7 +19,14 @@ class RTBFIE(InfoExtractor):
            'title': 'Les Diables au coeur (épisode 2)',
            'duration': 3099,
        }
    }
    }, {
        # geo restricted
        'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442',
        'only_matching': True,
    }, {
        'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858',
        'only_matching': True,
    }]

    _QUALITIES = [
        ('mobile', 'mobile'),

@ -9,16 +9,16 @@ from ..utils import (


class RteIE(InfoExtractor):
    _VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/(?P<id>[0-9]+)/'
    _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.rte.ie/player/de/show/10363114/',
        'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
        'info_dict': {
            'id': '10363114',
            'id': '10478715',
            'ext': 'mp4',
            'title': 'One News',
            'title': 'Watch iWitness online',
            'thumbnail': 're:^https?://.*\.jpg$',
            'description': 'The One O\'Clock News followed by Weather.',
            'duration': 436.844,
            'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.',
            'duration': 60.046,
        },
        'params': {
            'skip_download': 'f4m fails with --test atm'

@ -121,9 +121,9 @@ class SenateISVPIE(InfoExtractor):
                'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
            }]
        else:
            hdcore_sign = '?hdcore=3.1.0'
            hdcore_sign = 'hdcore=3.1.0'
            url_params = (domain, video_id, stream_num)
            f4m_url = '%s/z/%s_1@%s/manifest.f4m' % url_params + hdcore_sign
            f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign
            m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
            for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
                # URLs without the extra param induce an 404 error

@ -77,17 +77,21 @@ class SpiegeltvIE(InfoExtractor):
                    'rtmp_live': True,
                })
            elif determine_ext(endpoint) == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    endpoint.replace('[video]', play_path),
                    video_id, 'm4v',
                    preference=1, # Prefer hls since it allows to workaround georestriction
                    m3u8_id='hls', fatal=False)
                if m3u8_formats is not False:
                    formats.extend(m3u8_formats)
                formats.append({
                    'url': endpoint.replace('[video]', play_path),
                    'ext': 'm4v',
                    'format_id': 'hls', # Prefer hls since it allows to workaround georestriction
                    'protocol': 'm3u8',
                    'preference': 1,
                    'http_headers': {
                        'Accept-Encoding': 'deflate', # gzip causes trouble on the server side
                    },
                })
            else:
                formats.append({
                    'url': endpoint,
                })
        self._check_formats(formats, video_id)

        thumbnails = []
        for image in media_json['images']:

81 youtube_dl/extractor/stitcher.py Normal file
@ -0,0 +1,81 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
    unescapeHTML,
)


class StitcherIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
    _TESTS = [{
        'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
        'md5': '391dd4e021e6edeb7b8e68fbf2e9e940',
        'info_dict': {
            'id': '40789481',
            'ext': 'mp3',
            'title': 'Machine Learning Mastery and Cancer Clusters',
            'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3',
            'duration': 1604,
            'thumbnail': 're:^https?://.*\.jpg',
        },
    }, {
        'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
        'info_dict': {
            'id': '40846275',
            'display_id': 'the-rare-hourlong-comedy-plus',
            'ext': 'mp3',
            'title': "The CW's 'Crazy Ex-Girlfriend'",
            'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17',
            'duration': 2235,
            'thumbnail': 're:^https?://.*\.jpg',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # escaped title
        'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
        'only_matching': True,
    }, {
        'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        audio_id = mobj.group('id')
        display_id = mobj.group('display_id') or audio_id

        webpage = self._download_webpage(url, display_id)

        episode = self._parse_json(
            js_to_json(self._search_regex(
                r'(?s)var\s+stitcher\s*=\s*({.+?});\n', webpage, 'episode config')),
            display_id)['config']['episode']

        title = unescapeHTML(episode['title'])
        formats = [{
            'url': episode[episode_key],
            'ext': determine_ext(episode[episode_key]) or 'mp3',
            'vcodec': 'none',
        } for episode_key in ('episodeURL',) if episode.get(episode_key)]
        description = self._search_regex(
            r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False)
        duration = int_or_none(episode.get('duration'))
        thumbnail = episode.get('episodeImage')

        return {
            'id': audio_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'duration': duration,
            'thumbnail': thumbnail,
            'formats': formats,
        }

@ -10,10 +10,10 @@ class TutvIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tu\.tv/videos/(?P<id>[^/?]+)'
    _TEST = {
        'url': 'http://tu.tv/videos/robots-futbolistas',
        'md5': '627c7c124ac2a9b5ab6addb94e0e65f7',
        'md5': '0cd9e28ad270488911b0d2a72323395d',
        'info_dict': {
            'id': '2973058',
            'ext': 'flv',
            'ext': 'mp4',
            'title': 'Robots futbolistas',
        },
    }

@ -15,6 +15,7 @@ from ..compat import (
    compat_urlparse,
)
from ..utils import (
    encode_dict,
    ExtractorError,
    int_or_none,
    parse_duration,
@ -27,8 +28,7 @@ class TwitchBaseIE(InfoExtractor):

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'https://secure.twitch.tv/login'
    _LOGIN_POST_URL = 'https://passport.twitch.tv/authentications/new'
    _LOGIN_URL = 'http://www.twitch.tv/login'
    _NETRC_MACHINE = 'twitch'

    def _handle_error(self, response):
@ -61,26 +61,28 @@ class TwitchBaseIE(InfoExtractor):
        if username is None:
            return

        login_page = self._download_webpage(
        login_page, handle = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Downloading login page')

        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'login': username.encode('utf-8'),
            'password': password.encode('utf-8'),
            'username': username,
            'password': password,
        })

        redirect_url = handle.geturl()

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_POST_URL, group='url')
            'post url', default=redirect_url, group='url')

        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
            post_url = compat_urlparse.urljoin(redirect_url, post_url)

        request = compat_urllib_request.Request(
            post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
            post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
        request.add_header('Referer', redirect_url)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

@ -238,14 +240,24 @@ class TwitchVodIE(TwitchItemBaseIE):

    def _real_extract(self, url):
        item_id = self._match_id(url)

        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)

        formats = self._extract_m3u8_formats(
            '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
            % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
            '%s/vod/%s?%s' % (
                self._USHER_BASE, item_id,
                compat_urllib_parse.urlencode({
                    'allow_source': 'true',
                    'allow_spectre': 'true',
                    'player': 'twitchweb',
                    'nauth': access_token['token'],
                    'nauthsig': access_token['sig'],
                })),
            item_id, 'mp4')

        self._prefer_source(formats)
        info['formats'] = formats

@ -1,3 +1,4 @@
# coding: utf-8
from __future__ import unicode_literals

import re
@ -6,23 +7,51 @@ from .common import InfoExtractor
from ..compat import compat_urllib_request
from ..utils import (
    float_or_none,
    unescapeHTML,
    xpath_text,
    remove_end,
)


class TwitterCardIE(InfoExtractor):
    IE_NAME = 'twitter:card'
    _VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
    _TEST = {
        'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
        'md5': 'a74f50b310c83170319ba16de6955192',
        'info_dict': {
            'id': '560070183650213889',
            'ext': 'mp4',
            'title': 'TwitterCard',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 30.033,
    _TESTS = [
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
            'md5': '7d2f6b4d2eb841a7ccc893d479bfceb4',
            'info_dict': {
                'id': '560070183650213889',
                'ext': 'mp4',
                'title': 'TwitterCard',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 30.033,
            }
        },
    }
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
            'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
            'info_dict': {
                'id': '623160978427936768',
                'ext': 'mp4',
                'title': 'TwitterCard',
                'thumbnail': 're:^https?://.*\.jpg',
                'duration': 80.155,
            },
        },
        {
            'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
            'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814',
            'info_dict': {
                'id': 'dq4Oj5quskI',
                'ext': 'mp4',
                'title': 'Ubuntu 11.10 Overview',
                'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10-things-to-do-after-installing-ubuntu-11-10/',
                'upload_date': '20111013',
                'uploader': 'OMG! Ubuntu!',
                'uploader_id': 'omgubuntu',
            },
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
@ -40,10 +69,24 @@ class TwitterCardIE(InfoExtractor):
            request.add_header('User-Agent', user_agent)
            webpage = self._download_webpage(request, video_id)

            config = self._parse_json(
                unescapeHTML(self._search_regex(
                    r'data-player-config="([^"]+)"', webpage, 'data player config')),
            youtube_url = self._html_search_regex(
                r'<iframe[^>]+src="((?:https?:)?//www.youtube.com/embed/[^"]+)"',
                webpage, 'youtube iframe', default=None)
            if youtube_url:
                return self.url_result(youtube_url, 'Youtube')

            config = self._parse_json(self._html_search_regex(
                r'data-player-config="([^"]+)"', webpage, 'data player config'),
                video_id)
            if 'playlist' not in config:
                if 'vmapUrl' in config:
                    vmap_data = self._download_xml(config['vmapUrl'], video_id)
                    video_url = xpath_text(vmap_data, './/MediaFile').strip()
                    formats.append({
                        'url': video_url,
                    })
                    break # same video regardless of UA
                continue

            video_url = config['playlist'][0]['source']

@ -70,3 +113,54 @@ class TwitterCardIE(InfoExtractor):
            'duration': duration,
            'formats': formats,
        }


class TwitterIE(InfoExtractor):
    IE_NAME = 'twitter'
    _VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)'
    _TEMPLATE_URL = 'https://twitter.com/%s/status/%s'

    _TEST = {
        'url': 'https://twitter.com/freethenipple/status/643211948184596480',
        'md5': '31cd83a116fc41f99ae3d909d4caf6a0',
        'info_dict': {
            'id': '643211948184596480',
            'ext': 'mp4',
            'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
            'thumbnail': 're:^https?://.*\.jpg',
            'duration': 12.922,
            'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
            'uploader': 'FREE THE NIPPLE',
            'uploader_id': 'freethenipple',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user_id = mobj.group('user_id')
        twid = mobj.group('id')

        webpage = self._download_webpage(self._TEMPLATE_URL % (user_id, twid), twid)

        username = remove_end(self._og_search_title(webpage), ' on Twitter')

        title = self._og_search_description(webpage).strip('').replace('\n', ' ')

        # strip 'https -_t.co_BJYgOjSeGA' junk from filenames
        mobj = re.match(r'“(.*)\s+(https?://[^ ]+)”', title)
        title, short_url = mobj.groups()

        card_id = self._search_regex(
            r'["\']/i/cards/tfw/v1/(\d+)', webpage, 'twitter card url')
        card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id

        return {
            '_type': 'url_transparent',
            'ie_key': 'TwitterCard',
            'uploader_id': user_id,
            'uploader': username,
            'url': card_url,
            'webpage_url': url,
            'description': '%s on Twitter: "%s %s"' % (username, title, short_url),
            'title': username + ' - ' + title,
        }

@ -1,10 +1,10 @@
from __future__ import unicode_literals

import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..compat import (
    compat_etree_fromstring,
    compat_urllib_request,
)
from ..utils import (
@ -97,7 +97,7 @@ class VevoIE(InfoExtractor):
        if last_version['version'] == -1:
            raise ExtractorError('Unable to extract last version of the video')

        renditions = xml.etree.ElementTree.fromstring(last_version['data'])
        renditions = compat_etree_fromstring(last_version['data'])
        formats = []
        # Already sorted from worst to best quality
        for rend in renditions.findall('rendition'):
@ -114,7 +114,7 @@ class VevoIE(InfoExtractor):

    def _formats_from_smil(self, smil_xml):
        formats = []
        smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8'))
        smil_doc = compat_etree_fromstring(smil_xml.encode('utf-8'))
        els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
        for el in els:
            src = el.attrib['src']

@ -2,8 +2,8 @@ from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    find_xpath_attr,
    int_or_none,
    parse_iso8601,
)


@ -18,33 +18,35 @@ class VideofyMeIE(InfoExtractor):
            'id': '1100701',
            'ext': 'mp4',
            'title': 'This is VideofyMe',
            'description': None,
            'description': '',
            'upload_date': '20130326',
            'timestamp': 1364288959,
            'uploader': 'VideofyMe',
            'uploader_id': 'thisisvideofyme',
            'view_count': int,
            'likes': int,
            'comment_count': int,
        },

    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
                                    video_id)
        video = config.find('video')
        sources = video.find('sources')
        url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
                                          for key in ['on', 'av', 'off']] if node is not None)
        video_url = url_node.find('url').text
        view_count = int_or_none(self._search_regex(
            r'([0-9]+)', video.find('views').text, 'view count', fatal=False))

        config = self._download_json('http://vf-player-info-loader.herokuapp.com/%s.json' % video_id, video_id)['videoinfo']

        video = config.get('video')
        blog = config.get('blog', {})

        return {
            'id': video_id,
            'title': video.find('title').text,
            'url': video_url,
            'thumbnail': video.find('thumb').text,
            'description': video.find('description').text,
            'uploader': config.find('blog/name').text,
            'uploader_id': video.find('identifier').text,
            'view_count': view_count,
            'title': video['title'],
            'url': video['sources']['source']['url'],
            'thumbnail': video.get('thumb'),
            'description': video.get('description'),
            'timestamp': parse_iso8601(video.get('date')),
            'uploader': blog.get('name'),
            'uploader_id': blog.get('identifier'),
            'view_count': int_or_none(self._search_regex(r'([0-9]+)', video.get('views'), 'view count', fatal=False)),
            'likes': int_or_none(video.get('likes')),
            'comment_count': int_or_none(video.get('nrOfComments')),
        }

@ -1,82 +0,0 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    parse_duration,
)


class VideoLecturesNetIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/*(?:[#?].*)?$'
    IE_NAME = 'videolectures.net'

    _TESTS = [{
        'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
        'info_dict': {
            'id': 'promogram_igor_mekjavic_eng',
            'ext': 'mp4',
            'title': 'Automatics, robotics and biocybernetics',
            'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
            'upload_date': '20130627',
            'duration': 565,
            'thumbnail': 're:http://.*\.jpg',
        },
    }, {
        # video with invalid direct format links (HTTP 403)
        'url': 'http://videolectures.net/russir2010_filippova_nlp/',
        'info_dict': {
            'id': 'russir2010_filippova_nlp',
            'ext': 'flv',
            'title': 'NLP at Google',
            'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
            'duration': 5352,
            'thumbnail': 're:http://.*\.jpg',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://videolectures.net/deeplearning2015_montreal/',
        'info_dict': {
            'id': 'deeplearning2015_montreal',
            'title': 'Deep Learning Summer School, Montreal 2015',
            'description': 'md5:90121a40cc6926df1bf04dcd8563ed3b',
        },
        'playlist_count': 30,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        smil_url = 'http://videolectures.net/%s/video/1/smil.xml' % video_id

        try:
            smil = self._download_smil(smil_url, video_id)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                # Probably a playlist
                webpage = self._download_webpage(url, video_id)
                entries = [
                    self.url_result(compat_urlparse.urljoin(url, video_url), 'VideoLecturesNet')
                    for _, video_url in re.findall(r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', webpage)]
                playlist_title = self._html_search_meta('title', webpage, 'title', fatal=True)
                playlist_description = self._html_search_meta('description', webpage, 'description')
                return self.playlist_result(entries, video_id, playlist_title, playlist_description)

        info = self._parse_smil(smil, smil_url, video_id)

        info['id'] = video_id

        switch = smil.find('.//switch')
        if switch is not None:
            info['duration'] = parse_duration(switch.attrib.get('dur'))

        return info

@ -14,7 +14,7 @@ class VidmeIE(InfoExtractor):
    _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'https://vid.me/QNB',
        'md5': 'c62f1156138dc3323902188c5b5a8bd6',
        'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
        'info_dict': {
            'id': 'QNB',
            'ext': 'mp4',
@ -93,6 +93,39 @@ class VidmeIE(InfoExtractor):
        'params': {
            'skip_download': True,
        },
    }, {
        # nsfw, user-disabled
        'url': 'https://vid.me/dzGJ',
        'only_matching': True,
    }, {
        # suspended
        'url': 'https://vid.me/Ox3G',
        'only_matching': True,
    }, {
        # deleted
        'url': 'https://vid.me/KTPm',
        'only_matching': True,
    }, {
        # no formats in the API response
        'url': 'https://vid.me/e5g',
        'info_dict': {
            'id': 'e5g',
            'ext': 'mp4',
            'title': 'Video upload (e5g)',
            'thumbnail': 're:^https?://.*\.jpg',
            'timestamp': 1401480195,
            'upload_date': '20140530',
            'uploader': None,
            'uploader_id': None,
            'age_limit': 0,
            'duration': 483,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
@ -114,6 +147,17 @@ class VidmeIE(InfoExtractor):

        video = response['video']

        if video.get('state') == 'deleted':
            raise ExtractorError(
                'Vidme said: Sorry, this video has been deleted.',
                expected=True)

        if video.get('state') in ('user-disabled', 'suspended'):
            raise ExtractorError(
                'Vidme said: This video has been suspended either due to a copyright claim, '
                'or for violating the terms of use.',
                expected=True)

        formats = [{
            'format_id': f.get('type'),
            'url': f['uri'],
@ -121,6 +165,14 @@ class VidmeIE(InfoExtractor):
            'height': int_or_none(f.get('height')),
            'preference': 0 if f.get('type', '').endswith('clip') else 1,
        } for f in video.get('formats', []) if f.get('uri')]

        if not formats and video.get('complete_url'):
            formats.append({
                'url': video.get('complete_url'),
                'width': int_or_none(video.get('width')),
                'height': int_or_none(video.get('height')),
            })

        self._sort_formats(formats)

        title = video['title']
@ -137,7 +189,7 @@ class VidmeIE(InfoExtractor):

        return {
            'id': video_id,
            'title': title,
            'title': title or 'Video upload (%s)' % video_id,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,

@ -20,8 +20,14 @@ class VidziIE(InfoExtractor):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        video_url = self._html_search_regex(
            r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url')
        video_host = self._html_search_regex(
            r'id=\'vplayer\'><img src="http://(.*?)/i', webpage,
            'video host')
        video_hash = self._html_search_regex(
            r'\|([a-z0-9]+)\|hls\|type', webpage, 'video_hash')
        ext = self._html_search_regex(
            r'\|tracks\|([a-z0-9]+)\|', webpage, 'video ext')
        video_url = 'http://' + video_host + '/' + video_hash + '/v.' + ext
        title = self._html_search_regex(
            r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')

@ -131,10 +131,11 @@ class ViewsterIE(InfoExtractor):
                formats.extend(self._extract_f4m_formats(
                    video_url, video_id, f4m_id='hds'))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                m3u8_formats = self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', m3u8_id='hls',
                    fatal=False # m3u8 sometimes fail
                ))
                    fatal=False) # m3u8 sometimes fail
                if m3u8_formats:
                    formats.extend(m3u8_formats)
            else:
                format_id = media.get('Bitrate')
                f = {

188 youtube_dl/extractor/viidea.py Normal file
@ -0,0 +1,188 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urlparse,
    compat_str,
)
from ..utils import (
    parse_duration,
    js_to_json,
    parse_iso8601,
)


class ViideaIE(InfoExtractor):
    _VALID_URL = r'''(?x)http://(?:www\.)?(?:
            videolectures\.net|
            flexilearn\.viidea\.net|
            presentations\.ocwconsortium\.org|
            video\.travel-zoom\.si|
            video\.pomp-forum\.si|
            tv\.nil\.si|
            video\.hekovnik.com|
            video\.szko\.si|
            kpk\.viidea\.com|
            inside\.viidea\.net|
            video\.kiberpipa\.org|
            bvvideo\.si|
            kongres\.viidea\.net|
            edemokracija\.viidea\.com
        )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$'''

    _TESTS = [{
        'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
        'info_dict': {
            'id': '20171',
            'display_id': 'promogram_igor_mekjavic_eng',
            'ext': 'mp4',
            'title': 'Automatics, robotics and biocybernetics',
            'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
            'thumbnail': 're:http://.*\.jpg',
            'timestamp': 1372349289,
            'upload_date': '20130627',
            'duration': 565,
        },
    }, {
        # video with invalid direct format links (HTTP 403)
        'url': 'http://videolectures.net/russir2010_filippova_nlp/',
        'info_dict': {
            'id': '14891',
            'display_id': 'russir2010_filippova_nlp',
            'ext': 'flv',
            'title': 'NLP at Google',
            'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
            'thumbnail': 're:http://.*\.jpg',
            'timestamp': 1284375600,
            'upload_date': '20100913',
            'duration': 5352,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # event playlist
        'url': 'http://videolectures.net/deeplearning2015_montreal/',
        'info_dict': {
            'id': '23181',
            'title': 'Deep Learning Summer School, Montreal 2015',
            'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7',
            'thumbnail': 're:http://.*\.jpg',
            'timestamp': 1438560000,
        },
        'playlist_count': 30,
    }, {
        # multi part lecture
        'url': 'http://videolectures.net/mlss09uk_bishop_ibi/',
        'info_dict': {
            'id': '9737',
            'display_id': 'mlss09uk_bishop_ibi',
            'title': 'Introduction To Bayesian Inference',
            'thumbnail': 're:http://.*\.jpg',
            'timestamp': 1251622800,
        },
        'playlist': [{
            'info_dict': {
                'id': '9737_part1',
                'display_id': 'mlss09uk_bishop_ibi_part1',
                'ext': 'wmv',
                'title': 'Introduction To Bayesian Inference (Part 1)',
                'thumbnail': 're:http://.*\.jpg',
                'duration': 4622,
                'timestamp': 1251622800,
                'upload_date': '20090830',
            },
        }, {
            'info_dict': {
                'id': '9737_part2',
                'display_id': 'mlss09uk_bishop_ibi_part2',
                'ext': 'wmv',
                'title': 'Introduction To Bayesian Inference (Part 2)',
                'thumbnail': 're:http://.*\.jpg',
                'duration': 5641,
                'timestamp': 1251622800,
                'upload_date': '20090830',
            },
        }],
        'playlist_count': 2,
    }]

    def _real_extract(self, url):
        lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups()

        webpage = self._download_webpage(url, lecture_slug)

        cfg = self._parse_json(self._search_regex(
            [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function',
             r'cfg\s*:\s*({[^}]+})'],
            webpage, 'cfg'), lecture_slug, js_to_json)

        lecture_id = compat_str(cfg['obj_id'])

        base_url = self._proto_relative_url(cfg['livepipe'], 'http:')

        lecture_data = self._download_json(
            '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id),
            lecture_id)['lecture'][0]

        lecture_info = {
            'id': lecture_id,
            'display_id': lecture_slug,
            'title': lecture_data['title'],
            'timestamp': parse_iso8601(lecture_data.get('time')),
            'description': lecture_data.get('description_wiki'),
            'thumbnail': lecture_data.get('thumb'),
        }

        playlist_entries = []
        lecture_type = lecture_data.get('type')
        parts = [compat_str(video) for video in cfg.get('videos', [])]
        if parts:
            multipart = len(parts) > 1

            def extract_part(part_id):
                smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id)
                smil = self._download_smil(smil_url, lecture_id)
                info = self._parse_smil(smil, smil_url, lecture_id)
                info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id)
                info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id)
                if multipart:
                    info['title'] += ' (Part %s)' % part_id
                switch = smil.find('.//switch')
                if switch is not None:
                    info['duration'] = parse_duration(switch.attrib.get('dur'))
                item_info = lecture_info.copy()
                item_info.update(info)
                return item_info

            if explicit_part_id or not multipart:
                result = extract_part(explicit_part_id or parts[0])
            else:
                result = {
                    '_type': 'multi_video',
                    'entries': [extract_part(part) for part in parts],
                }
            result.update(lecture_info)

            # Immediately return explicitly requested part or non event item
            if explicit_part_id or lecture_type != 'evt':
                return result

            playlist_entries.append(result)

        # It's probably a playlist
        if not parts or lecture_type == 'evt':
            playlist_webpage = self._download_webpage(
                '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id)
            entries = [
                self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea')
                for _, video_url in re.findall(
                    r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)]
            playlist_entries.extend(entries)

        playlist = self.playlist_result(playlist_entries, lecture_id)
        playlist.update(lecture_info)
        return playlist

@ -8,11 +8,11 @@ import itertools
from .common import InfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    encode_dict,
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
@ -40,13 +40,13 @@ class VimeoBaseInfoExtractor(InfoExtractor):
        self.report_login()
        webpage = self._download_webpage(self._LOGIN_URL, None, False)
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        data = urlencode_postdata({
        data = urlencode_postdata(encode_dict({
            'action': 'login',
            'email': username,
            'password': password,
            'service': 'vimeo',
            'token': token,
        })
        }))
        login_request = compat_urllib_request.Request(self._LOGIN_URL, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'vuid=%s' % vuid)
@ -133,7 +133,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026',
            },
            'params': {
                'videopassword': 'youtube-dl',
@ -181,6 +181,11 @@ class VimeoIE(VimeoBaseInfoExtractor):
                'uploader_id': 'user28849593',
            },
        },
        {
            'url': 'https://vimeo.com/109815029',
            'note': 'Video not completely processed, "failed" seed status',
            'only_matching': True,
        },
    ]

    @staticmethod
@ -203,10 +208,10 @@ class VimeoIE(VimeoBaseInfoExtractor):
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        data = urlencode_postdata({
        data = urlencode_postdata(encode_dict({
            'password': password,
            'token': token,
        })
        }))
        if url.startswith('http://'):
            # vimeo only supports https now, but the user can give an http url
            url = url.replace('http://', 'https://')
@ -222,7 +227,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        data = urlencode_postdata(encode_dict({'password': password}))
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
@ -273,20 +278,30 @@ class VimeoIE(VimeoBaseInfoExtractor):
        self.report_extraction(video_id)

        vimeo_config = self._search_regex(
            r'vimeo\.config\s*=\s*({.+?});', webpage,
            r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage,
            'vimeo config', default=None)
        if vimeo_config:
            seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {})
            if seed_status.get('state') == 'failed':
                raise ExtractorError(
                    '%s returned error: %s' % (self.IE_NAME, seed_status['title']),
                    '%s said: %s' % (self.IE_NAME, seed_status['title']),
                    expected=True)

        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                    r' data-config-url="(.+?)"', webpage,
                    'config URL', default=None)
                if not config_url:
                    # Sometimes new react-based page is served instead of old one that require
                    # different config URL extraction approach (see
                    # https://github.com/rg3/youtube-dl/pull/7209)
                    vimeo_clip_page_config = self._search_regex(
                        r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
                        'vimeo clip page config')
                    config_url = self._parse_json(
                        vimeo_clip_page_config, video_id)['player']['config_url']
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
@ -473,7 +488,7 @@ class VimeoChannelIE(VimeoBaseInfoExtractor):
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        fields['token'] = token
        fields['password'] = password
        post = urlencode_postdata(fields)
        post = urlencode_postdata(encode_dict(fields))
        password_path = self._search_regex(
            r'action="([^"]+)"', login_form, 'password URL')
        password_url = compat_urlparse.urljoin(page_url, password_path)

@ -1,10 +1,14 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import itertools

from .common import InfoExtractor
from ..utils import unified_strdate
from ..utils import (
    int_or_none,
    unified_strdate,
)


class VineIE(InfoExtractor):
@ -17,10 +21,12 @@ class VineIE(InfoExtractor):
            'ext': 'mp4',
            'title': 'Chicken.',
            'alt_title': 'Vine by Jack Dorsey',
            'description': 'Chicken.',
            'upload_date': '20130519',
            'uploader': 'Jack Dorsey',
            'uploader_id': '76',
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
    }, {
        'url': 'https://vine.co/v/MYxVapFvz2z',
@ -29,11 +35,13 @@ class VineIE(InfoExtractor):
            'id': 'MYxVapFvz2z',
            'ext': 'mp4',
            'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
            'alt_title': 'Vine by Luna',
            'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
            'alt_title': 'Vine by Mars Ruiz',
            'upload_date': '20140815',
            'uploader': 'Luna',
            'uploader': 'Mars Ruiz',
            'uploader_id': '1102363502380728320',
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
    }, {
        'url': 'https://vine.co/v/bxVjBbZlPUH',
@ -43,14 +51,33 @@ class VineIE(InfoExtractor):
            'ext': 'mp4',
            'title': '#mw3 #ac130 #killcam #angelofdeath',
            'alt_title': 'Vine by Z3k3',
            'description': '#mw3 #ac130 #killcam #angelofdeath',
            'upload_date': '20130430',
            'uploader': 'Z3k3',
            'uploader_id': '936470460173008896',
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
    }, {
        'url': 'https://vine.co/oembed/MYxVapFvz2z.json',
        'only_matching': True,
    }, {
        'url': 'https://vine.co/v/e192BnZnZ9V',
        'info_dict': {
            'id': 'e192BnZnZ9V',
            'ext': 'mp4',
            'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2',
            'alt_title': 'Vine by Pimry_zaa',
            'upload_date': '20150705',
            'uploader': 'Pimry_zaa',
            'uploader_id': '1135760698325307392',
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
@ -58,32 +85,33 @@ class VineIE(InfoExtractor):
        webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id)

        data = self._parse_json(
            self._html_search_regex(
                r'window\.POST_DATA = { %s: ({.+?}) };\s*</script>' % video_id,
            self._search_regex(
                r'window\.POST_DATA\s*=\s*{\s*%s\s*:\s*({.+?})\s*};\s*</script>' % video_id,
                webpage, 'vine data'),
            video_id)

        formats = [{
            'format_id': '%(format)s-%(rate)s' % f,
            'vcodec': f['format'],
            'quality': f['rate'],
            'vcodec': f.get('format'),
            'quality': f.get('rate'),
            'url': f['videoUrl'],
        } for f in data['videoUrls']]
        } for f in data['videoUrls'] if f.get('videoUrl')]

        self._sort_formats(formats)

        username = data.get('username')

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'alt_title': self._og_search_description(webpage, default=None),
            'description': data['description'],
            'thumbnail': data['thumbnailUrl'],
            'upload_date': unified_strdate(data['created']),
            'uploader': data['username'],
            'uploader_id': data['userIdStr'],
            'like_count': data['likes']['count'],
            'comment_count': data['comments']['count'],
            'repost_count': data['reposts']['count'],
            'title': data.get('description') or self._og_search_title(webpage),
            'alt_title': 'Vine by %s' % username if username else self._og_search_description(webpage, default=None),
            'thumbnail': data.get('thumbnailUrl'),
            'upload_date': unified_strdate(data.get('created')),
            'uploader': username,
            'uploader_id': data.get('userIdStr'),
            'like_count': int_or_none(data.get('likes', {}).get('count')),
            'comment_count': int_or_none(data.get('comments', {}).get('count')),
            'repost_count': int_or_none(data.get('reposts', {}).get('count')),
            'formats': formats,
        }

@ -281,9 +281,13 @@ class VKIE(InfoExtractor):
            mobj.group(1) + ' ' + mobj.group(2)
        upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2))

        view_count = str_to_int(self._search_regex(
            r'"mv_views_count_number"[^>]*>([\d,.]+) views<',
            info_page, 'view count', fatal=False))
        view_count = None
        views = self._html_search_regex(
            r'"mv_views_count_number"[^>]*>(.+?\bviews?)<',
            info_page, 'view count', fatal=False)
        if views:
            view_count = str_to_int(self._search_regex(
                r'([\d,.]+)', views, 'view count', fatal=False))

        formats = [{
            'format_id': k,

@ -1,121 +1,171 @@
from __future__ import unicode_literals


import json
import re
import sys

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_urlparse,
    compat_urllib_request,
)
from ..compat import compat_urllib_request
from ..utils import (
    ExtractorError,
    int_or_none,
    str_to_int,
    unescapeHTML,
    unified_strdate,
)
from ..aes import (
    aes_decrypt_text
)
from ..aes import aes_decrypt_text


class YouPornIE(InfoExtractor):
    _VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
    _TEST = {
    _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
        'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
        'info_dict': {
            'id': '505835',
            'display_id': 'sex-ed-is-it-safe-to-masturbate-daily',
            'ext': 'mp4',
            'upload_date': '20101221',
            'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
            'uploader': 'Ask Dan And Jennifer',
            'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
            'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Ask Dan And Jennifer',
            'upload_date': '20101221',
            'average_rating': int,
            'view_count': int,
            'comment_count': int,
            'categories': list,
            'tags': list,
            'age_limit': 18,
        }
    }
        },
    }, {
        # Anonymous User uploader
        'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4',
        'info_dict': {
            'id': '561726',
            'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show',
            'ext': 'mp4',
            'title': 'Big Tits Awesome Brunette On amazing webcam show',
            'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Anonymous User',
            'upload_date': '20111125',
            'average_rating': int,
            'view_count': int,
            'comment_count': int,
            'categories': list,
            'tags': list,
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')
        url = mobj.group('proto') + 'www.' + mobj.group('url')
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)
        age_limit = self._rta_search(webpage)
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(request, display_id)

        # Get JSON parameters
        json_params = self._search_regex(
            [r'videoJa?son\s*=\s*({.+})',
             r'var\s+currentVideo\s*=\s*new\s+Video\((.+?)\)[,;]'],
            webpage, 'JSON parameters')
        try:
            params = json.loads(json_params)
        except ValueError:
            raise ExtractorError('Invalid JSON')
        title = self._search_regex(
            [r'(?:video_titles|videoTitle)\s*[:=]\s*(["\'])(?P<title>.+?)\1',
             r'<h1[^>]+class=["\']heading\d?["\'][^>]*>([^<])<'],
            webpage, 'title', group='title')

        self.report_extraction(video_id)
        try:
            video_title = params['title']
            upload_date = unified_strdate(params['release_date_f'])
            video_description = params['description']
            video_uploader = params['submitted_by']
            thumbnail = params['thumbnails'][0]['image']
        except KeyError:
            raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1])
        links = []

        # Get all of the links from the page
        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
        download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
            webpage, 'download list').strip()
        LINK_RE = r'<a href="([^"]+)">'
        links = re.findall(LINK_RE, download_list_html)
        sources = self._search_regex(
            r'sources\s*:\s*({.+?})', webpage, 'sources', default=None)
        if sources:
            for _, link in re.findall(r'[^:]+\s*:\s*(["\'])(http.+?)\1', sources):
                links.append(link)

        # Get all encrypted links
        encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage)
        for encrypted_link in encrypted_links:
            link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
        # Fallback #1
        for _, link in re.findall(
                r'(?:videoUrl|videoSrc|videoIpadUrl|html5PlayerSrc)\s*[:=]\s*(["\'])(http.+?)\1', webpage):
            links.append(link)

        # Fallback #2, this also contains extra low quality 180p format
        for _, link in re.findall(r'<a[^>]+href=(["\'])(http.+?)\1[^>]+title=["\']Download [Vv]ideo', webpage):
            links.append(link)

        # Fallback #3, encrypted links
        for _, encrypted_link in re.findall(
                r'encryptedQuality\d{3,4}URL\s*=\s*(["\'])([\da-zA-Z+/=]+)\1', webpage):
            links.append(aes_decrypt_text(encrypted_link, title, 32).decode('utf-8'))

        formats = []
        for link in links:
            # A link looks like this:
            # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
            # A path looks like this:
            # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
            video_url = unescapeHTML(link)
            path = compat_urllib_parse_urlparse(video_url).path
            format_parts = path.split('/')[4].split('_')[:2]

            dn = compat_urllib_parse_urlparse(video_url).netloc.partition('.')[0]

            resolution = format_parts[0]
            height = int(resolution[:-len('p')])
            bitrate = int(format_parts[1][:-len('k')])
            format = '-'.join(format_parts) + '-' + dn

            formats.append({
        for video_url in set(unescapeHTML(link) for link in links):
            f = {
                'url': video_url,
                'format': format,
                'format_id': format,
                'height': height,
                'tbr': bitrate,
                'resolution': resolution,
            })

            }
            # Video URL's path looks like this:
            # /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
            # We will benefit from it by extracting some metadata
            mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url)
            if mobj:
                height = int(mobj.group('height'))
                bitrate = int(mobj.group('bitrate'))
                f.update({
                    'format_id': '%dp-%dk' % (height, bitrate),
                    'height': height,
                    'tbr': bitrate,
                })
            formats.append(f)
        self._sort_formats(formats)

        if not formats:
            raise ExtractorError('ERROR: no known formats available for video')
        description = self._html_search_regex(
            r'(?s)<div[^>]+class=["\']video-description["\'][^>]*>(.+?)</div>',
            webpage, 'description', default=None)
        thumbnail = self._search_regex(
            r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
            webpage, 'thumbnail', fatal=False, group='thumbnail')

        uploader = self._html_search_regex(
            r'(?s)<div[^>]+class=["\']videoInfoBy["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>',
            webpage, 'uploader', fatal=False)
        upload_date = unified_strdate(self._html_search_regex(
            r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>',
            webpage, 'upload date', fatal=False))

        age_limit = self._rta_search(webpage)

        average_rating = int_or_none(self._search_regex(
            r'<div[^>]+class=["\']videoInfoRating["\'][^>]*>\s*<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>',
            webpage, 'average rating', fatal=False))

        view_count = str_to_int(self._search_regex(
            r'(?s)<div[^>]+class=["\']videoInfoViews["\'][^>]*>.*?([\d,.]+)\s*</div>',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            r'>All [Cc]omments? \(([\d,.]+)\)',
            webpage, 'comment count', fatal=False))

        def extract_tag_box(title):
            tag_box = self._search_regex(
                (r'<div[^>]+class=["\']tagBoxTitle["\'][^>]*>\s*%s\b.*?</div>\s*'
                 '<div[^>]+class=["\']tagBoxContent["\']>(.+?)</div>') % re.escape(title),
                webpage, '%s tag box' % title, default=None)
            if not tag_box:
                return []
            return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box)

        categories = extract_tag_box('Category')
        tags = extract_tag_box('Tags')

        return {
            'id': video_id,
            'uploader': video_uploader,
            'upload_date': upload_date,
            'title': video_title,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'description': video_description,
            'uploader': uploader,
            'upload_date': upload_date,
            'average_rating': average_rating,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
            'tags': tags,
            'age_limit': age_limit,
            'formats': formats,
        }

@ -178,6 +178,52 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
            return


class YoutubePlaylistBaseInfoExtractor(InfoExtractor):
    # Extract the video ids from the playlist pages
    def _entries(self, page, playlist_id):
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            for video_id, video_title in self.extract_videos_from_page(content_html):
                yield self.url_result(
                    video_id, 'Youtube', video_id=video_id,
                    video_title=video_title)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            if not content_html.strip():
                # Some webpages show a "Load more" button even though they
                # don't have more videos
                break
            more_widget_html = more['load_more_widget_html']

    def extract_videos_from_page(self, page):
        ids_in_page = []
        titles_in_page = []
        for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist
            # (not sure whether this is still the case)
            if 'index' in mobj.groupdict() and mobj.group('id') == '0':
                continue
            video_id = mobj.group('id')
            video_title = unescapeHTML(mobj.group('title'))
            if video_title:
                video_title = video_title.strip()
            try:
                idx = ids_in_page.index(video_id)
                if video_title and not titles_in_page[idx]:
                    titles_in_page[idx] = video_title
            except ValueError:
                ids_in_page.append(video_id)
                titles_in_page.append(video_title)
        return zip(ids_in_page, titles_in_page)


class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com'
    _VALID_URL = r"""(?x)^
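The de-duplication in `extract_videos_from_page` is subtle: a later match may carry the title that an earlier, title-less link for the same video lacked, so titles are backfilled rather than overwritten. A self-contained sketch of that merge; the pattern and HTML are simplified stand-ins, not the class's real `_VIDEO_RE` or a real page:

```python
import re

# Simplified stand-in pattern: an optional title attribute plus a watch link.
VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[\w-]{11})'

def extract_videos_from_page(page):
    ids_in_page, titles_in_page = [], []
    for mobj in re.finditer(VIDEO_RE, page):
        video_id = mobj.group('id')
        video_title = mobj.group('title')
        try:
            # Seen before: keep the first occurrence, but backfill a title
            # if the earlier match lacked one.
            idx = ids_in_page.index(video_id)
            if video_title and not titles_in_page[idx]:
                titles_in_page[idx] = video_title
        except ValueError:
            ids_in_page.append(video_id)
            titles_in_page.append(video_title)
    return list(zip(ids_in_page, titles_in_page))

page = ('<a href="/watch?v=dQw4w9WgXcQ">thumb</a>'
        '<a title="Example" href="/watch?v=dQw4w9WgXcQ">Example</a>')
print(extract_videos_from_page(page))  # [('dQw4w9WgXcQ', 'Example')]
```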
@ -1061,6 +1107,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                    if not video_info:
                        video_info = get_video_info
                    if 'token' in get_video_info:
                        # Different get_video_info requests may report different results, e.g.
                        # some may report video unavailability, but some may serve it without
                        # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
                        # the original webpage as well as el=info and el=embedded get_video_info
                        # requests report video unavailability due to geo restriction while
                        # el=detailpage succeeds and returns valid data). This is probably
                        # due to YouTube measures against IP ranges of hosting providers.
                        # Work around this by preferring the first successful video_info
                        # containing the token if no such video_info has been found yet.
                        if 'token' not in video_info:
                            video_info = get_video_info
                        break
        if 'token' not in video_info:
            if 'reason' in video_info:
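The token-preference workaround is easier to follow with toy data. A hedged miniature, using Python 3's `urllib.parse` in place of youtube-dl's `compat_parse_qs`; the query strings are invented stand-ins for successive get_video_info responses:

```python
from urllib.parse import parse_qs

# Invented responses: the first reports a geo error, the second carries a token.
responses = [
    'status=fail&reason=geo+restricted',
    'status=ok&token=abc123&title=Some+video',
]

video_info = None
for video_info_webpage in responses:
    get_video_info = parse_qs(video_info_webpage)
    if not video_info:
        video_info = get_video_info
    if 'token' in get_video_info:
        # Prefer the first response that actually carries a token
        if 'token' not in video_info:
            video_info = get_video_info
        break

print(video_info['token'])  # ['abc123']
```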
@ -1419,7 +1476,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
        }


class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r"""(?x)(?:
                        (?:https?://)?
@ -1440,7 +1497,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
                        ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
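The extended `_VIDEO_RE` now optionally captures the link text as the video title. A quick standalone check against a simplified, invented playlist snippet:

```python
import re

# Same shape as the extended _VIDEO_RE above; the HTML is an invented example.
VIDEO_RE = (r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
            r'(?:[^>]+>(?P<title>[^<]+))?')
html = ('<a href="/watch?v=dQw4w9WgXcQ&amp;list=PL123&amp;index=2"'
        ' class="pl-video-title-link">Some title</a>')
m = re.search(VIDEO_RE, html)
print(m.group('id'), m.group('index'), m.group('title'))
# dQw4w9WgXcQ 2 Some title
```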
@ -1557,37 +1614,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
            else:
                self.report_warning('Youtube gives an alert message: ' + match)

        # Extract the video ids from the playlist pages
        def _entries():
            more_widget_html = content_html = page
            for page_num in itertools.count(1):
                matches = re.finditer(self._VIDEO_RE, content_html)
                # We remove the duplicates and the link with index 0
                # (it's not the first video of the playlist)
                new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
                for vid_id in new_ids:
                    yield self.url_result(vid_id, 'Youtube', video_id=vid_id)

                mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
                if not mobj:
                    break

                more = self._download_json(
                    'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                    'Downloading page #%s' % page_num,
                    transform_source=uppercase_escape)
                content_html = more['content_html']
                if not content_html.strip():
                    # Some webpages show a "Load more" button even though they
                    # don't have more videos
                    break
                more_widget_html = more['load_more_widget_html']

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
            page, 'title')

        return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)

    def _real_extract(self, url):
        # Extract playlist id
@ -1613,36 +1644,31 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
        return self._extract_playlist(playlist_id)


class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
    _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&amp;?'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
        'info_dict': {
            'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'Uploads from lex will',
        }
    }, {
        'note': 'Age restricted channel',
        # from https://www.youtube.com/user/DeusExOfficial
        'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
        'playlist_mincount': 64,
        'info_dict': {
            'id': 'UUs0ifCMCm1icqRbqhUINa0w',
            'title': 'Uploads from Deus Ex',
        },
    }]

    @staticmethod
    def extract_videos_from_page(page):
        ids_in_page = []
        titles_in_page = []
        for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&amp;?', page):
            video_id = mobj.group('id')
            video_title = unescapeHTML(mobj.group('title'))
            try:
                idx = ids_in_page.index(video_id)
                if video_title and not titles_in_page[idx]:
                    titles_in_page[idx] = video_title
            except ValueError:
                ids_in_page.append(video_id)
                titles_in_page.append(video_title)
        return zip(ids_in_page, titles_in_page)

    def _real_extract(self, url):
        channel_id = self._match_id(url)
@ -1661,7 +1687,7 @@ class YoutubeChannelIE(InfoExtractor):
            'channelId', channel_page, 'channel id', default=None)
        if not channel_playlist_id:
            channel_playlist_id = self._search_regex(
                r'data-(?:channel-external-|yt)id="([^"]+)"',
                channel_page, 'channel id', default=None)
        if channel_playlist_id and channel_playlist_id.startswith('UC'):
            playlist_id = 'UU' + channel_playlist_id[2:]
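The widened `data-...id` pattern accepts both attribute spellings. A quick check with invented markup, including the `UC...` to `UU...` uploads-playlist mapping used right above:

```python
import re

# Both snippets are invented examples of the two markup variants.
CHANNEL_ID_RE = r'data-(?:channel-external-|yt)id="([^"]+)"'

for snippet in ('<div data-channel-external-id="UCKfVa3S1e4PHvxWcwyMMg8w">',
                '<span data-ytid="UCKfVa3S1e4PHvxWcwyMMg8w">'):
    channel_id = re.search(CHANNEL_ID_RE, snippet).group(1)
    # A channel id 'UC...' maps to its uploads playlist 'UU...'
    print('UU' + channel_id[2:])
```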
@ -1685,29 +1711,7 @@ class YoutubeChannelIE(InfoExtractor):
                for video_id, video_title in self.extract_videos_from_page(channel_page)]
            return self.playlist_result(entries, channel_id)

        def _entries():
            more_widget_html = content_html = channel_page
            for pagenum in itertools.count(1):

                for video_id, video_title in self.extract_videos_from_page(content_html):
                    yield self.url_result(
                        video_id, 'Youtube', video_id=video_id,
                        video_title=video_title)

                mobj = re.search(
                    r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
                    more_widget_html)
                if not mobj:
                    break

                more = self._download_json(
                    'https://youtube.com/%s' % mobj.group('more'), channel_id,
                    'Downloading page #%s' % (pagenum + 1),
                    transform_source=uppercase_escape)
                content_html = more['content_html']
                more_widget_html = more['load_more_widget_html']

        return self.playlist_result(self._entries(channel_page, channel_id), channel_id)


class YoutubeUserIE(YoutubeChannelIE):
@ -9,6 +9,7 @@ from ..utils import (
    int_or_none,
    unified_strdate,
    OnDemandPagedList,
    xpath_text,
)

@ -19,13 +20,11 @@ def extract_from_xml_url(ie, video_id, xml_url):
        errnote='Failed to download video info')

    title = doc.find('.//information/title').text
    description = xpath_text(doc, './/information/detail', 'description')
    duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
    uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
    uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
    upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))

    def xml_to_format(fnode):
        video_url = fnode.find('url').text
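All of these lookups rely on `xpath_text` returning `None` for a missing node instead of raising, which is what lets every field collapse to one line. A simplified stand-in shows the behaviour; the real helper also takes `name` and `fatal` arguments, omitted here:

```python
import xml.etree.ElementTree as ET

# Minimal stand-in for the xpath_text helper used above.
def xpath_text(node, xpath):
    n = node.find(xpath)
    return n.text if n is not None else None

doc = ET.fromstring('<video><details><lengthSec>90</lengthSec></details></video>')
print(xpath_text(doc, './/details/lengthSec'))           # 90
print(xpath_text(doc, './/details/originChannelTitle'))  # None
```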
@ -40,15 +39,14 @@ def extract_from_xml_url(ie, video_id, xml_url):
        ext = format_m.group('container')
        proto = format_m.group('proto').lower()

        quality = xpath_text(fnode, './quality', 'quality')
        abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
        vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)

        width = int_or_none(xpath_text(fnode, './width', 'width'))
        height = int_or_none(xpath_text(fnode, './height', 'height'))

        filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))

        format_note = ''
        if not format_note:
@ -64,12 +62,31 @@ def extract_from_xml_url(ie, video_id, xml_url):
            'vbr': vbr,
            'width': width,
            'height': height,
            'filesize': filesize,
            'format_note': format_note,
            'protocol': proto,
            '_available': is_available,
        }

    def xml_to_thumbnails(fnode):
        thumbnails = []
        for node in fnode:
            thumbnail_url = node.text
            if not thumbnail_url:
                continue
            thumbnail = {
                'url': thumbnail_url,
            }
            if 'key' in node.attrib:
                m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
                if m:
                    thumbnail['width'] = int(m.group(1))
                    thumbnail['height'] = int(m.group(2))
            thumbnails.append(thumbnail)
        return thumbnails

    thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))

    format_nodes = doc.findall('.//formitaeten/formitaet')
    formats = list(filter(
        lambda f: f['_available'],
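`xml_to_thumbnails` parses an optional `key="WIDTHxHEIGHT"` attribute into numeric dimensions. A standalone sketch of the same walk over a made-up teaserimage list:

```python
import re
import xml.etree.ElementTree as ET

# The XML below is an invented teaserimage list in the shape the code walks.
def xml_to_thumbnails(nodes):
    thumbnails = []
    for node in nodes:
        if not node.text:
            continue
        thumbnail = {'url': node.text}
        m = re.match(r'^([0-9]+)x([0-9]+)$', node.attrib.get('key', ''))
        if m:
            thumbnail['width'] = int(m.group(1))
            thumbnail['height'] = int(m.group(2))
        thumbnails.append(thumbnail)
    return thumbnails

doc = ET.fromstring(
    '<teaserimages>'
    '<teaserimage key="640x360">http://example.com/t1.jpg</teaserimage>'
    '<teaserimage>http://example.com/t2.jpg</teaserimage>'
    '</teaserimages>')
print(xml_to_thumbnails(doc.findall('./teaserimage')))
# [{'url': ..., 'width': 640, 'height': 360}, {'url': ...}]
```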
@ -81,6 +98,7 @@ def extract_from_xml_url(ie, video_id, xml_url):
        'title': title,
        'description': description,
        'duration': duration,
        'thumbnails': thumbnails,
        'uploader': uploader,
        'uploader_id': uploader_id,
        'upload_date': upload_date,
@ -272,7 +272,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
                return [], information

        try:
            self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
            self.run_ffmpeg(path, new_path, acodec, more_opts)
        except AudioConversionError as e:
            raise PostProcessingError(
@ -3,6 +3,7 @@
from __future__ import unicode_literals

import base64
import calendar
import codecs
import contextlib
@ -35,6 +36,7 @@ import zlib
from .compat import (
    compat_basestring,
    compat_chr,
    compat_etree_fromstring,
    compat_html_entities,
    compat_http_client,
    compat_kwargs,
@ -177,10 +179,19 @@ def xpath_with_ns(path, ns_map):
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        if sys.version_info < (2, 7):  # Crazy 2.6
            xpath = xpath.encode('ascii')
        return node.find(xpath)

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
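`xpath_element` now accepts either a single XPath or a sequence of XPaths and returns the first that resolves. A minimal sketch of just that fallback behaviour, without the Python 2.6 shim or the default/fatal handling:

```python
import xml.etree.ElementTree as ET

# Simplified illustration of the single-or-sequence lookup added above.
def find_first(node, xpath):
    if isinstance(xpath, str):
        return node.find(xpath)
    for xp in xpath:
        n = node.find(xp)
        if n is not None:
            return n
    return None

doc = ET.fromstring('<root><new_name>value</new_name></root>')
n = find_first(doc, ('./old_name', './new_name'))
print(n.text)  # value
```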
@ -355,7 +366,7 @@ def sanitize_path(s):
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
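The extended character class also replaces a trailing space or dot, which Windows forbids at the end of a path component. A quick demonstration of the per-component substitution in isolation:

```python
import re

# Same substitution as above, applied to a single path component.
def sanitize_part(path_part):
    return re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)

print(sanitize_part('episode: 1'))       # episode# 1
print(sanitize_part('trailing dot.'))    # trailing dot#
print(sanitize_part('trailing space '))  # trailing space#
```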
@ -813,9 +824,11 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        m = re.search(
            r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if not m:
            timezone = datetime.timedelta()
@ -828,9 +841,12 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass


def unified_strdate(date_str, day_first=True):
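Two behaviours changed in `parse_iso8601`: fractional seconds are stripped up front so the fixed strptime format keeps matching, and an unparseable string now falls through instead of raising. A toy UTC-only version (not the real helper, which also handles timezone offsets) showing both:

```python
import calendar
import datetime
import re

# Toy UTC-only illustration of the changes above.
def parse_iso8601_utc(date_str):
    date_str = re.sub(r'\.[0-9]+', '', date_str)  # drop '.123'-style fractions
    date_str = re.sub(r'Z$', '', date_str)
    try:
        dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S')
        return calendar.timegm(dt.timetuple())
    except ValueError:
        return None  # mirrors the new 'except ValueError: pass' fallthrough

print(parse_iso8601_utc('2015-11-02T10:30:00.123Z'))  # 1446460200
print(parse_iso8601_utc('not a date'))                # None
```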
@ -895,7 +911,8 @@ def unified_strdate(date_str, day_first=True):
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return compat_str(upload_date)


def determine_ext(url, default_ext='unknown_video'):
@ -1650,29 +1667,6 @@ def encode_dict(d, encoding='utf-8'):
    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())


try:
    etree_iter = xml.etree.ElementTree.Element.iter
except AttributeError:  # Python <=2.6
    etree_iter = lambda n: n.findall('.//*')


def parse_xml(s):
    class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
        def doctype(self, name, pubid, system):
            pass  # Ignore doctypes

    parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
    kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
    tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
    # Fix up XML parser in Python 2.x
    if sys.version_info < (3, 0):
        for n in etree_iter(tree):
            if n.text is not None:
                if not isinstance(n.text, compat_str):
                    n.text = n.text.decode('utf-8')
    return tree


US_RATINGS = {
    'G': 0,
    'PG': 10,
@ -1700,8 +1694,8 @@ def js_to_json(code):
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            v = v[1:-1]
        v = re.sub(r"\\\\|\\'|\"", lambda m: {
            '\\\\': '\\\\',
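The new string handling unescapes `\'` inside double-quoted values and re-escapes quotes for JSON. A toy version of just the string branch (not the full `fix_kv`, which also handles bare identifiers):

```python
import json
import re

# Toy illustration of the string handling above: a JS string literal is
# rewritten as a JSON string literal.
def js_string_to_json(v):
    if v.startswith('"'):
        return '"%s"' % re.sub(r"\\'", "'", v[1:-1])
    if v.startswith("'"):
        v = v[1:-1]
    v = re.sub(r"\\\\|\\'|\"", lambda m: {
        '\\\\': '\\\\',
        "\\'": "'",
        '"': '\\"',
    }[m.group(0)], v)
    return '"%s"' % v

print(json.loads(js_string_to_json(r"'don\'t'")))     # don't
print(json.loads(js_string_to_json("'say \"hi\"'")))  # say "hi"
```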
@ -1795,6 +1789,10 @@ def urlhandle_detect_ext(url_handle):
    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
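The new `encode_data_uri` helper in isolation, with a tiny example payload:

```python
import base64

# Same helper as above; the payload is a made-up example.
def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))

print(encode_data_uri(b'hello', 'text/plain'))
# data:text/plain;base64,aGVsbG8=
```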
@ -1969,7 +1967,7 @@ def dfxp2srt(dfxp_data):
        return out

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')
@ -1,3 +1,3 @@
from __future__ import unicode_literals

__version__ = '2015.11.02'