From 1cc47c667419e0eadc0a6989256ab7b276852adf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Apr 2018 23:49:30 +0700
Subject: [PATCH 001/125] [utils] Fix match_str for boolean meta fields

---
 test/test_utils.py  | 12 ++++++++++++
 youtube_dl/utils.py |  4 ++--
 2 files changed, 14 insertions(+), 2 deletions(-)
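
A standalone sketch of the fixed semantics (a reproduction for
illustration, not the library code itself): a bare field name now
matches only a truthy boolean and '!' only a falsy one, while
non-boolean fields keep the presence/absence test.

    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    assert UNARY_OPERATORS[''](True) is True
    assert UNARY_OPERATORS[''](False) is False  # previously True: False is not None
    assert UNARY_OPERATORS['!'](None) is True
    assert UNARY_OPERATORS['!']('abc') is False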

diff --git a/test/test_utils.py b/test/test_utils.py
index a1fe6fdb2..253a7fe17 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -1072,6 +1072,18 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
         self.assertFalse(match_str(
             'like_count > 100 & dislike_count <? 50 & description',
             {'like_count': 190, 'dislike_count': 10}))
+        self.assertTrue(match_str('is_live', {'is_live': True}))
+        self.assertFalse(match_str('is_live', {'is_live': False}))
+        self.assertFalse(match_str('is_live', {'is_live': None}))
+        self.assertFalse(match_str('is_live', {}))
+        self.assertFalse(match_str('!is_live', {'is_live': True}))
+        self.assertTrue(match_str('!is_live', {'is_live': False}))
+        self.assertTrue(match_str('!is_live', {'is_live': None}))
+        self.assertTrue(match_str('!is_live', {}))
+        self.assertTrue(match_str('title', {'title': 'abc'}))
+        self.assertTrue(match_str('title', {'title': ''}))
+        self.assertFalse(match_str('!title', {'title': 'abc'}))
+        self.assertFalse(match_str('!title', {'title': ''}))
 
     def test_parse_dfxp_time_expr(self):
         self.assertEqual(parse_dfxp_time_expr(None), None)
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 027d12785..574284e94 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2574,8 +2574,8 @@ def _match_one(filter_part, dct):
         return op(actual_value, comparison_value)
 
     UNARY_OPERATORS = {
-        '': lambda v: v is not None,
-        '!': lambda v: v is None,
+        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
+        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
     }
     operator_rex = re.compile(r'''(?x)\s*
         (?P<op>%s)\s*(?P<key>[a-z_]+)

From 0ff51adae6feab7386874eddc0d61dbeaf063bf2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Apr 2018 23:53:01 +0700
Subject: [PATCH 002/125] [twitch] Extract is_live according to status (closes
 #16259)

---
 youtube_dl/extractor/twitch.py | 8 ++++++++
 1 file changed, 8 insertions(+)
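
The if/elif chain below is equivalent to a dict lookup with a None
default, which keeps the three-way distinction between live, recorded
and unknown (sketch):

    is_live = {'recording': True, 'recorded': False}.get(info.get('status'))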

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index f736283e9..4c11fd3c3 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -168,6 +168,13 @@ class TwitchItemBaseIE(TwitchBaseIE):
         return self.playlist_result(entries, info['id'], info['title'])
 
     def _extract_info(self, info):
+        status = info.get('status')
+        if status == 'recording':
+            is_live = True
+        elif status == 'recorded':
+            is_live = False
+        else:
+            is_live = None
         return {
             'id': info['_id'],
             'title': info.get('title') or 'Untitled Broadcast',
@@ -178,6 +185,7 @@ class TwitchItemBaseIE(TwitchBaseIE):
             'uploader_id': info.get('channel', {}).get('name'),
             'timestamp': parse_iso8601(info.get('recorded_at')),
             'view_count': int_or_none(info.get('views')),
+            'is_live': is_live,
         }
 
     def _real_extract(self, url):

From 76030543cd5e2214c47aa82f03b3e2cec97e7bc1 Mon Sep 17 00:00:00 2001
From: Alexandre Macabies <Zopieux@users.noreply.github.com>
Date: Tue, 24 Apr 2018 19:49:30 +0200
Subject: [PATCH 003/125] [openload] Recognize IPv6 stream URLs (closes #16137)

---
 youtube_dl/extractor/openload.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
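
A quick check of the first added pattern against a synthetic page
fragment (stream ID and IPv6 address invented for illustration):

    import re
    page = '><span>stream-id~1524567890~ffa1:22::~AbCd-12</span><'
    m = re.search(
        r'>\s*([\w-]+~\d{10,}~(?:[a-f\d]+:){2}:~[\w-]+)\s*<', page)
    assert m.group(1) == 'stream-id~1524567890~ffa1:22::~AbCd-12'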

diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py
index 650f95656..d0bdd60b8 100644
--- a/youtube_dl/extractor/openload.py
+++ b/youtube_dl/extractor/openload.py
@@ -340,7 +340,10 @@ class OpenloadIE(InfoExtractor):
                       get_element_by_id('streamurj', webpage) or
                       self._search_regex(
                           (r'>\s*([\w-]+~\d{10,}~\d+\.\d+\.0\.0~[\w-]+)\s*<',
-                           r'>\s*([\w~-]+~\d+\.\d+\.\d+\.\d+~[\w~-]+)'), webpage,
+                           r'>\s*([\w~-]+~\d+\.\d+\.\d+\.\d+~[\w~-]+)',
+                           r'>\s*([\w-]+~\d{10,}~(?:[a-f\d]+:){2}:~[\w-]+)\s*<',
+                           r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)\s*<',
+                           r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)'), webpage,
                           'stream URL'))
 
         video_url = 'https://openload.co/stream/%s?mime=true' % decoded_id

From 5d0fe6d23e4407bee3caec33955d4cb410bebb5d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 25 Apr 2018 00:56:16 +0700
Subject: [PATCH 004/125] Credit @Zopieux for #16250

---
 AUTHORS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/AUTHORS b/AUTHORS
index 6223212aa..880e0abee 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -236,3 +236,4 @@ Lei Wang
 Petr Novák
 Leonardo Taccari
 Martin Weinelt
+Alexandre Macabies

From 95284bc281d8aa3b1d6863ccb536da9d4cf6433c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 25 Apr 2018 01:01:06 +0700
Subject: [PATCH 005/125] Credit @TingPing for picarto (#15551)

---
 AUTHORS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/AUTHORS b/AUTHORS
index 880e0abee..812051796 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -236,4 +236,5 @@ Lei Wang
 Petr Novák
 Leonardo Taccari
 Martin Weinelt
+TingPing
 Alexandre Macabies

From ecb24f7c081b764dd669cb4b277d8c14e55b2a39 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 25 Apr 2018 01:02:28 +0700
Subject: [PATCH 006/125] Credit @f2face for #16115

---
 AUTHORS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/AUTHORS b/AUTHORS
index 812051796..eaf96d79d 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -236,5 +236,6 @@ Lei Wang
 Petr Novák
 Leonardo Taccari
 Martin Weinelt
+Surya Oktafendri
 TingPing
 Alexandre Macabies

From e028d4f506562a1febf76277795305e296823ad6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 25 Apr 2018 01:03:42 +0700
Subject: [PATCH 007/125] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 185fa1753..a731fde29 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,32 @@
+version <unreleased>
+
+Core
+* [utils] Fix match_str for boolean meta fields
++ [Makefile] Add support for pandoc 2 and disable smart extension (#16251)
+* [YoutubeDL] Fix typo in media extension compatibility checker (#16215)
+
+Extractors
++ [openload] Recognize IPv6 stream URLs (#16136, #16137, #16205, #16246,
+  #16250)
++ [twitch] Extract is_live according to status (#16259)
+* [pornflip] Relax URL regular expression (#16258)
+- [etonline] Remove extractor (#16256)
+* [breakcom] Fix extraction (#16254)
++ [youtube] Add ability to authenticate with cookies
+* [youtube:feed] Implement lazy playlist extraction (#10184)
++ [svt] Add support for TV channel live streams (#15279, #15809)
+* [ccma] Fix video extraction (#15931)
+* [rentv] Fix extraction (#15227)
++ [nick] Add support for nickjr.nl (#16230)
+* [extremetube] Fix metadata extraction
++ [keezmovies] Add support for generic embeds (#16134, #16154)
+* [nexx] Extract new azure URLs (#16223)
+* [cbssports] Fix extraction (#16217)
+* [kaltura] Improve embeds detection (#16201)
+* [instagram:user] Fix extraction (#16119)
+* [cbs] Skip DRM asset types (#16104)
+
+
 version 2018.04.16
 
 Extractors

From b5802d69f511481a87d8604fa1577bca8370cab5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 25 Apr 2018 01:12:40 +0700
Subject: [PATCH 008/125] release 2018.04.25

---
 .github/ISSUE_TEMPLATE.md | 6 +++---
 ChangeLog                 | 2 +-
 docs/supportedsites.md    | 1 -
 youtube_dl/version.py     | 2 +-
 4 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 69f996179..252fa0adf 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.04.16*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.04.16**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.04.25*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.04.25**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.04.16
+[debug] youtube-dl version 2018.04.25
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/ChangeLog b/ChangeLog
index a731fde29..4a3df67df 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2018.04.25
 
 Core
 * [utils] Fix match_str for boolean meta fields
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 715d16cfe..a110f687b 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -257,7 +257,6 @@
  - **ESPN**
  - **ESPNArticle**
  - **EsriVideo**
- - **ETOnline**
  - **Europa**
  - **EveryonesMixtape**
  - **ExpoTV**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 5aefdd0a2..4e3cb39c6 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.04.16'
+__version__ = '2018.04.25'

From d3711b00502d9104a3697aba5d210a25066ca756 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 25 Apr 2018 02:14:27 +0700
Subject: [PATCH 009/125] [devscripts/gh-pages/generate-download.py] Use
 program checksum from versions.json

---
 devscripts/gh-pages/generate-download.py | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)
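
The script now assumes update/versions.json carries a (URL, SHA256)
pair per artifact rather than downloading the binary to hash it.
Roughly this shape (values invented for illustration):

    versions_info = {
        'latest': '2018.04.25',
        'versions': {
            '2018.04.25': {
                'bin': ['https://example.com/youtube-dl', '<sha256>'],
                'exe': ['https://example.com/youtube-dl.exe', '<sha256>'],
                'tar': ['https://example.com/youtube-dl.tar.gz', '<sha256>'],
            },
        },
    }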

diff --git a/devscripts/gh-pages/generate-download.py b/devscripts/gh-pages/generate-download.py
index fcd7e1dff..a873d32ee 100755
--- a/devscripts/gh-pages/generate-download.py
+++ b/devscripts/gh-pages/generate-download.py
@@ -1,27 +1,22 @@
 #!/usr/bin/env python3
 from __future__ import unicode_literals
 
-import hashlib
-import urllib.request
 import json
 
 versions_info = json.load(open('update/versions.json'))
 version = versions_info['latest']
-URL = versions_info['versions'][version]['bin'][0]
-
-data = urllib.request.urlopen(URL).read()
+version_dict = versions_info['versions'][version]
 
 # Read template page
 with open('download.html.in', 'r', encoding='utf-8') as tmplf:
     template = tmplf.read()
 
-sha256sum = hashlib.sha256(data).hexdigest()
 template = template.replace('@PROGRAM_VERSION@', version)
-template = template.replace('@PROGRAM_URL@', URL)
-template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
-template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
-template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
-template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
-template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
+template = template.replace('@PROGRAM_URL@', version_dict['bin'][0])
+template = template.replace('@PROGRAM_SHA256SUM@', version_dict['bin'][1])
+template = template.replace('@EXE_URL@', version_dict['exe'][0])
+template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1])
+template = template.replace('@TAR_URL@', version_dict['tar'][0])
+template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1])
 with open('download.html', 'w', encoding='utf-8') as dlf:
     dlf.write(template)

From c84eae4f66be8a22c14b852bdb01773bb3807239 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 27 Apr 2018 03:45:52 +0700
Subject: [PATCH 010/125] [funk:channel] Improve extraction (closes #16285)

---
 youtube_dl/extractor/funk.py | 51 ++++++++++++++++++++++++++++--------
 1 file changed, 40 insertions(+), 11 deletions(-)
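
The byIdList fallback leans on try_get to pull result[0] out of the
response without raising. A simplified sketch of that helper's
contract (not the exact library source):

    def try_get(src, getter, expected_type=None):
        # Swallow lookup errors; optionally type-check the result.
        try:
            v = getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            return None
        if expected_type is None or isinstance(v, expected_type):
            return v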

diff --git a/youtube_dl/extractor/funk.py b/youtube_dl/extractor/funk.py
index faea6576f..0ff058619 100644
--- a/youtube_dl/extractor/funk.py
+++ b/youtube_dl/extractor/funk.py
@@ -5,7 +5,10 @@ import re
 
 from .common import InfoExtractor
 from .nexx import NexxIE
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    try_get,
+)
 
 
 class FunkBaseIE(InfoExtractor):
@@ -77,6 +80,20 @@ class FunkChannelIE(FunkBaseIE):
         'params': {
             'skip_download': True,
         },
+    }, {
+        # only available via byIdList API
+        'url': 'https://www.funk.net/channel/informr/martin-sonneborn-erklaert-die-eu',
+        'info_dict': {
+            'id': '205067',
+            'ext': 'mp4',
+            'title': 'Martin Sonneborn erklärt die EU',
+            'description': 'md5:050f74626e4ed87edf4626d2024210c0',
+            'timestamp': 1494424042,
+            'upload_date': '20170510',
+        },
+        'params': {
+            'skip_download': True,
+        },
     }, {
         'url': 'https://www.funk.net/channel/59d5149841dca100012511e3/mein-erster-job-lovemilla-folge-1/lovemilla/',
         'only_matching': True,
@@ -87,16 +104,28 @@ class FunkChannelIE(FunkBaseIE):
         channel_id = mobj.group('id')
         alias = mobj.group('alias')
 
-        results = self._download_json(
-            'https://www.funk.net/api/v3.0/content/videos/filter', channel_id,
-            headers={
-                'authorization': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnROYW1lIjoiY3VyYXRpb24tdG9vbCIsInNjb3BlIjoic3RhdGljLWNvbnRlbnQtYXBpLGN1cmF0aW9uLWFwaSxzZWFyY2gtYXBpIn0.q4Y2xZG8PFHai24-4Pjx2gym9RmJejtmK6lMXP5wAgc',
-                'Referer': url,
-            }, query={
-                'channelId': channel_id,
-                'size': 100,
-            })['result']
+        headers = {
+            'authorization': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnROYW1lIjoiY3VyYXRpb24tdG9vbCIsInNjb3BlIjoic3RhdGljLWNvbnRlbnQtYXBpLGN1cmF0aW9uLWFwaSxzZWFyY2gtYXBpIn0.q4Y2xZG8PFHai24-4Pjx2gym9RmJejtmK6lMXP5wAgc',
+            'Referer': url,
+        }
 
-        video = next(r for r in results if r.get('alias') == alias)
+        video = None
+
+        by_id_list = self._download_json(
+            'https://www.funk.net/api/v3.0/content/videos/byIdList', channel_id,
+            headers=headers, query={
+                'ids': alias,
+            }, fatal=False)
+        if by_id_list:
+            video = try_get(by_id_list, lambda x: x['result'][0], dict)
+
+        if not video:
+            results = self._download_json(
+                'https://www.funk.net/api/v3.0/content/videos/filter', channel_id,
+                headers=headers, query={
+                    'channelId': channel_id,
+                    'size': 100,
+                })['result']
+            video = next(r for r in results if r.get('alias') == alias)
 
         return self._make_url_result(video)

From 0fe7783eced5c62dbd95780c2150fd1080bd3927 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 28 Apr 2018 01:59:15 +0700
Subject: [PATCH 011/125] [extractor/common] Add _download_json_handle

---
 youtube_dl/extractor/common.py | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)
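
Usage sketch (api_url is a placeholder): callers that need response
metadata alongside the payload can now write

    data, urlh = self._download_json_handle(api_url, video_id)
    final_url = urlh.geturl()  # e.g. to detect redirects

while _download_json keeps its old single-value contract.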

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 59b9d3739..e0c3c8eb0 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -682,18 +682,30 @@ class InfoExtractor(object):
             else:
                 self.report_warning(errmsg + str(ve))
 
-    def _download_json(self, url_or_request, video_id,
-                       note='Downloading JSON metadata',
-                       errnote='Unable to download JSON metadata',
-                       transform_source=None,
-                       fatal=True, encoding=None, data=None, headers={}, query={}):
-        json_string = self._download_webpage(
+    def _download_json_handle(
+            self, url_or_request, video_id, note='Downloading JSON metadata',
+            errnote='Unable to download JSON metadata', transform_source=None,
+            fatal=True, encoding=None, data=None, headers={}, query={}):
+        """Return a tuple (JSON object, URL handle)"""
+        res = self._download_webpage_handle(
             url_or_request, video_id, note, errnote, fatal=fatal,
             encoding=encoding, data=data, headers=headers, query=query)
-        if (not fatal) and json_string is False:
-            return None
+        if res is False:
+            return res
+        json_string, urlh = res
         return self._parse_json(
-            json_string, video_id, transform_source=transform_source, fatal=fatal)
+            json_string, video_id, transform_source=transform_source,
+            fatal=fatal), urlh
+
+    def _download_json(
+            self, url_or_request, video_id, note='Downloading JSON metadata',
+            errnote='Unable to download JSON metadata', transform_source=None,
+            fatal=True, encoding=None, data=None, headers={}, query={}):
+        res = self._download_json_handle(
+            url_or_request, video_id, note=note, errnote=errnote,
+            transform_source=transform_source, fatal=fatal, encoding=encoding,
+            data=data, headers=headers, query=query)
+        return res if res is False else res[0]
 
     def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
         if transform_source:

From 6cc622327ff8289f94894f3695ed31014c61cf8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 28 Apr 2018 02:47:17 +0700
Subject: [PATCH 012/125] [utils] Introduce merge_dicts

---
 test/test_utils.py              | 12 ++++++++++++
 youtube_dl/extractor/generic.py | 16 +---------------
 youtube_dl/utils.py             | 14 ++++++++++++++
 3 files changed, 27 insertions(+), 15 deletions(-)
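
merge_dicts is first-wins, with two refinements visible in the tests:
None never occupies a key, and an empty string may be replaced by a
later non-empty string (sketch):

    merge_dicts({'title': ''}, {'title': 'Episode 1'})  # -> {'title': 'Episode 1'}
    merge_dicts({'id': 'a'}, {'id': 'b'})               # -> {'id': 'a'}
    merge_dicts({'a': None}, {'a': 1})                  # -> {'a': 1}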

diff --git a/test/test_utils.py b/test/test_utils.py
index 253a7fe17..14503ab53 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -42,6 +42,7 @@ from youtube_dl.utils import (
     is_html,
     js_to_json,
     limit_length,
+    merge_dicts,
     mimetype2ext,
     month_by_name,
     multipart_encode,
@@ -669,6 +670,17 @@ class TestUtil(unittest.TestCase):
             self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
             self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
 
+    def test_merge_dicts(self):
+        self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
+        self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
+        self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
+        self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
+        self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
+        self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
+        self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
+        self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
+        self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
+
     def test_encode_compat_str(self):
         self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
         self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index af1322e00..d48914495 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -23,6 +23,7 @@ from ..utils import (
     is_html,
     js_to_json,
     KNOWN_EXTENSIONS,
+    merge_dicts,
     mimetype2ext,
     orderedSet,
     sanitized_Request,
@@ -3002,21 +3003,6 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 sharevideos_urls, video_id, video_title)
 
-        def merge_dicts(dict1, dict2):
-            merged = {}
-            for k, v in dict1.items():
-                if v is not None:
-                    merged[k] = v
-            for k, v in dict2.items():
-                if v is None:
-                    continue
-                if (k not in merged or
-                        (isinstance(v, compat_str) and v and
-                            isinstance(merged[k], compat_str) and
-                            not merged[k])):
-                    merged[k] = v
-            return merged
-
         # Look for HTML5 media
         entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
         if entries:
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 574284e94..b460393bf 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2225,6 +2225,20 @@ def try_get(src, getter, expected_type=None):
                 return v
 
 
+def merge_dicts(*dicts):
+    merged = {}
+    for a_dict in dicts:
+        for k, v in a_dict.items():
+            if v is None:
+                continue
+            if (k not in merged or
+                    (isinstance(v, compat_str) and v and
+                        isinstance(merged[k], compat_str) and
+                        not merged[k])):
+                merged[k] = v
+    return merged
+
+
 def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
     return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
 

From e7e4a6e0f9166cee82c165ca69a6a3c94ddc5f45 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 28 Apr 2018 02:48:03 +0700
Subject: [PATCH 013/125] [extractor/common] Extract interaction statistic

---
 youtube_dl/extractor/common.py | 35 ++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
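
The JSON-LD shape this walks, per schema.org (values invented for
illustration):

    json_ld = {
        '@type': 'VideoObject',
        'interactionStatistic': [{
            '@type': 'InteractionCounter',
            'interactionType': 'https://schema.org/WatchAction',
            'userInteractionCount': 12345,
        }],
    }
    # WatchAction maps to 'view', so this yields info['view_count'] = 12345.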

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index e0c3c8eb0..a9939b0fd 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1020,6 +1020,40 @@ class InfoExtractor(object):
         if isinstance(json_ld, dict):
             json_ld = [json_ld]
 
+        INTERACTION_TYPE_MAP = {
+            'CommentAction': 'comment',
+            'AgreeAction': 'like',
+            'DisagreeAction': 'dislike',
+            'LikeAction': 'like',
+            'DislikeAction': 'dislike',
+            'ListenAction': 'view',
+            'WatchAction': 'view',
+            'ViewAction': 'view',
+        }
+
+        def extract_interaction_statistic(e):
+            interaction_statistic = e.get('interactionStatistic')
+            if not isinstance(interaction_statistic, list):
+                return
+            for is_e in interaction_statistic:
+                if not isinstance(is_e, dict):
+                    continue
+                if is_e.get('@type') != 'InteractionCounter':
+                    continue
+                interaction_type = is_e.get('interactionType')
+                if not isinstance(interaction_type, compat_str):
+                    continue
+                interaction_count = int_or_none(is_e.get('userInteractionCount'))
+                if interaction_count is None:
+                    continue
+                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
+                if not count_kind:
+                    continue
+                count_key = '%s_count' % count_kind
+                if info.get(count_key) is not None:
+                    continue
+                info[count_key] = interaction_count
+
         def extract_video_object(e):
             assert e['@type'] == 'VideoObject'
             info.update({
@@ -1035,6 +1069,7 @@ class InfoExtractor(object):
                 'height': int_or_none(e.get('height')),
                 'view_count': int_or_none(e.get('interactionCount')),
             })
+            extract_interaction_statistic(e)
 
         for e in json_ld:
             if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):

From ae1c585cee3eb183cddf7c30a09b75d887307dee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 28 Apr 2018 02:48:20 +0700
Subject: [PATCH 014/125] [vimeo] Extract JSON LD (closes #16295)

---
 youtube_dl/extractor/vimeo.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 08257147e..a026526b2 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -16,6 +16,7 @@ from ..utils import (
     ExtractorError,
     InAdvancePagedList,
     int_or_none,
+    merge_dicts,
     NO_DEFAULT,
     RegexNotFoundError,
     sanitized_Request,
@@ -639,16 +640,18 @@ class VimeoIE(VimeoBaseInfoExtractor):
                             'preference': 1,
                         })
 
-        info_dict = self._parse_config(config, video_id)
-        formats.extend(info_dict['formats'])
+        info_dict_config = self._parse_config(config, video_id)
+        formats.extend(info_dict_config['formats'])
         self._vimeo_sort_formats(formats)
 
+        json_ld = self._search_json_ld(webpage, video_id, default={})
+
         if not cc_license:
             cc_license = self._search_regex(
                 r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
                 webpage, 'license', default=None, group='license')
 
-        info_dict.update({
+        info_dict = {
             'id': video_id,
             'formats': formats,
             'timestamp': unified_timestamp(timestamp),
@@ -658,7 +661,9 @@ class VimeoIE(VimeoBaseInfoExtractor):
             'like_count': like_count,
             'comment_count': comment_count,
             'license': cc_license,
-        })
+        }
+
+        info_dict = merge_dicts(info_dict, info_dict_config, json_ld)
 
         return info_dict
 

From 7dd6ab4a47b08beafe45befa29c44df2db00547e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 28 Apr 2018 04:51:39 +0700
Subject: [PATCH 015/125] [imdb] Extract all formats (closes #16249)

---
 youtube_dl/extractor/imdb.py | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)
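
HLS renditions are recognized either by the declared MIME type or by
the URL extension; determine_ext is a plain suffix probe (sketch):

    from youtube_dl.utils import determine_ext
    determine_ext('https://example.com/video/master.m3u8')  # -> 'm3u8'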

diff --git a/youtube_dl/extractor/imdb.py b/youtube_dl/extractor/imdb.py
index 3ff672a89..425421968 100644
--- a/youtube_dl/extractor/imdb.py
+++ b/youtube_dl/extractor/imdb.py
@@ -3,7 +3,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
+    determine_ext,
     mimetype2ext,
     qualities,
     remove_end,
@@ -73,19 +75,25 @@ class ImdbIE(InfoExtractor):
             video_info_list = format_info.get('videoInfoList')
             if not video_info_list or not isinstance(video_info_list, list):
                 continue
-            video_info = video_info_list[0]
-            if not video_info or not isinstance(video_info, dict):
-                continue
-            video_url = video_info.get('videoUrl')
-            if not video_url:
-                continue
-            format_id = format_info.get('ffname')
-            formats.append({
-                'format_id': format_id,
-                'url': video_url,
-                'ext': mimetype2ext(video_info.get('videoMimeType')),
-                'quality': quality(format_id),
-            })
+            for video_info in video_info_list:
+                if not video_info or not isinstance(video_info, dict):
+                    continue
+                video_url = video_info.get('videoUrl')
+                if not video_url or not isinstance(video_url, compat_str):
+                    continue
+                if (video_info.get('videoMimeType') == 'application/x-mpegURL' or
+                        determine_ext(video_url) == 'm3u8'):
+                    formats.extend(self._extract_m3u8_formats(
+                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                        m3u8_id='hls', fatal=False))
+                    continue
+                format_id = format_info.get('ffname')
+                formats.append({
+                    'format_id': format_id,
+                    'url': video_url,
+                    'ext': mimetype2ext(video_info.get('videoMimeType')),
+                    'quality': quality(format_id),
+                })
         self._sort_formats(formats)
 
         return {

From 500a86a52ee46a3a1acc864b602b74d141afdc24 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Apr 2018 00:33:31 +0700
Subject: [PATCH 016/125] [downloader/fragment] Restart download if .ytdl file
 is corrupt (closes #16312)

---
 youtube_dl/downloader/fragment.py | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
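
For context, the resume state parsed here is a tiny JSON document;
any failure to read it (truncated file, missing keys) now flags the
state as corrupt and restarts from fragment 0 instead of crashing.
Illustrative contents:

    import json
    state = json.loads('{"downloader": {"current_fragment": {"index": 7}}}')
    fragment_index = state['downloader']['current_fragment']['index']  # 7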

diff --git a/youtube_dl/downloader/fragment.py b/youtube_dl/downloader/fragment.py
index 927c7e491..917f6dc01 100644
--- a/youtube_dl/downloader/fragment.py
+++ b/youtube_dl/downloader/fragment.py
@@ -74,9 +74,14 @@ class FragmentFD(FileDownloader):
         return not ctx['live'] and not ctx['tmpfilename'] == '-'
 
     def _read_ytdl_file(self, ctx):
+        assert 'ytdl_corrupt' not in ctx
         stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
-        ctx['fragment_index'] = json.loads(stream.read())['downloader']['current_fragment']['index']
-        stream.close()
+        try:
+            ctx['fragment_index'] = json.loads(stream.read())['downloader']['current_fragment']['index']
+        except Exception:
+            ctx['ytdl_corrupt'] = True
+        finally:
+            stream.close()
 
     def _write_ytdl_file(self, ctx):
         frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
@@ -158,11 +163,17 @@ class FragmentFD(FileDownloader):
         if self.__do_ytdl_file(ctx):
             if os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))):
                 self._read_ytdl_file(ctx)
-                if ctx['fragment_index'] > 0 and resume_len == 0:
+                is_corrupt = ctx.get('ytdl_corrupt') is True
+                is_inconsistent = ctx['fragment_index'] > 0 and resume_len == 0
+                if is_corrupt or is_inconsistent:
+                    message = (
+                        '.ytdl file is corrupt' if is_corrupt else
+                        'Inconsistent state of incomplete fragment download')
                     self.report_warning(
-                        'Inconsistent state of incomplete fragment download. '
-                        'Restarting from the beginning...')
+                        '%s. Restarting from the beginning...' % message)
                     ctx['fragment_index'] = resume_len = 0
+                    if 'ytdl_corrupt' in ctx:
+                        del ctx['ytdl_corrupt']
                     self._write_ytdl_file(ctx)
             else:
                 self._write_ytdl_file(ctx)

From 106c8c3edbc5b7e95cfba79ddc6252fad0adb859 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Apr 2018 19:04:40 +0700
Subject: [PATCH 017/125] [nrktv] Update API host (closes #16324)

---
 youtube_dl/extractor/nrk.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 18ead9426..3b4f51f61 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -237,7 +237,7 @@ class NRKTVIE(NRKBaseIE):
                             (?:/\d{2}-\d{2}-\d{4})?
                             (?:\#del=(?P<part_id>\d+))?
                     ''' % _EPISODE_RE
-    _API_HOST = 'psapi-ne.nrk.no'
+    _API_HOST = 'psapi-we.nrk.no'
 
     _TESTS = [{
         'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',

From 12b0d4e0e1df6d6a8b9ce10b9a69013497adc2b0 Mon Sep 17 00:00:00 2001
From: Meneth32 <meneth@hotmail.com>
Date: Sun, 29 Apr 2018 16:59:40 +0200
Subject: [PATCH 018/125] [redditr] Add support for old.reddit.com URLs

---
 youtube_dl/extractor/reddit.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
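
A quick check of the relaxed pattern against the old.reddit.com test
URL added below:

    import re
    _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
    m = re.match(_VALID_URL, 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/')
    assert m.group('id') == '6t7wi5'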

diff --git a/youtube_dl/extractor/reddit.py b/youtube_dl/extractor/reddit.py
index 53b1c967e..8372925be 100644
--- a/youtube_dl/extractor/reddit.py
+++ b/youtube_dl/extractor/reddit.py
@@ -47,7 +47,7 @@ class RedditIE(InfoExtractor):
 
 
 class RedditRIE(InfoExtractor):
-    _VALID_URL = r'(?P<url>https?://(?:www\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
+    _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
     _TESTS = [{
         'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
         'info_dict': {
@@ -74,6 +74,10 @@ class RedditRIE(InfoExtractor):
         # imgur
         'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
         'only_matching': True,
+    }, {
+        # imgur @ old reddit
+        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
+        'only_matching': True,
     }, {
         # streamable
         'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',

From 01aec8488084e62aa188b5167e57d01ef66cd256 Mon Sep 17 00:00:00 2001
From: Bastian de Groot <bastiandg@users.noreply.github.com>
Date: Sun, 29 Apr 2018 17:14:37 +0200
Subject: [PATCH 019/125] [generic] Prefer enclosures over links in RSS feeds

---
 youtube_dl/extractor/generic.py | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
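
A minimal reproduction of the new ordering with ElementTree (item
contents invented): the enclosure URL wins, and the <link> element is
only a fallback, since it may point at an unsupported HTML page.

    import xml.etree.ElementTree as ET
    item = ET.fromstring(
        '<item><link>http://example.com/episode-page</link>'
        '<enclosure url="http://example.com/episode.mp3"/></item>')
    next_url = None
    for e in item.findall('./enclosure'):
        next_url = e.attrib.get('url')
        if next_url:
            break
    if not next_url:
        next_url = item.findtext('link')
    assert next_url == 'http://example.com/episode.mp3'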

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index d48914495..252f97c26 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -191,6 +191,16 @@ class GenericIE(InfoExtractor):
                 'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
             }
         },
+        # RSS feed with enclosures and unsupported link URLs
+        {
+            'url': 'http://www.hellointernet.fm/podcast?format=rss',
+            'info_dict': {
+                'id': 'http://www.hellointernet.fm/podcast?format=rss',
+                'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
+                'title': 'Hello Internet',
+            },
+            'playlist_mincount': 100,
+        },
         # SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
         {
             'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
@@ -2026,13 +2036,15 @@ class GenericIE(InfoExtractor):
 
         entries = []
         for it in doc.findall('./channel/item'):
-            next_url = xpath_text(it, 'link', fatal=False)
+            next_url = None
+            enclosure_nodes = it.findall('./enclosure')
+            for e in enclosure_nodes:
+                next_url = e.attrib.get('url')
+                if next_url:
+                    break
+
             if not next_url:
-                enclosure_nodes = it.findall('./enclosure')
-                for e in enclosure_nodes:
-                    next_url = e.attrib.get('url')
-                    if next_url:
-                        break
+                next_url = xpath_text(it, 'link', fatal=False)
 
             if not next_url:
                 continue

From 30226342ab346263b684170c4ce7d5266fec212e Mon Sep 17 00:00:00 2001
From: Niklas Haas <git@haasn.xyz>
Date: Sun, 29 Apr 2018 11:23:23 +0200
Subject: [PATCH 020/125] [youtube] Correctly disable polymer on all requests

Rather than just the ones that use the _download_webpage helper. The
need for this became apparent with 0fe7783e, which refactored
_download_json in a way that completely avoids _download_webpage,
thus breaking the YouTube extractor.

Fixes #16323
---
 youtube_dl/extractor/youtube.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
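
Why overriding _download_webpage_handle is sufficient: after 0fe7783e
both helpers funnel through it, so the disable_polymer parameter now
rides along on JSON requests too. Call-graph sketch:

    # _download_webpage -> _download_webpage_handle
    # _download_json    -> _download_json_handle -> _download_webpage_handle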

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index e7bd1f18f..04aeb91af 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -246,9 +246,9 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
         return True
 
-    def _download_webpage(self, *args, **kwargs):
+    def _download_webpage_handle(self, *args, **kwargs):
         kwargs.setdefault('query', {})['disable_polymer'] = 'true'
-        return super(YoutubeBaseInfoExtractor, self)._download_webpage(
+        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
             *args, **compat_kwargs(kwargs))
 
     def _real_initialize(self):

From e5eadfa82f10bda43294d1da85024eec29c7973f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Apr 2018 22:49:47 +0700
Subject: [PATCH 021/125] [udemy,xiami,yandexmusic] Override
 _download_webpage_handle instead of _download_webpage

---
 youtube_dl/extractor/udemy.py       | 4 ++--
 youtube_dl/extractor/xiami.py       | 4 ++--
 youtube_dl/extractor/yandexmusic.py | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)
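
Note that _download_webpage_handle returns a (webpage, urlh) tuple
(or False when fatal=False), so a substring test against its raw
return value is a membership test on the tuple, not a search of the
page body. A sketch of the unpacking pattern, assuming the contract
shown for _download_json_handle above:

    def _download_webpage_handle(self, *args, **kwargs):
        res = super(XiamiBaseIE, self)._download_webpage_handle(*args, **kwargs)
        if res is not False and 'not available in your country' in res[0]:
            self.raise_geo_restricted('Xiami is currently not available in your country')
        return res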

diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index 6d6c0a98f..439ed2a89 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -115,9 +115,9 @@ class UdemyIE(InfoExtractor):
                 error_str += ' - %s' % error_data.get('formErrors')
             raise ExtractorError(error_str, expected=True)
 
-    def _download_webpage(self, *args, **kwargs):
+    def _download_webpage_handle(self, *args, **kwargs):
         kwargs.setdefault('headers', {})['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4'
-        return super(UdemyIE, self)._download_webpage(
+        return super(UdemyIE, self)._download_webpage_handle(
             *args, **compat_kwargs(kwargs))
 
     def _download_json(self, url_or_request, *args, **kwargs):
diff --git a/youtube_dl/extractor/xiami.py b/youtube_dl/extractor/xiami.py
index 7f871c8ec..8333fb534 100644
--- a/youtube_dl/extractor/xiami.py
+++ b/youtube_dl/extractor/xiami.py
@@ -9,8 +9,8 @@ from ..utils import int_or_none
 class XiamiBaseIE(InfoExtractor):
     _API_BASE_URL = 'http://www.xiami.com/song/playlist/cat/json/id'
 
-    def _download_webpage(self, *args, **kwargs):
-        webpage = super(XiamiBaseIE, self)._download_webpage(*args, **kwargs)
+    def _download_webpage_handle(self, *args, **kwargs):
+        webpage = super(XiamiBaseIE, self)._download_webpage_handle(*args, **kwargs)
         if '>Xiami is currently not available in your country.<' in webpage:
             self.raise_geo_restricted('Xiami is currently not available in your country')
         return webpage
diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index eb1062142..e85eca073 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -34,8 +34,8 @@ class YandexMusicBaseIE(InfoExtractor):
             'youtube-dl with --cookies',
             expected=True)
 
-    def _download_webpage(self, *args, **kwargs):
-        webpage = super(YandexMusicBaseIE, self)._download_webpage(*args, **kwargs)
+    def _download_webpage_handle(self, *args, **kwargs):
+        webpage = super(YandexMusicBaseIE, self)._download_webpage_handle(*args, **kwargs)
         if 'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;вашего IP-адреса, похожи на&nbsp;автоматические.' in webpage:
             self._raise_captcha()
         return webpage

From 796bf9de45d6f01bf2d34ae22e1eacdc1a649fab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Apr 2018 22:56:07 +0700
Subject: [PATCH 022/125] [yandexmusic] Convert release_year to int

---
 youtube_dl/extractor/yandexmusic.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
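
int_or_none converts tolerantly, so a missing year simply yields None
(behavior sketch):

    int_or_none('2009')  # -> 2009
    int_or_none(None)    # -> None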

diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index e85eca073..009203851 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -57,14 +57,14 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
         'info_dict': {
             'id': '4878838',
             'ext': 'mp3',
-            'title': 'Carlo Ambrosio & Fabio Di Bari, Carlo Ambrosio - Gypsy Eyes 1',
+            'title': 'Carlo Ambrosio, Carlo Ambrosio & Fabio Di Bari - Gypsy Eyes 1',
             'filesize': 4628061,
             'duration': 193.04,
             'track': 'Gypsy Eyes 1',
             'album': 'Gypsy Soul',
             'album_artist': 'Carlo Ambrosio',
-            'artist': 'Carlo Ambrosio & Fabio Di Bari, Carlo Ambrosio',
-            'release_year': '2009',
+            'artist': 'Carlo Ambrosio, Carlo Ambrosio & Fabio Di Bari',
+            'release_year': 2009,
         },
         'skip': 'Travis CI servers blocked by YandexMusic',
     }
@@ -120,7 +120,7 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
                 track_info.update({
                     'album': album.get('title'),
                     'album_artist': extract_artist(album.get('artists')),
-                    'release_year': compat_str(year) if year else None,
+                    'release_year': int_or_none(year),
                 })
 
         track_artist = extract_artist(track.get('artists'))

From 4a733545867a014eb786348f8fb9e6ae95850742 Mon Sep 17 00:00:00 2001
From: Alex Seiler <seileralex@gmail.com>
Date: Sun, 5 Nov 2017 18:07:35 +0100
Subject: [PATCH 023/125] [zattoo] Add extractor (closes #14668)

---
 youtube_dl/extractor/extractors.py |   6 +
 youtube_dl/extractor/zattoo.py     | 234 +++++++++++++++++++++++++++++
 2 files changed, 240 insertions(+)
 create mode 100644 youtube_dl/extractor/zattoo.py
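
Usage sketch: the extractor reads credentials via the standard login
options, e.g. a .netrc entry matching _NETRC_MACHINE (credentials
invented):

    # ~/.netrc
    machine zattoo login you@example.com password hunter2

    $ youtube-dl --netrc https://zattoo.com/watch/srf1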

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 6fb65e4fe..9fe3f649d 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1418,5 +1418,11 @@ from .youtube import (
 )
 from .zapiks import ZapiksIE
 from .zaq1 import Zaq1IE
+from .zattoo import (
+    QuicklineIE,
+    QuicklineLiveIE,
+    ZattooIE,
+    ZattooLiveIE,
+)
 from .zdf import ZDFIE, ZDFChannelIE
 from .zingmp3 import ZingMp3IE
diff --git a/youtube_dl/extractor/zattoo.py b/youtube_dl/extractor/zattoo.py
new file mode 100644
index 000000000..928f22566
--- /dev/null
+++ b/youtube_dl/extractor/zattoo.py
@@ -0,0 +1,234 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from uuid import uuid4
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_str,
+    ExtractorError,
+    sanitized_Request,
+    urlencode_postdata,
+)
+
+
+class ZattooBaseIE(InfoExtractor):
+
+    _NETRC_MACHINE = 'zattoo'
+    _HOST_URL = 'https://zattoo.com'
+
+    _power_guide_hash = None
+
+    def _login(self, uuid, session_id):
+        (username, password) = self._get_login_info()
+        if not username or not password:
+            raise ExtractorError(
+                'A valid %s account is needed to access this media.' % self._NETRC_MACHINE,
+                expected=True)
+        login_form = {
+            'login': username,
+            'password': password,
+            'remember': True,
+        }
+        request = sanitized_Request(
+            '%s/zapi/v2/account/login' % self._HOST_URL,
+            urlencode_postdata(login_form))
+        request.add_header(
+            'Referer', '%s/login' % self._HOST_URL)
+        request.add_header(
+            'Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
+        request.add_header(
+            'Cookie', 'uuid=%s; beaker.session.id=%s' % (uuid, session_id))
+        response = self._request_webpage(
+            request, None, 'Logging in')
+        data = self._parse_json(response.read(), None)
+        return data['session']['power_guide_hash']
+
+    def _get_app_token_and_version(self):
+        host_webpage = self._download_webpage(
+            self._HOST_URL, None, 'Downloading %s' % self._HOST_URL)
+        app_token = self._html_search_regex(
+            r'<script.+window\.appToken\s*=\s*\'(.+)\'', host_webpage, 'app token')
+        app_version = self._html_search_regex(
+            r'<!--\w+-(.+?)-', host_webpage, 'app version', default='2.8.2')
+        return app_token, app_version
+
+    def _say_hello(self, uuid, app_token, app_version):
+        postdata = {
+            'client_app_token': app_token,
+            'uuid': uuid,
+            'lang': 'en',
+            'app_version': app_version,
+            'format': 'json',
+        }
+        request = sanitized_Request(
+            '%s/zapi/v2/session/hello' % self._HOST_URL,
+            urlencode_postdata(postdata))
+        response = self._request_webpage(
+            request, None, 'Say hello')
+
+        cookie = response.headers.get('Set-Cookie')
+        session_id = self._search_regex(
+            r'beaker\.session\.id\s*=\s*(.+?);', cookie, 'session id')
+        return session_id
+
+    def _extract_cid(self, video_id, channel_name):
+        channel_groups = self._download_json(
+            '%s/zapi/v2/cached/channels/%s' % (self._HOST_URL,
+                                               self._power_guide_hash),
+            video_id,
+            'Downloading available channel list',
+            query={'details': False})['channel_groups']
+        channel_list = []
+        for chgrp in channel_groups:
+            channel_list.extend(chgrp['channels'])
+        try:
+            return next(
+                chan['cid'] for chan in channel_list
+                if chan['display_alias'] == channel_name or chan['cid'] == channel_name)
+        except StopIteration:
+            raise ExtractorError('Could not extract channel id')
+
+    def _extract_cid_and_video_info(self, video_id):
+        data = self._download_json(
+            '%s/zapi/program/details' % self._HOST_URL,
+            video_id,
+            'Downloading video information',
+            query={
+                'program_id': video_id,
+                'complete': True
+            })
+
+        info_dict = {
+            'id': video_id,
+            'title': data['program']['title'],
+            'description': data['program'].get('description'),
+            'thumbnail': data['program'].get('image_url')
+        }
+        cid = data['program']['cid']
+        return cid, info_dict
+
+    def _extract_formats(self, cid, video_id, record_id=None, is_live=False):
+        postdata = {
+            'stream_type': 'dash',
+            'https_watch_urls': True,
+        }
+        if record_id:
+            url = '%s/zapi/watch/recording/%s' % (self._HOST_URL, record_id)
+        else:
+            url = '%s/zapi/watch/recall/%s/%s' % (self._HOST_URL, cid, video_id)
+
+        if is_live:
+            postdata.update({'timeshift': 10800})
+            url = '%s/zapi/watch/live/%s' % (self._HOST_URL, cid)
+
+        data = self._download_json(
+            sanitized_Request(url, urlencode_postdata(postdata)),
+            video_id, 'Downloading dash formats')
+
+        formats = []
+        for elem in data['stream']['watch_urls']:
+            audio_channel = elem.get('audio_channel')
+            maxrate = elem.get('maxrate')
+            formats.extend(
+                self._extract_mpd_formats(
+                    elem['url'], video_id,
+                    mpd_id='dash-maxrate-%s-channel-%s' % (maxrate, audio_channel), fatal=False))
+
+        postdata.update({'stream_type': 'hls'})
+        request = sanitized_Request(
+            url, urlencode_postdata(postdata))
+        data = self._download_json(
+            request, video_id, 'Downloading hls formats')
+        for elem in data['stream']['watch_urls']:
+            audio_channel = elem.get('audio_channel')
+            preference = None
+
+            # Prefer audio channel A:
+            if audio_channel == 'A':
+                preference = 1
+
+            maxrate = elem.get('maxrate')
+            formats.extend(
+                self._extract_m3u8_formats(
+                    elem['url'], video_id, 'mp4', entry_protocol='m3u8_native',
+                    preference=preference,
+                    m3u8_id='hls-maxrate-%s-channel-%s' % (maxrate, audio_channel),
+                    fatal=False))
+
+        self._sort_formats(formats)
+        return formats
+
+    def _real_initialize(self):
+        uuid = compat_str(uuid4())
+        app_token, app_version = self._get_app_token_and_version()
+        session_id = self._say_hello(uuid, app_token, app_version)
+        self._power_guide_hash = self._login(uuid, session_id)
+
+    def _extract_video(self, channel_name, video_id, record_id=None, is_live=False):
+        if is_live:
+            cid = self._extract_cid(video_id, channel_name)
+            info_dict = {
+                'id': channel_name,
+                'title': self._live_title(channel_name),
+                'is_live': True,
+            }
+        else:
+            cid, info_dict = self._extract_cid_and_video_info(video_id)
+        formats = self._extract_formats(
+            cid, video_id, record_id=record_id, is_live=is_live)
+        info_dict['formats'] = formats
+        return info_dict
+
+
+class QuicklineBaseIE(ZattooBaseIE):
+    _NETRC_MACHINE = 'quickline'
+    _HOST_URL = 'https://mobiltv.quickline.com'
+
+
+class QuicklineIE(QuicklineBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?mobiltv\.quickline\.com/watch/(?P<channel>[^/]+)/(?P<id>[0-9]+)'
+
+    def _real_extract(self, url):
+        channel_name, video_id = re.match(self._VALID_URL, url).groups()
+        return self._extract_video(channel_name, video_id)
+
+
+class QuicklineLiveIE(QuicklineBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?mobiltv\.quickline\.com/watch/(?P<id>[^/]+)$'
+
+    def _real_extract(self, url):
+        channel_name = video_id = self._match_id(url)
+        return self._extract_video(channel_name, video_id, is_live=True)
+
+
+class ZattooIE(ZattooBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?zattoo\.com/watch/(?P<channel>[^/]+?)/(?P<id>[0-9]+)[^/]+(?:/(?P<recid>[0-9]+))?'
+
+    # Since regular videos are only available for 7 days and recorded videos
+    # are only available for a specific user, we cannot have detailed tests.
+    _TESTS = [{
+        'url': 'https://zattoo.com/watch/prosieben/130671867-maze-runner-die-auserwaehlten-in-der-brandwueste',
+        'only_matching': True,
+    }, {
+        'url': 'https://zattoo.com/watch/srf_zwei/132905652-eishockey-spengler-cup/102791477/1512211800000/1514433500000/92000',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        channel_name, video_id, record_id = re.match(self._VALID_URL, url).groups()
+        return self._extract_video(channel_name, video_id, record_id)
+
+
+class ZattooLiveIE(ZattooBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?zattoo\.com/watch/(?P<id>[^/]+)$'
+
+    _TEST = {
+        'url': 'https://zattoo.com/watch/srf1',
+        'only_matching': True,
+    }
+
+    def _real_extract(self, url):
+        channel_name = video_id = self._match_id(url)
+        return self._extract_video(channel_name, video_id, is_live=True)

From 67ca1a8ef7ea6094e1e34518b93cdb5ba59f31b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 May 2018 01:48:21 +0700
Subject: [PATCH 024/125] [zattoo] Improve and simplify (closes #14676)

---
 youtube_dl/extractor/zattoo.py | 238 +++++++++++++++++++--------------
 1 file changed, 137 insertions(+), 101 deletions(-)

diff --git a/youtube_dl/extractor/zattoo.py b/youtube_dl/extractor/zattoo.py
index 928f22566..773073d85 100644
--- a/youtube_dl/extractor/zattoo.py
+++ b/youtube_dl/extractor/zattoo.py
@@ -1,84 +1,82 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-from uuid import uuid4
 import re
+from uuid import uuid4
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
+    compat_HTTPError,
     compat_str,
+)
+from ..utils import (
     ExtractorError,
-    sanitized_Request,
+    int_or_none,
+    try_get,
     urlencode_postdata,
 )
 
 
 class ZattooBaseIE(InfoExtractor):
-
     _NETRC_MACHINE = 'zattoo'
     _HOST_URL = 'https://zattoo.com'
 
     _power_guide_hash = None
 
-    def _login(self, uuid, session_id):
+    def _login(self):
         (username, password) = self._get_login_info()
         if not username or not password:
-            raise ExtractorError(
-                'A valid %s account is needed to access this media.' % self._NETRC_MACHINE,
-                expected=True)
-        login_form = {
-            'login': username,
-            'password': password,
-            'remember': True,
-        }
-        request = sanitized_Request(
-            '%s/zapi/v2/account/login' % self._HOST_URL,
-            urlencode_postdata(login_form))
-        request.add_header(
-            'Referer', '%s/login' % self._HOST_URL)
-        request.add_header(
-            'Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
-        request.add_header(
-            'Cookie', 'uuid=%s; beaker.session.id=%s' % (uuid, session_id))
-        response = self._request_webpage(
-            request, None, 'Logging in')
-        data = self._parse_json(response.read(), None)
-        return data['session']['power_guide_hash']
+            self.raise_login_required(
+                'A valid %s account is needed to access this media.'
+                % self._NETRC_MACHINE)
 
-    def _get_app_token_and_version(self):
-        host_webpage = self._download_webpage(
-            self._HOST_URL, None, 'Downloading %s' % self._HOST_URL)
+        try:
+            data = self._download_json(
+                '%s/zapi/v2/account/login' % self._HOST_URL, None, 'Logging in',
+                data=urlencode_postdata({
+                    'login': username,
+                    'password': password,
+                    'remember': 'true',
+                }), headers={
+                    'Referer': '%s/login' % self._HOST_URL,
+                    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
+                })
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+                raise ExtractorError(
+                    'Unable to login: incorrect username and/or password',
+                    expected=True)
+            raise
+
+        self._power_guide_hash = data['session']['power_guide_hash']
+
+    def _real_initialize(self):
+        webpage = self._download_webpage(
+            self._HOST_URL, None, 'Downloading app token')
         app_token = self._html_search_regex(
-            r'<script.+window\.appToken\s*=\s*\'(.+)\'', host_webpage, 'app token')
+            r'appToken\s*=\s*(["\'])(?P<token>(?:(?!\1).)+?)\1',
+            webpage, 'app token', group='token')
         app_version = self._html_search_regex(
-            r'<!--\w+-(.+?)-', host_webpage, 'app version', default='2.8.2')
-        return app_token, app_version
+            r'<!--\w+-(.+?)-', webpage, 'app version', default='2.8.2')
 
-    def _say_hello(self, uuid, app_token, app_version):
-        postdata = {
-            'client_app_token': app_token,
-            'uuid': uuid,
-            'lang': 'en',
-            'app_version': app_version,
-            'format': 'json',
-        }
-        request = sanitized_Request(
-            '%s/zapi/v2/session/hello' % self._HOST_URL,
-            urlencode_postdata(postdata))
-        response = self._request_webpage(
-            request, None, 'Say hello')
+        # Will set up appropriate cookies
+        self._request_webpage(
+            '%s/zapi/v2/session/hello' % self._HOST_URL, None,
+            'Opening session', data=urlencode_postdata({
+                'client_app_token': app_token,
+                'uuid': compat_str(uuid4()),
+                'lang': 'en',
+                'app_version': app_version,
+                'format': 'json',
+            }))
 
-        cookie = response.headers.get('Set-Cookie')
-        session_id = self._search_regex(
-            r'beaker\.session\.id\s*=\s*(.+?);', cookie, 'session id')
-        return session_id
+        self._login()
 
     def _extract_cid(self, video_id, channel_name):
         channel_groups = self._download_json(
             '%s/zapi/v2/cached/channels/%s' % (self._HOST_URL,
                                                self._power_guide_hash),
-            video_id,
-            'Downloading available channel list',
+            video_id, 'Downloading channel list',
             query={'details': False})['channel_groups']
         channel_list = []
         for chgrp in channel_groups:
@@ -86,7 +84,9 @@ class ZattooBaseIE(InfoExtractor):
         try:
             return next(
                 chan['cid'] for chan in channel_list
-                if chan['display_alias'] == channel_name or chan['cid'] == channel_name)
+                if chan.get('cid') and (
+                    chan.get('display_alias') == channel_name or
+                    chan.get('cid') == channel_name))
         except StopIteration:
             raise ExtractorError('Could not extract channel id')
 
@@ -100,72 +100,90 @@ class ZattooBaseIE(InfoExtractor):
                 'complete': True
             })
 
+        p = data['program']
+        cid = p['cid']
+
         info_dict = {
             'id': video_id,
-            'title': data['program']['title'],
-            'description': data['program'].get('description'),
-            'thumbnail': data['program'].get('image_url')
+            'title': p.get('title') or p['episode_title'],
+            'description': p.get('description'),
+            'thumbnail': p.get('image_url'),
+            'creator': p.get('channel_name'),
+            'episode': p.get('episode_title'),
+            'episode_number': int_or_none(p.get('episode_number')),
+            'season_number': int_or_none(p.get('season_number')),
+            'release_year': int_or_none(p.get('year')),
+            'categories': try_get(p, lambda x: x['categories'], list),
         }
-        cid = data['program']['cid']
+
         return cid, info_dict
 
     def _extract_formats(self, cid, video_id, record_id=None, is_live=False):
-        postdata = {
-            'stream_type': 'dash',
+        postdata_common = {
             'https_watch_urls': True,
         }
-        if record_id:
+
+        if is_live:
+            postdata_common.update({'timeshift': 10800})
+            url = '%s/zapi/watch/live/%s' % (self._HOST_URL, cid)
+        elif record_id:
             url = '%s/zapi/watch/recording/%s' % (self._HOST_URL, record_id)
         else:
             url = '%s/zapi/watch/recall/%s/%s' % (self._HOST_URL, cid, video_id)
 
-        if is_live:
-            postdata.update({'timeshift': 10800})
-            url = '%s/zapi/watch/live/%s' % (self._HOST_URL, cid)
-
-        data = self._download_json(
-            sanitized_Request(url, urlencode_postdata(postdata)),
-            video_id, 'Downloading dash formats')
-
         formats = []
-        for elem in data['stream']['watch_urls']:
-            audio_channel = elem.get('audio_channel')
-            maxrate = elem.get('maxrate')
-            formats.extend(
-                self._extract_mpd_formats(
-                    elem['url'], video_id,
-                    mpd_id='dash-maxrate-%s-channel-%s' % (maxrate, audio_channel), fatal=False))
+        for stream_type in ('dash', 'hls', 'hls5', 'hds'):
+            postdata = postdata_common.copy()
+            postdata['stream_type'] = stream_type
 
-        postdata.update({'stream_type': 'hls'})
-        request = sanitized_Request(
-            url, urlencode_postdata(postdata))
-        data = self._download_json(
-            request, video_id, 'Downloading hls formats')
-        for elem in data['stream']['watch_urls']:
-            audio_channel = elem.get('audio_channel')
-            preference = None
+            data = self._download_json(
+                url, video_id, 'Downloading %s formats' % stream_type.upper(),
+                data=urlencode_postdata(postdata), fatal=False)
+            if not data:
+                continue
 
-            # Prefer audio channel A:
-            if audio_channel == 'A':
-                preference = 1
-
-            maxrate = elem.get('maxrate')
-            formats.extend(
-                self._extract_m3u8_formats(
-                    elem['url'], video_id, 'mp4', entry_protocol='m3u8_native',
-                    preference=preference,
-                    m3u8_id='hls-maxrate-%s-channel-%s' % (maxrate, audio_channel),
-                    fatal=False))
+            watch_urls = try_get(
+                data, lambda x: x['stream']['watch_urls'], list)
+            if not watch_urls:
+                continue
 
+            for watch in watch_urls:
+                if not isinstance(watch, dict):
+                    continue
+                watch_url = watch.get('url')
+                if not watch_url or not isinstance(watch_url, compat_str):
+                    continue
+                format_id_list = [stream_type]
+                maxrate = watch.get('maxrate')
+                if maxrate:
+                    format_id_list.append(compat_str(maxrate))
+                audio_channel = watch.get('audio_channel')
+                if audio_channel:
+                    format_id_list.append(compat_str(audio_channel))
+                preference = 1 if audio_channel == 'A' else None
+                format_id = '-'.join(format_id_list)
+                if stream_type in ('dash', 'dash_widevine', 'dash_playready'):
+                    this_formats = self._extract_mpd_formats(
+                        watch_url, video_id, mpd_id=format_id, fatal=False)
+                elif stream_type in ('hls', 'hls5', 'hls5_fairplay'):
+                    this_formats = self._extract_m3u8_formats(
+                        watch_url, video_id, 'mp4',
+                        entry_protocol='m3u8_native', m3u8_id=format_id,
+                        fatal=False)
+                elif stream_type == 'hds':
+                    this_formats = self._extract_f4m_formats(
+                        watch_url, video_id, f4m_id=format_id, fatal=False)
+                elif stream_type == 'smooth_playready':
+                    this_formats = self._extract_ism_formats(
+                        watch_url, video_id, ism_id=format_id, fatal=False)
+                else:
+                    assert False
+                for this_format in this_formats:
+                    this_format['preference'] = preference
+                formats.extend(this_formats)
         self._sort_formats(formats)
         return formats
 
-    def _real_initialize(self):
-        uuid = compat_str(uuid4())
-        app_token, app_version = self._get_app_token_and_version()
-        session_id = self._say_hello(uuid, app_token, app_version)
-        self._power_guide_hash = self._login(uuid, session_id)
-
     def _extract_video(self, channel_name, video_id, record_id=None, is_live=False):
         if is_live:
             cid = self._extract_cid(video_id, channel_name)
@@ -190,13 +208,27 @@ class QuicklineBaseIE(ZattooBaseIE):
 class QuicklineIE(QuicklineBaseIE):
     _VALID_URL = r'https?://(?:www\.)?mobiltv\.quickline\.com/watch/(?P<channel>[^/]+)/(?P<id>[0-9]+)'
 
+    _TEST = {
+        'url': 'https://mobiltv.quickline.com/watch/prosieben/130671867-maze-runner-die-auserwaehlten-in-der-brandwueste',
+        'only_matching': True,
+    }
+
     def _real_extract(self, url):
         channel_name, video_id = re.match(self._VALID_URL, url).groups()
         return self._extract_video(channel_name, video_id)
 
 
 class QuicklineLiveIE(QuicklineBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?mobiltv\.quickline\.com/watch/(?P<id>[^/]+)$'
+    _VALID_URL = r'https?://(?:www\.)?mobiltv\.quickline\.com/watch/(?P<id>[^/]+)'
+
+    _TEST = {
+        'url': 'https://mobiltv.quickline.com/watch/srf1',
+        'only_matching': True,
+    }
+
+    @classmethod
+    def suitable(cls, url):
+        return False if QuicklineIE.suitable(url) else super(QuicklineLiveIE, cls).suitable(url)
 
     def _real_extract(self, url):
         channel_name = video_id = self._match_id(url)
@@ -222,13 +254,17 @@ class ZattooIE(ZattooBaseIE):
 
 
 class ZattooLiveIE(ZattooBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?zattoo\.com/watch/(?P<id>[^/]+)$'
+    _VALID_URL = r'https?://(?:www\.)?zattoo\.com/watch/(?P<id>[^/]+)'
 
     _TEST = {
         'url': 'https://zattoo.com/watch/srf1',
         'only_matching': True,
     }
 
+    @classmethod
+    def suitable(cls, url):
+        return False if ZattooIE.suitable(url) else super(ZattooLiveIE, cls).suitable(url)
+
     def _real_extract(self, url):
         channel_name = video_id = self._match_id(url)
         return self._extract_video(channel_name, video_id, is_live=True)
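
The reworked _extract_formats above labels each variant by joining the
stream type with whatever maxrate and audio-channel hints the watch URL
carries, and bumps the priority of audio channel A. A minimal sketch of
that naming logic, with a made-up watch_urls entry (the real entries come
from the zapi watch endpoints):

    watch = {'url': 'https://example.com/manifest.mpd',
             'maxrate': 5000, 'audio_channel': 'A'}
    stream_type = 'dash'

    format_id_list = [stream_type]
    for key in ('maxrate', 'audio_channel'):
        value = watch.get(key)
        if value:
            format_id_list.append(str(value))
    # Audio channel A is preferred; everything else keeps default priority
    preference = 1 if watch.get('audio_channel') == 'A' else None
    print('-'.join(format_id_list))  # dash-5000-A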

From 851396346803f77ab9573af56cae056aa904cf93 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 May 2018 02:15:43 +0700
Subject: [PATCH 025/125] [udemy] Extract outputs renditions (closes #16289,
 closes #16291, closes #16320, closes #16321, closes #16334, closes #16335)

---
 youtube_dl/extractor/udemy.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index 439ed2a89..bf1134e3f 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -58,6 +58,10 @@ class UdemyIE(InfoExtractor):
         # no url in outputs format entry
         'url': 'https://www.udemy.com/learn-web-development-complete-step-by-step-guide-to-success/learn/v4/t/lecture/4125812',
         'only_matching': True,
+    }, {
+        # only outputs rendition
+        'url': 'https://www.udemy.com/how-you-can-help-your-local-community-5-amazing-examples/learn/v4/t/lecture/3225750?start=0',
+        'only_matching': True,
     }]
 
     def _extract_course_info(self, webpage, video_id):
@@ -357,6 +361,12 @@ class UdemyIE(InfoExtractor):
                     fatal=False)
                 extract_subtitles(text_tracks)
 
+        if not formats and outputs:
+            for format_id, output in outputs.items():
+                f = extract_output_format(output, format_id)
+                if f.get('url'):
+                    formats.append(f)
+
         self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))
 
         return {
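
The new block is a last-resort fallback: 'outputs' maps a rendition name
to a dict describing one encode, and it is consulted only when the usual
download/stream URL paths produced no formats. A rough sketch with
made-up data (the real extract_output_format helper builds a fuller
format dict):

    outputs = {
        '480': {'url': 'https://example.com/480.mp4', 'height': 480},
        '720': {'url': 'https://example.com/720.mp4', 'height': 720},
    }
    formats = []  # assume the regular extraction paths found nothing

    if not formats and outputs:
        for format_id, output in outputs.items():
            f = {'format_id': format_id,
                 'url': output.get('url'),
                 'height': output.get('height')}
            if f.get('url'):
                formats.append(f)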

From c21692fa94df49ef925c06c00e5db1d8bb0f770d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 May 2018 03:09:04 +0700
Subject: [PATCH 026/125] [kaltura] Improve iframe embeds detection (closes
 #16337)

---
 youtube_dl/extractor/generic.py | 17 +++++++++++++++++
 youtube_dl/extractor/kaltura.py |  3 ++-
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 252f97c26..73980601c 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -1282,6 +1282,23 @@ class GenericIE(InfoExtractor):
             },
             'add_ie': ['Kaltura'],
         },
+        {
+            # Kaltura iframe embed, more sophisticated
+            'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
+            'info_dict': {
+                'id': '1_9gzouybz',
+                'ext': 'mp4',
+                'title': 'lecture-05sep2017',
+                'description': 'md5:40f347d91fd4ba047e511c5321064b49',
+                'upload_date': '20170913',
+                'uploader_id': 'eps2',
+                'timestamp': 1505340777,
+            },
+            'params': {
+                'skip_download': True,
+            },
+            'add_ie': ['Kaltura'],
+        },
         {
             # meta twitter:player
             'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index 0ea89e4d6..04f68fce4 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -136,9 +136,10 @@ class KalturaIE(InfoExtractor):
             re.search(
                 r'''(?xs)
                     <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
-                      (?:https?:)?//(?:(?:www|cdnapi)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
+                      (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
                       (?:(?!(?P=q1)).)*
                       [?&;]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+)
+                      (?:(?!(?P=q1)).)*
                     (?P=q1)
                 ''', webpage)
         )
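
The two changes are the optional 'sec' suffix on the cdnapi host and the
extra tempered gap after entry_id, so the id no longer has to be the last
query parameter. A quick self-contained check against a made-up iframe:

    import re

    webpage = ('<iframe src="https://cdnapisec.kaltura.com/p/1770401/sp/'
               '177040100/embedIframeJs/uiconf_id/37307382/partner_id/1770401'
               '?iframeembed=true&entry_id=1_9gzouybz&flashvars[autoPlay]=false">')
    mobj = re.search(
        r'''(?xs)
            <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
              (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
              (?:(?!(?P=q1)).)*
              [?&;]entry_id=(?P<id>(?:(?!(?P=q1))[^&])+)
              (?:(?!(?P=q1)).)*
            (?P=q1)
        ''', webpage)
    print(mobj.group('partner_id'), mobj.group('id'))  # 1770401 1_9gzouybz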

From cc5772c4f0bcb7dfdfb0575787ff124dd7376de5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 May 2018 03:30:23 +0700
Subject: [PATCH 027/125] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 4a3df67df..7841ee765 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,29 @@
+version <unreleased>
+
+Core
+* [downloader/fragment] Restart download if .ytdl file is corrupt (#16312)
++ [extractor/common] Extract interaction statistic
++ [utils] Add merge_dicts
++ [extractor/common] Add _download_json_handle
+
+Extractors
+* [kaltura] Improve iframe embeds detection (#16337)
++ [udemy] Extract outputs renditions (#16289, #16291, #16320, #16321, #16334,
+  #16335)
++ [zattoo] Add support for zattoo.com and mobiltv.quickline.com (#14668, #14676)
+* [yandexmusic] Convert release_year to int
+* [udemy] Override _download_webpage_handle instead of _download_webpage
+* [xiami] Override _download_webpage_handle instead of _download_webpage
+* [yandexmusic] Override _download_webpage_handle instead of _download_webpage
+* [youtube] Correctly disable polymer on all requests (#16323, #16326)
+* [generic] Prefer enclosures over links in RSS feeds (#16189)
++ [redditr] Add support for old.reddit.com URLs (#16274)
+* [nrktv] Update API host (#16324)
++ [imdb] Extract all formats (#16249)
++ [vimeo] Extract JSON-LD (#16295)
+* [funk:channel] Improve extraction (#16285)
+
+
 version 2018.04.25
 
 Core

From cc42941390b547ba950b4e76f4950be801f96134 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 May 2018 03:38:57 +0700
Subject: [PATCH 028/125] release 2018.05.01

---
 .github/ISSUE_TEMPLATE.md | 6 +++---
 ChangeLog                 | 2 +-
 docs/supportedsites.md    | 4 ++++
 youtube_dl/version.py     | 2 +-
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 252fa0adf..c2bd5d8ae 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.04.25*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.04.25**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.01*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.01**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.04.25
+[debug] youtube-dl version 2018.05.01
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/ChangeLog b/ChangeLog
index 7841ee765..916b8edb8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2018.05.01
 
 Core
 * [downloader/fragment] Restart download if .ytdl file is corrupt (#16312)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index a110f687b..c5a48002b 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -667,6 +667,8 @@
  - **qqmusic:playlist**: QQ音乐 - 歌单
  - **qqmusic:singer**: QQ音乐 - 歌手
  - **qqmusic:toplist**: QQ音乐 - 排行榜
+ - **Quickline**
+ - **QuicklineLive**
  - **R7**
  - **R7Article**
  - **radio.de**
@@ -1092,6 +1094,8 @@
  - **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
  - **Zapiks**
  - **Zaq1**
+ - **Zattoo**
+ - **ZattooLive**
  - **ZDF**
  - **ZDFChannel**
  - **zingmp3**: mp3.zing.vn
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 4e3cb39c6..04896efc8 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.04.25'
+__version__ = '2018.05.01'

From c18142da6e0e99a7b4c9ab488ddb285ad1e8dad9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 May 2018 22:46:06 +0700
Subject: [PATCH 029/125] [itv] Improve extraction (closes #16253)

---
 youtube_dl/extractor/itv.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py
index 18a7d7f8c..457b424a2 100644
--- a/youtube_dl/extractor/itv.py
+++ b/youtube_dl/extractor/itv.py
@@ -41,6 +41,14 @@ class ITVIE(InfoExtractor):
         # unavailable via data-playlist-url
         'url': 'https://www.itv.com/hub/through-the-keyhole/2a2271a0033',
         'only_matching': True,
+    }, {
+        # InvalidVodcrid
+        'url': 'https://www.itv.com/hub/james-martins-saturday-morning/2a5159a0034',
+        'only_matching': True,
+    }, {
+        # ContentUnavailable
+        'url': 'https://www.itv.com/hub/whos-doing-the-dishes/2a2898a0024',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -127,7 +135,8 @@ class ITVIE(InfoExtractor):
             if fault_code == 'InvalidGeoRegion':
                 self.raise_geo_restricted(
                     msg=fault_string, countries=self._GEO_COUNTRIES)
-            elif fault_code != 'InvalidEntity':
+            elif fault_code not in (
+                    'InvalidEntity', 'InvalidVodcrid', 'ContentUnavailable'):
                 raise ExtractorError(
                     '%s said: %s' % (self.IE_NAME, fault_string), expected=True)
             info.update({

From a93ce61bd5cbe7779e4eff0f8ab74a8a02211285 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 2 May 2018 01:29:44 +0700
Subject: [PATCH 030/125] [tunein] Use live title for live streams (closes
 #16347)

---
 youtube_dl/extractor/tunein.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/tunein.py b/youtube_dl/extractor/tunein.py
index 7e51de89e..c7a5f5a63 100644
--- a/youtube_dl/extractor/tunein.py
+++ b/youtube_dl/extractor/tunein.py
@@ -62,7 +62,7 @@ class TuneInBaseIE(InfoExtractor):
 
         return {
             'id': content_id,
-            'title': title,
+            'title': self._live_title(title) if is_live else title,
             'formats': formats,
             'thumbnail': thumbnail,
             'location': location,
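
_live_title only decorates the title so that concurrent recordings of
the same stream do not collide on disk. Roughly (the exact format string
is an assumption here, not taken from this patch):

    import datetime

    def live_title(name):
        # Append the current date/time, as InfoExtractor._live_title does
        return name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')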

From 5f95927a62a533b9e616abb5f1481cedeaa16a4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 2 May 2018 07:18:01 +0700
Subject: [PATCH 031/125] Improve geo bypass mechanism

* Introduce geo bypass context
* Add ability to bypass based on IP blocks in CIDR notation
* Introduce --geo-bypass-ip-block

---
 youtube_dl/YoutubeDL.py            |  3 +
 youtube_dl/__init__.py             |  1 +
 youtube_dl/extractor/anvato.py     |  4 +-
 youtube_dl/extractor/brightcove.py |  5 +-
 youtube_dl/extractor/common.py     | 97 ++++++++++++++++++++++++------
 youtube_dl/extractor/dplay.py      |  4 +-
 youtube_dl/extractor/go.py         |  2 +-
 youtube_dl/extractor/limelight.py  |  4 +-
 youtube_dl/extractor/tvplay.py     |  6 +-
 youtube_dl/options.py              |  4 ++
 youtube_dl/utils.py                | 11 ++--
 11 files changed, 113 insertions(+), 28 deletions(-)

diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index ad3598805..f1a359011 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -286,6 +286,9 @@ class YoutubeDL(object):
                        Two-letter ISO 3166-2 country code that will be used for
                        explicit geographic restriction bypassing via faking
                        X-Forwarded-For HTTP header (experimental)
+    geo_bypass_ip_block:
+                       IP range in CIDR notation that will be used similarly to
+                       geo_bypass_country (experimental)
 
     The following options determine which downloader is picked:
     external_downloader: Executable of the external downloader to call.
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 9bb952457..ba435ea42 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -430,6 +430,7 @@ def _real_main(argv=None):
         'config_location': opts.config_location,
         'geo_bypass': opts.geo_bypass,
         'geo_bypass_country': opts.geo_bypass_country,
+        'geo_bypass_ip_block': opts.geo_bypass_ip_block,
         # just for deprecation check
         'autonumber': opts.autonumber if opts.autonumber is True else None,
         'usetitle': opts.usetitle if opts.usetitle is True else None,
diff --git a/youtube_dl/extractor/anvato.py b/youtube_dl/extractor/anvato.py
index 7a29cd2c6..f6a78eb5d 100644
--- a/youtube_dl/extractor/anvato.py
+++ b/youtube_dl/extractor/anvato.py
@@ -277,7 +277,9 @@ class AnvatoIE(InfoExtractor):
 
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
-        self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
+        self._initialize_geo_bypass({
+            'countries': smuggled_data.get('geo_countries'),
+        })
 
         mobj = re.match(self._VALID_URL, url)
         access_key, video_id = mobj.group('access_key_or_mcp', 'id')
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 0e4eaef65..ab62e54d6 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -669,7 +669,10 @@ class BrightcoveNewIE(AdobePassIE):
 
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
-        self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
+        self._initialize_geo_bypass({
+            'countries': smuggled_data.get('geo_countries'),
+            'ip_blocks': smuggled_data.get('geo_ip_blocks'),
+        })
 
         account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
 
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index a9939b0fd..3ef5af13c 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -346,6 +346,11 @@ class InfoExtractor(object):
     geo restriction bypass mechanism right away in order to bypass
     geo restriction, of course, if the mechanism is not disabled. (experimental)
 
+    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
+    IP blocks in CIDR notation for this extractor. One of these IP blocks
+    will be used by geo restriction bypass mechanism similarly
+    to _GEO_COUNTRIES. (experimental)
+
     NB: both these geo attributes are experimental and may change in future
     or be completely removed.
 
@@ -358,6 +363,7 @@ class InfoExtractor(object):
     _x_forwarded_for_ip = None
     _GEO_BYPASS = True
     _GEO_COUNTRIES = None
+    _GEO_IP_BLOCKS = None
     _WORKING = True
 
     def __init__(self, downloader=None):
@@ -392,12 +398,15 @@ class InfoExtractor(object):
 
     def initialize(self):
         """Initializes an instance (authentication, etc)."""
-        self._initialize_geo_bypass(self._GEO_COUNTRIES)
+        self._initialize_geo_bypass({
+            'countries': self._GEO_COUNTRIES,
+            'ip_blocks': self._GEO_IP_BLOCKS,
+        })
         if not self._ready:
             self._real_initialize()
             self._ready = True
 
-    def _initialize_geo_bypass(self, countries):
+    def _initialize_geo_bypass(self, geo_bypass_context):
         """
         Initialize geo restriction bypass mechanism.
 
@@ -408,28 +417,82 @@ class InfoExtractor(object):
         HTTP requests.
 
         This method will be used for initial geo bypass mechanism initialization
-        during the instance initialization with _GEO_COUNTRIES.
+        during the instance initialization with _GEO_COUNTRIES and
+        _GEO_IP_BLOCKS.
 
-        You may also manually call it from extractor's code if geo countries
+        You may also manually call it from extractor's code if geo bypass
         information is not available beforehand (e.g. obtained during
-        extraction) or due to some another reason.
+        extraction) or due to some other reason. In this case you should pass
+        this information in geo bypass context passed as first argument. It may
+        contain the following fields:
+
+        countries:  List of geo unrestricted countries (similar
+                    to _GEO_COUNTRIES)
+        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
+                    (similar to _GEO_IP_BLOCKS)
+
         """
         if not self._x_forwarded_for_ip:
-            country_code = self._downloader.params.get('geo_bypass_country', None)
-            # If there is no explicit country for geo bypass specified and
-            # the extractor is known to be geo restricted let's fake IP
-            # as X-Forwarded-For right away.
-            if (not country_code and
-                    self._GEO_BYPASS and
-                    self._downloader.params.get('geo_bypass', True) and
-                    countries):
-                country_code = random.choice(countries)
-            if country_code:
-                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
+
+            # Geo bypass mechanism is explicitly disabled by user
+            if not self._downloader.params.get('geo_bypass', True):
+                return
+
+            if not geo_bypass_context:
+                geo_bypass_context = {}
+
+            # Backward compatibility: previously _initialize_geo_bypass
+            # expected a list of countries, some 3rd party code may still use
+            # it this way
+            if isinstance(geo_bypass_context, (list, tuple)):
+                geo_bypass_context = {
+                    'countries': geo_bypass_context,
+                }
+
+            # The whole point of geo bypass mechanism is to fake IP
+            # as X-Forwarded-For HTTP header based on some IP block or
+            # country code.
+
+            # Path 1: bypassing based on IP block in CIDR notation
+
+            # Explicit IP block specified by user, use it right away
+            # regardless of whether extractor is geo bypassable or not
+            ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
+
+            # Otherwise use random IP block from geo bypass context but only
+            # if extractor is known as geo bypassable
+            if not ip_block:
+                ip_blocks = geo_bypass_context.get('ip_blocks')
+                if self._GEO_BYPASS and ip_blocks:
+                    ip_block = random.choice(ip_blocks)
+
+            if ip_block:
+                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
+                if self._downloader.params.get('verbose', False):
+                    self._downloader.to_screen(
+                        '[debug] Using fake IP %s as X-Forwarded-For.'
+                        % self._x_forwarded_for_ip)
+                return
+
+            # Path 2: bypassing based on country code
+
+            # Explicit country code specified by user, use it right away
+            # regardless of whether extractor is geo bypassable or not
+            country = self._downloader.params.get('geo_bypass_country', None)
+
+            # Otherwise use random country code from geo bypass context but
+            # only if extractor is known as geo bypassable
+            if not country:
+                countries = geo_bypass_context.get('countries')
+                if self._GEO_BYPASS and countries:
+                    country = random.choice(countries)
+
+            if country:
+                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                 if self._downloader.params.get('verbose', False):
                     self._downloader.to_screen(
                         '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
-                        % (self._x_forwarded_for_ip, country_code.upper()))
+                        % (self._x_forwarded_for_ip, country.upper()))
 
     def extract(self, url):
         """Extracts URL information and returns it in list of dicts."""
diff --git a/youtube_dl/extractor/dplay.py b/youtube_dl/extractor/dplay.py
index b73446773..8e0374320 100644
--- a/youtube_dl/extractor/dplay.py
+++ b/youtube_dl/extractor/dplay.py
@@ -102,7 +102,9 @@ class DPlayIE(InfoExtractor):
         display_id = mobj.group('id')
         domain = mobj.group('domain')
 
-        self._initialize_geo_bypass([mobj.group('country').upper()])
+        self._initialize_geo_bypass({
+            'countries': [mobj.group('country').upper()],
+        })
 
         webpage = self._download_webpage(url, display_id)
 
diff --git a/youtube_dl/extractor/go.py b/youtube_dl/extractor/go.py
index 9c7b1bd37..e781405f2 100644
--- a/youtube_dl/extractor/go.py
+++ b/youtube_dl/extractor/go.py
@@ -123,7 +123,7 @@ class GoIE(AdobePassIE):
                         'adobe_requestor_id': requestor_id,
                     })
                 else:
-                    self._initialize_geo_bypass(['US'])
+                    self._initialize_geo_bypass({'countries': ['US']})
                 entitlement = self._download_json(
                     'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json',
                     video_id, data=urlencode_postdata(data))
diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py
index 2803d7e8d..729d8de50 100644
--- a/youtube_dl/extractor/limelight.py
+++ b/youtube_dl/extractor/limelight.py
@@ -282,7 +282,9 @@ class LimelightMediaIE(LimelightBaseIE):
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
         video_id = self._match_id(url)
-        self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
+        self._initialize_geo_bypass({
+            'countries': smuggled_data.get('geo_countries'),
+        })
 
         pc, mobile, metadata = self._extract(
             video_id, 'getPlaylistByMediaId',
diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py
index 84597b55e..e09b5f804 100644
--- a/youtube_dl/extractor/tvplay.py
+++ b/youtube_dl/extractor/tvplay.py
@@ -227,14 +227,16 @@ class TVPlayIE(InfoExtractor):
 
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
-        self._initialize_geo_bypass(smuggled_data.get('geo_countries'))
+        self._initialize_geo_bypass({
+            'countries': smuggled_data.get('geo_countries'),
+        })
 
         video_id = self._match_id(url)
         geo_country = self._search_regex(
             r'https?://[^/]+\.([a-z]{2})', url,
             'geo country', default=None)
         if geo_country:
-            self._initialize_geo_bypass([geo_country.upper()])
+            self._initialize_geo_bypass({'countries': [geo_country.upper()]})
         video = self._download_json(
             'http://playapi.mtgx.tv/v3/videos/%s' % video_id, video_id, 'Downloading video JSON')
 
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 3e4ac03a2..f3f8f23b6 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -249,6 +249,10 @@ def parseOpts(overrideArguments=None):
         '--geo-bypass-country', metavar='CODE',
         dest='geo_bypass_country', default=None,
         help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code (experimental)')
+    geo.add_option(
+        '--geo-bypass-ip-block', metavar='IP_BLOCK',
+        dest='geo_bypass_ip_block', default=None,
+        help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation (experimental)')
 
     selection = optparse.OptionGroup(parser, 'Video Selection')
     selection.add_option(
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index b460393bf..f9ca63c58 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -3534,10 +3534,13 @@ class GeoUtils(object):
     }
 
     @classmethod
-    def random_ipv4(cls, code):
-        block = cls._country_ip_map.get(code.upper())
-        if not block:
-            return None
+    def random_ipv4(cls, code_or_block):
+        if len(code_or_block) == 2:
+            block = cls._country_ip_map.get(code_or_block.upper())
+            if not block:
+                return None
+        else:
+            block = code_or_block
         addr, preflen = block.split('/')
         addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
         addr_max = addr_min | (0xffffffff >> int(preflen))
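
The CIDR path above picks a random address between the block's network
address and its highest address. The same arithmetic in a standalone
sketch (struct/socket only, no youtube-dl imports):

    import random
    import socket
    import struct

    def random_ipv4_in_block(block):
        addr, preflen = block.split('/')
        # Lowest address: the network address itself
        addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
        # Highest address: fill the host bits with ones
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return socket.inet_ntoa(
            struct.pack('!L', random.randint(addr_min, addr_max)))

    print(random_ipv4_in_block('54.36.162.0/23'))  # e.g. 54.36.163.42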

From ea1f5e5dbd6c58d4f0872a65b97611732f4b29bd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 2 May 2018 07:21:24 +0700
Subject: [PATCH 032/125] [itv:btcc] Add extractor (closes #16139)

---
 youtube_dl/extractor/extractors.py |  5 +++-
 youtube_dl/extractor/itv.py        | 37 ++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 9fe3f649d..316c8199d 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -477,7 +477,10 @@ from .internetvideoarchive import InternetVideoArchiveIE
 from .iprima import IPrimaIE
 from .iqiyi import IqiyiIE
 from .ir90tv import Ir90TvIE
-from .itv import ITVIE
+from .itv import (
+    ITVIE,
+    ITVBTCCIE,
+)
 from .ivi import (
     IviIE,
     IviCompilationIE
diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py
index 457b424a2..6a4f8a505 100644
--- a/youtube_dl/extractor/itv.py
+++ b/youtube_dl/extractor/itv.py
@@ -7,6 +7,7 @@ import json
 import re
 
 from .common import InfoExtractor
+from .brightcove import BrightcoveNewIE
 from ..compat import (
     compat_str,
     compat_etree_register_namespace,
@@ -18,6 +19,7 @@ from ..utils import (
     xpath_text,
     int_or_none,
     parse_duration,
+    smuggle_url,
     ExtractorError,
     determine_ext,
 )
@@ -260,3 +262,38 @@ class ITVIE(InfoExtractor):
             'subtitles': subtitles,
         })
         return info
+
+
+class ITVBTCCIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?itv\.com/btcc/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _TEST = {
+        'url': 'http://www.itv.com/btcc/races/btcc-2018-all-the-action-from-brands-hatch',
+        'info_dict': {
+            'id': 'btcc-2018-all-the-action-from-brands-hatch',
+            'title': 'BTCC 2018: All the action from Brands Hatch',
+        },
+        'playlist_mincount': 9,
+    }
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1582188683001/HkiHLnNRx_default/index.html?videoId=%s'
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        entries = [
+            self.url_result(
+                smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {
+                    # ITV does not like some GB IP ranges, so here are some
+                    # IP blocks it accepts
+                    'geo_ip_blocks': [
+                        '193.113.0.0/16', '54.36.162.0/23', '159.65.16.0/21'
+                    ],
+                    'referrer': url,
+                }),
+                ie=BrightcoveNewIE.ie_key(), video_id=video_id)
+            for video_id in re.findall(r'data-video-id=["\'](\d+)', webpage)]
+
+        title = self._og_search_title(webpage, fatal=False)
+
+        return self.playlist_result(entries, playlist_id, title)
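
smuggle_url is how the chosen IP blocks survive the hop into
BrightcoveNewIE: the extra data rides along in the URL fragment and is
recovered by unsmuggle_url on the other side. A rough sketch of the idea
(not the exact youtube-dl implementation):

    import json
    try:
        from urllib.parse import urlencode, parse_qsl
    except ImportError:  # Python 2
        from urllib import urlencode
        from urlparse import parse_qsl

    def smuggle(url, data):
        return url + '#' + urlencode({'__youtubedl_smuggle': json.dumps(data)})

    def unsmuggle(url, default=None):
        if '#' not in url:
            return url, default
        url, _, frag = url.partition('#')
        return url, json.loads(dict(parse_qsl(frag))['__youtubedl_smuggle'])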

From 3cc0d0b8299308958bfe8b4c42c739505df27f50 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 2 May 2018 09:32:53 +0100
Subject: [PATCH 033/125] [discovery] extract Affiliate/Anonymous Auth Token
 from cookies (closes #14954)

---
 youtube_dl/extractor/discovery.py | 37 ++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 11 deletions(-)

diff --git a/youtube_dl/extractor/discovery.py b/youtube_dl/extractor/discovery.py
index 91449dcd8..3589bd428 100644
--- a/youtube_dl/extractor/discovery.py
+++ b/youtube_dl/extractor/discovery.py
@@ -5,7 +5,10 @@ import re
 import string
 
 from .discoverygo import DiscoveryGoBaseIE
-from ..compat import compat_str
+from ..compat import (
+    compat_str,
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
     ExtractorError,
     try_get,
@@ -55,15 +58,27 @@ class DiscoveryIE(DiscoveryGoBaseIE):
         video = next(cb for cb in content_blocks if cb.get('type') == 'video')['content']['items'][0]
         video_id = video['id']
 
-        access_token = self._download_json(
-            'https://www.%s.com/anonymous' % site, display_id, query={
-                'authRel': 'authorization',
-                'client_id': try_get(
-                    react_data, lambda x: x['application']['apiClientId'],
-                    compat_str) or '3020a40c2356a645b4b4',
-                'nonce': ''.join([random.choice(string.ascii_letters) for _ in range(32)]),
-                'redirectUri': 'https://fusion.ddmcdn.com/app/mercury-sdk/180/redirectHandler.html?https://www.%s.com' % site,
-            })['access_token']
+        access_token = None
+        cookies = self._get_cookies(url)
+
+        # prefer Affiliate Auth Token over Anonymous Auth Token
+        auth_storage_cookie = cookies.get('eosAf') or cookies.get('eosAn')
+        if auth_storage_cookie and auth_storage_cookie.value:
+            auth_storage = self._parse_json(compat_urllib_parse_unquote(
+                compat_urllib_parse_unquote(auth_storage_cookie.value)),
+                video_id, fatal=False) or {}
+            access_token = auth_storage.get('a') or auth_storage.get('access_token')
+
+        if not access_token:
+            access_token = self._download_json(
+                'https://www.%s.com/anonymous' % site, display_id, query={
+                    'authRel': 'authorization',
+                    'client_id': try_get(
+                        react_data, lambda x: x['application']['apiClientId'],
+                        compat_str) or '3020a40c2356a645b4b4',
+                    'nonce': ''.join([random.choice(string.ascii_letters) for _ in range(32)]),
+                    'redirectUri': 'https://fusion.ddmcdn.com/app/mercury-sdk/180/redirectHandler.html?https://www.%s.com' % site,
+                })['access_token']
 
         try:
             stream = self._download_json(
@@ -72,7 +87,7 @@ class DiscoveryIE(DiscoveryGoBaseIE):
                     'Authorization': 'Bearer ' + access_token,
                 })
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 403):
                 e_description = self._parse_json(
                     e.cause.read().decode(), display_id)['description']
                 if 'resource not available for country' in e_description:
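
The eosAf/eosAn cookie value is URL-encoded twice, hence the nested
compat_urllib_parse_unquote calls before the JSON parse. The decode step
in isolation, with a made-up cookie value:

    import json
    try:
        from urllib.parse import unquote
    except ImportError:  # Python 2
        from urllib import unquote

    raw = '%257B%2522a%2522%253A%2522TOKEN%2522%257D'  # made-up eosAf value
    auth_storage = json.loads(unquote(unquote(raw)))   # -> {"a": "TOKEN"}
    access_token = auth_storage.get('a') or auth_storage.get('access_token')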

From a90a6b54ee5ceb6002f4ebd73d62c65cc00484d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 2 May 2018 20:43:34 +0700
Subject: [PATCH 034/125] [watchbox] Fix extraction (closes #16356)

---
 youtube_dl/extractor/watchbox.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/watchbox.py b/youtube_dl/extractor/watchbox.py
index b382338fa..be0bcba15 100644
--- a/youtube_dl/extractor/watchbox.py
+++ b/youtube_dl/extractor/watchbox.py
@@ -69,7 +69,7 @@ class WatchBoxIE(InfoExtractor):
 
         source = self._parse_json(
             self._search_regex(
-                r'(?s)source\s*:\s*({.+?})\s*,\s*\n', webpage, 'source',
+                r'(?s)source["\']?\s*:\s*({.+?})\s*[,}]', webpage, 'source',
                 default='{}'),
             video_id, transform_source=js_to_json, fatal=False) or {}
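
The old pattern anchored on an unquoted key followed by a comma and a
newline; the new one also accepts a quoted key and a closing brace, which
covers minified configs. A quick check with a made-up player config:

    import re

    webpage = 'playerConfig={"source":{"hls":"https://example.com/m.m3u8"},"title":"x"}'
    print(re.search(
        r'(?s)source["\']?\s*:\s*({.+?})\s*[,}]', webpage).group(1))
    # {"hls":"https://example.com/m.m3u8"}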
 

From 660a230b2dcc734f018557c7898384ba438e9137 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 May 2018 01:21:52 +0700
Subject: [PATCH 035/125] [cloudflarestream] Add support for cloudflare streams
 (closes #16375)

---
 youtube_dl/extractor/cloudflarestream.py | 60 ++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py       |  1 +
 youtube_dl/extractor/generic.py          | 19 ++++++++
 3 files changed, 80 insertions(+)
 create mode 100644 youtube_dl/extractor/cloudflarestream.py

diff --git a/youtube_dl/extractor/cloudflarestream.py b/youtube_dl/extractor/cloudflarestream.py
new file mode 100644
index 000000000..e6d92cca2
--- /dev/null
+++ b/youtube_dl/extractor/cloudflarestream.py
@@ -0,0 +1,60 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class CloudflareStreamIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:
+                            (?:watch\.)?cloudflarestream\.com/|
+                            embed\.cloudflarestream\.com/embed/[^/]+\.js\?.*?\bvideo=
+                        )
+                        (?P<id>[\da-f]+)
+                    '''
+    _TESTS = [{
+        'url': 'https://embed.cloudflarestream.com/embed/we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717',
+        'info_dict': {
+            'id': '31c9291ab41fac05471db4e73aa11717',
+            'ext': 'mp4',
+            'title': '31c9291ab41fac05471db4e73aa11717',
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://watch.cloudflarestream.com/9df17203414fd1db3e3ed74abbe936c1',
+        'only_matching': True,
+    }, {
+        'url': 'https://cloudflarestream.com/31c9291ab41fac05471db4e73aa11717/manifest/video.mpd',
+        'only_matching': True,
+    }]
+
+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.cloudflarestream\.com/embed/[^/]+\.js\?.*?\bvideo=[\da-f]+?.*?)\1',
+                webpage)]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        formats = self._extract_m3u8_formats(
+            'https://cloudflarestream.com/%s/manifest/video.m3u8' % video_id,
+            video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls',
+            fatal=False)
+        formats.extend(self._extract_mpd_formats(
+            'https://cloudflarestream.com/%s/manifest/video.mpd' % video_id,
+            video_id, mpd_id='dash', fatal=False))
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video_id,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 316c8199d..a00e003c2 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -195,6 +195,7 @@ from .clippit import ClippitIE
 from .cliprs import ClipRsIE
 from .clipsyndicate import ClipsyndicateIE
 from .closertotruth import CloserToTruthIE
+from .cloudflarestream import CloudflareStreamIE
 from .cloudy import CloudyIE
 from .clubic import ClubicIE
 from .clyp import ClypIE
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 73980601c..532c995f5 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -107,6 +107,7 @@ from .springboardplatform import SpringboardPlatformIE
 from .yapfiles import YapFilesIE
 from .vice import ViceIE
 from .xfileshare import XFileShareIE
+from .cloudflarestream import CloudflareStreamIE
 
 
 class GenericIE(InfoExtractor):
@@ -2013,6 +2014,19 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            # CloudflareStream embed
+            'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
+            'info_dict': {
+                'id': '31c9291ab41fac05471db4e73aa11717',
+                'ext': 'mp4',
+                'title': '31c9291ab41fac05471db4e73aa11717',
+            },
+            'add_ie': [CloudflareStreamIE.ie_key()],
+            'params': {
+                'skip_download': True,
+            },
+        },
         {
             'url': 'http://share-videos.se/auto/video/83645793?uid=13',
             'md5': 'b68d276de422ab07ee1d49388103f457',
@@ -3025,6 +3039,11 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
 
+        cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
+        if cloudflarestream_urls:
+            return self.playlist_from_matches(
+                cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
+
         sharevideos_urls = [mobj.group('url') for mobj in re.finditer(
             r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
             webpage)]
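
Once the id is known, both manifests are derived from it directly, so the
generic extractor only has to spot the embed script tag. The detection
regex against a made-up page:

    import re

    webpage = ('<script src="https://embed.cloudflarestream.com/embed/'
               'we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717">'
               '</script>')
    print([m.group('url') for m in re.finditer(
        r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.cloudflarestream\.com/embed/[^/]+\.js\?.*?\bvideo=[\da-f]+?.*?)\1',
        webpage)])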

From 789b7774a771335c7d0b42c834195bef2e8617c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 May 2018 21:58:55 +0700
Subject: [PATCH 036/125] [businessinsider] Add extractor (closes #16387,
 closes #16388, closes #16389)

---
 youtube_dl/extractor/businessinsider.py | 42 +++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py      |  1 +
 youtube_dl/extractor/generic.py         | 15 ---------
 3 files changed, 43 insertions(+), 15 deletions(-)
 create mode 100644 youtube_dl/extractor/businessinsider.py

diff --git a/youtube_dl/extractor/businessinsider.py b/youtube_dl/extractor/businessinsider.py
new file mode 100644
index 000000000..dfcf9bc6b
--- /dev/null
+++ b/youtube_dl/extractor/businessinsider.py
@@ -0,0 +1,42 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .jwplatform import JWPlatformIE
+
+
+class BusinessInsiderIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:[^/]+\.)?businessinsider\.(?:com|nl)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'http://uk.businessinsider.com/how-much-radiation-youre-exposed-to-in-everyday-life-2016-6',
+        'md5': 'ca237a53a8eb20b6dc5bd60564d4ab3e',
+        'info_dict': {
+            'id': 'hZRllCfw',
+            'ext': 'mp4',
+            'title': "Here's how much radiation you're exposed to in everyday life",
+            'description': 'md5:9a0d6e2c279948aadaa5e84d6d9b99bd',
+            'upload_date': '20170709',
+            'timestamp': 1499606400,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://www.businessinsider.nl/5-scientifically-proven-things-make-you-less-attractive-2017-7/',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        jwplatform_id = self._search_regex(
+            (r'data-media-id=["\']([a-zA-Z0-9]{8})',
+             r'id=["\']jwplayer_([a-zA-Z0-9]{8})',
+             r'id["\']?\s*:\s*["\']?([a-zA-Z0-9]{8})'),
+            webpage, 'jwplatform id')
+        return self.url_result(
+            'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
+            video_id=video_id)
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index a00e003c2..f03f98a6c 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -137,6 +137,7 @@ from .brightcove import (
     BrightcoveLegacyIE,
     BrightcoveNewIE,
 )
+from .businessinsider import BusinessInsiderIE
 from .buzzfeed import BuzzFeedIE
 from .byutv import BYUtvIE
 from .c56 import C56IE
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 532c995f5..76852f9dc 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -1472,21 +1472,6 @@ class GenericIE(InfoExtractor):
             },
             'expected_warnings': ['Failed to parse JSON Expecting value'],
         },
-        # Ooyala embed
-        {
-            'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
-            'info_dict': {
-                'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
-                'ext': 'mp4',
-                'description': 'Index/Match versus VLOOKUP.',
-                'title': 'This is what separates the Excel masters from the wannabes',
-                'duration': 191.933,
-            },
-            'params': {
-                # m3u8 downloads
-                'skip_download': True,
-            }
-        },
         # Brightcove URL in single quotes
         {
             'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',

From 0ce76801e8f6e4d69182c20d9cef4de772555ad7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 8 May 2018 22:33:35 +0700
Subject: [PATCH 037/125] [udemy] Extract stream URLs (closes #16372)

---
 youtube_dl/extractor/udemy.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index bf1134e3f..4664e6222 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -105,7 +105,7 @@ class UdemyIE(InfoExtractor):
             % (course_id, lecture_id),
             lecture_id, 'Downloading lecture JSON', query={
                 'fields[lecture]': 'title,description,view_html,asset',
-                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data',
+                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,data',
             })
 
     def _handle_error(self, response):
@@ -303,9 +303,10 @@ class UdemyIE(InfoExtractor):
                     'url': src,
                 })
 
-        download_urls = asset.get('download_urls')
-        if isinstance(download_urls, dict):
-            extract_formats(download_urls.get('Video'))
+        for url_kind in ('download', 'stream'):
+            urls = asset.get('%s_urls' % url_kind)
+            if isinstance(urls, dict):
+                extract_formats(urls.get('Video'))
 
         view_html = lecture.get('view_html')
         if view_html:

From 2fbd86352eaa9df6afeed6698114132aea3cbe81 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 8 May 2018 22:57:01 +0700
Subject: [PATCH 038/125] [udemy] Extract asset captions

---
 youtube_dl/extractor/udemy.py | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index 4664e6222..0a74a9768 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -18,6 +18,7 @@ from ..utils import (
     int_or_none,
     js_to_json,
     sanitized_Request,
+    try_get,
     unescapeHTML,
     urlencode_postdata,
 )
@@ -105,7 +106,7 @@ class UdemyIE(InfoExtractor):
             % (course_id, lecture_id),
             lecture_id, 'Downloading lecture JSON', query={
                 'fields[lecture]': 'title,description,view_html,asset',
-                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,data',
+                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,stream_urls,captions,data',
             })
 
     def _handle_error(self, response):
@@ -308,6 +309,21 @@ class UdemyIE(InfoExtractor):
             if isinstance(urls, dict):
                 extract_formats(urls.get('Video'))
 
+        captions = asset.get('captions')
+        if isinstance(captions, list):
+            for cc in captions:
+                if not isinstance(cc, dict):
+                    continue
+                cc_url = cc.get('url')
+                if not cc_url or not isinstance(cc_url, compat_str):
+                    continue
+                lang = try_get(cc, lambda x: x['locale']['locale'], compat_str)
+                sub_dict = (automatic_captions if cc.get('source') == 'auto'
+                            else subtitles)
+                sub_dict.setdefault(lang or 'en', []).append({
+                    'url': cc_url,
+                })
+
         view_html = lecture.get('view_html')
         if view_html:
             view_html_urls = set()
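
The caption routing boils down to: auto-generated tracks go into
automatic_captions, human-made ones into subtitles, keyed by locale with
'en' as the fallback. In isolation, with made-up caption data:

    subtitles, automatic_captions = {}, {}
    captions = [
        {'url': 'https://example.com/en.vtt',
         'locale': {'locale': 'en_US'}, 'source': 'manual'},
        {'url': 'https://example.com/en.auto.vtt',
         'locale': {'locale': 'en_US'}, 'source': 'auto'},
    ]
    for cc in captions:
        cc_url = cc.get('url')
        if not cc_url:
            continue
        lang = (cc.get('locale') or {}).get('locale')
        sub_dict = (automatic_captions if cc.get('source') == 'auto'
                    else subtitles)
        sub_dict.setdefault(lang or 'en', []).append({'url': cc_url})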

From 05108a496a0eb6ca5d6f93072e2871dec8958b87 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 8 May 2018 22:57:52 +0700
Subject: [PATCH 039/125] [YoutubeDL] Ensure ext exists for automatic captions

---
 youtube_dl/YoutubeDL.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index f1a359011..046e03247 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -1482,23 +1482,28 @@ class YoutubeDL(object):
             if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
 
+        for cc_kind in ('subtitles', 'automatic_captions'):
+            cc = info_dict.get(cc_kind)
+            if cc:
+                for _, subtitle in cc.items():
+                    for subtitle_format in subtitle:
+                        if subtitle_format.get('url'):
+                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
+                        if subtitle_format.get('ext') is None:
+                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
+
+        automatic_captions = info_dict.get('automatic_captions')
         subtitles = info_dict.get('subtitles')
-        if subtitles:
-            for _, subtitle in subtitles.items():
-                for subtitle_format in subtitle:
-                    if subtitle_format.get('url'):
-                        subtitle_format['url'] = sanitize_url(subtitle_format['url'])
-                    if subtitle_format.get('ext') is None:
-                        subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
 
         if self.params.get('listsubtitles', False):
             if 'automatic_captions' in info_dict:
-                self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
+                self.list_subtitles(
+                    info_dict['id'], automatic_captions, 'automatic captions')
             self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
             return
+
         info_dict['requested_subtitles'] = self.process_subtitles(
-            info_dict['id'], subtitles,
-            info_dict.get('automatic_captions'))
+            info_dict['id'], subtitles, automatic_captions)
 
         # We now pick which formats have to be downloaded
         if info_dict.get('formats') is None:

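The consolidated loop above normalizes both caption kinds in one pass; the ext-defaulting half can be exercised on its own if determine_ext is reduced to a toy suffix check (a sketch, not youtube-dl's real helper):

    import posixpath
    from urllib.parse import urlparse

    def determine_ext(url):
        # Toy stand-in: take the extension from the URL path, if any.
        return posixpath.splitext(urlparse(url).path)[1].lstrip('.') or 'unknown'

    info_dict = {
        'subtitles': {'en': [{'url': 'http://example.com/s.VTT'}]},
        'automatic_captions': {'en': [{'url': 'http://example.com/a.srv3?x=1'}]},
    }
    for cc_kind in ('subtitles', 'automatic_captions'):
        cc = info_dict.get(cc_kind)
        if not cc:
            continue
        for _, subtitle in cc.items():
            for subtitle_format in subtitle:
                if subtitle_format.get('ext') is None:
                    subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
    print(info_dict['automatic_captions']['en'][0]['ext'])  # -> srv3
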
From 44277998adae1e17e4d21208e7dd1ad44decc733 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 May 2018 00:34:39 +0700
Subject: [PATCH 040/125] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 916b8edb8..ab6c5dab6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,21 @@
+version <unreleased>
+
+Core
+* [YoutubeDL] Ensure ext exists for automatic captions
+* Introduce --geo-bypass-ip-block
+
+Extractors
++ [udemy] Extract asset captions
++ [udemy] Extract stream URLs (#16372)
++ [businessinsider] Add support for businessinsider.com (#16387, #16388, #16389)
++ [cloudflarestream] Add support for cloudflarestream.com (#16375)
+* [watchbox] Fix extraction (#16356)
+* [discovery] Extract Affiliate/Anonymous Auth Token from cookies (#14954)
++ [itv:btcc] Add support for itv.com/btcc (#16139)
+* [tunein] Use live title for live streams (#16347)
+* [itv] Improve extraction (#16253)
+
+
 version 2018.05.01
 
 Core

From 9e18bb4c67af7b748ee62247d751c0e705aa791a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 May 2018 00:36:47 +0700
Subject: [PATCH 041/125] release 2018.05.09

---
 .github/ISSUE_TEMPLATE.md | 6 +++---
 ChangeLog                 | 2 +-
 README.md                 | 3 +++
 docs/supportedsites.md    | 3 +++
 youtube_dl/version.py     | 2 +-
 5 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index c2bd5d8ae..b2bfa9ec5 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.01*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.01**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.09**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.05.01
+[debug] youtube-dl version 2018.05.09
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/ChangeLog b/ChangeLog
index ab6c5dab6..ef6cc3850 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2018.05.09
 
 Core
 * [YoutubeDL] Ensure ext exists for automatic captions
diff --git a/README.md b/README.md
index 5af0f387b..d9fe2350a 100644
--- a/README.md
+++ b/README.md
@@ -116,6 +116,9 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
     --geo-bypass-country CODE        Force bypass geographic restriction with
                                      explicitly provided two-letter ISO 3166-2
                                      country code (experimental)
+    --geo-bypass-ip-block IP_BLOCK   Force bypass geographic restriction with
+                                     explicitly provided IP block in CIDR
+                                     notation (experimental)
 
 ## Video Selection:
     --playlist-start NUMBER          Playlist video to start at (default is 1)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index c5a48002b..88fac6e90 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -122,6 +122,7 @@
  - **BRMediathek**: Bayerischer Rundfunk Mediathek
  - **bt:article**: Bergens Tidende Articles
  - **bt:vestlendingen**: Bergens Tidende - Vestlendingen
+ - **BusinessInsider**
  - **BuzzFeed**
  - **BYUtv**
  - **Camdemy**
@@ -163,6 +164,7 @@
  - **ClipRs**
  - **Clipsyndicate**
  - **CloserToTruth**
+ - **CloudflareStream**
  - **cloudtime**: CloudTime
  - **Cloudy**
  - **Clubic**
@@ -373,6 +375,7 @@
  - **Ir90Tv**
  - **ITTF**
  - **ITV**
+ - **ITVBTCC**
  - **ivi**: ivi.ru
  - **ivi:compilation**: ivi.ru compilations
  - **ivideon**: Ivideon TV
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 04896efc8..6f47b1795 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.05.01'
+__version__ = '2018.05.09'

From ff8889cd4dfae0ae3758e3d8a496f5724f6dc092 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 10 May 2018 08:19:32 +0100
Subject: [PATCH 042/125] [teamcoco] fix extraction (closes #16374)

---
 youtube_dl/extractor/teamcoco.py | 175 ++++++++++++++-----------------
 1 file changed, 80 insertions(+), 95 deletions(-)

diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py
index 9056c8cbc..f06e5b19a 100644
--- a/youtube_dl/extractor/teamcoco.py
+++ b/youtube_dl/extractor/teamcoco.py
@@ -1,35 +1,34 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import binascii
-import re
 import json
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_ord,
-)
 from ..utils import (
-    ExtractorError,
-    qualities,
     determine_ext,
+    ExtractorError,
+    int_or_none,
+    mimetype2ext,
+    parse_duration,
+    parse_iso8601,
+    qualities,
 )
 
 
 class TeamcocoIE(InfoExtractor):
-    _VALID_URL = r'https?://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
+    _VALID_URL = r'https?://teamcoco\.com/video/(?P<id>[^/?#]+)'
     _TESTS = [
         {
-            'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
-            'md5': '3f7746aa0dc86de18df7539903d399ea',
+            'url': 'http://teamcoco.com/video/mary-kay-remote',
+            'md5': '55d532f81992f5c92046ad02fec34d7d',
             'info_dict': {
                 'id': '80187',
                 'ext': 'mp4',
                 'title': 'Conan Becomes A Mary Kay Beauty Consultant',
                 'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
-                'duration': 504,
-                'age_limit': 0,
+                'duration': 495.0,
+                'upload_date': '20140402',
+                'timestamp': 1396407600,
             }
         }, {
             'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
@@ -40,7 +39,8 @@ class TeamcocoIE(InfoExtractor):
                 'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
                 'title': 'Louis C.K. Interview Pt. 1 11/3/11',
                 'duration': 288,
-                'age_limit': 0,
+                'upload_date': '20111104',
+                'timestamp': 1320405840,
             }
         }, {
             'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
@@ -49,6 +49,8 @@ class TeamcocoIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'Timothy Olyphant Raises A Toast To “Justified”',
                 'description': 'md5:15501f23f020e793aeca761205e42c24',
+                'upload_date': '20150415',
+                'timestamp': 1429088400,
             },
             'params': {
                 'skip_download': True,  # m3u8 downloads
@@ -63,110 +65,93 @@ class TeamcocoIE(InfoExtractor):
             },
             'params': {
                 'skip_download': True,  # m3u8 downloads
-            }
+            },
+            'skip': 'This video is no longer available.',
         }
     ]
-    _VIDEO_ID_REGEXES = (
-        r'"eVar42"\s*:\s*(\d+)',
-        r'Ginger\.TeamCoco\.openInApp\("video",\s*"([^"]+)"',
-        r'"id_not"\s*:\s*(\d+)'
-    )
+
+    def _graphql_call(self, query_template, object_type, object_id):
+        find_object = 'find' + object_type
+        return self._download_json(
+            'http://teamcoco.com/graphql/', object_id, data=json.dumps({
+                'query': query_template % (find_object, object_id)
+            }))['data'][find_object]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        display_id = self._match_id(url)
 
-        display_id = mobj.group('display_id')
-        webpage, urlh = self._download_webpage_handle(url, display_id)
-        if 'src=expired' in urlh.geturl():
-            raise ExtractorError('This video is expired.', expected=True)
+        response = self._graphql_call('''{
+  %s(slug: "video/%s") {
+    ... on RecordSlug {
+      record {
+        id
+        title
+        teaser
+        publishOn
+        thumb {
+          preview
+        }
+        tags {
+          name
+        }
+        duration
+      }
+    }
+    ... on NotFoundSlug {
+      status
+    }
+  }
+}''', 'Slug', display_id)
+        if response.get('status'):
+            raise ExtractorError('This video is no longer available.', expected=True)
 
-        video_id = mobj.group('video_id')
-        if not video_id:
-            video_id = self._html_search_regex(
-                self._VIDEO_ID_REGEXES, webpage, 'video id')
+        record = response['record']
+        video_id = record['id']
 
-        data = None
-
-        preload_codes = self._html_search_regex(
-            r'(function.+)setTimeout\(function\(\)\{playlist',
-            webpage, 'preload codes')
-        base64_fragments = re.findall(r'"([a-zA-Z0-9+/=]+)"', preload_codes)
-        base64_fragments.remove('init')
-
-        def _check_sequence(cur_fragments):
-            if not cur_fragments:
-                return
-            for i in range(len(cur_fragments)):
-                cur_sequence = (''.join(cur_fragments[i:] + cur_fragments[:i])).encode('ascii')
-                try:
-                    raw_data = compat_b64decode(cur_sequence)
-                    if compat_ord(raw_data[0]) == compat_ord('{'):
-                        return json.loads(raw_data.decode('utf-8'))
-                except (TypeError, binascii.Error, UnicodeDecodeError, ValueError):
-                    continue
-
-        def _check_data():
-            for i in range(len(base64_fragments) + 1):
-                for j in range(i, len(base64_fragments) + 1):
-                    data = _check_sequence(base64_fragments[:i] + base64_fragments[j:])
-                    if data:
-                        return data
-
-        self.to_screen('Try to compute possible data sequence. This may take some time.')
-        data = _check_data()
-
-        if not data:
-            raise ExtractorError(
-                'Preload information could not be extracted', expected=True)
+        srcs = self._graphql_call('''{
+  %s(id: "%s") {
+    src
+  }
+}''', 'RecordVideoSource', video_id)['src']
 
         formats = []
-        get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
-        for filed in data['files']:
-            if determine_ext(filed['url']) == 'm3u8':
-                # compat_urllib_parse.urljoin does not work here
-                if filed['url'].startswith('/'):
-                    m3u8_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + filed['url']
-                else:
-                    m3u8_url = filed['url']
-                m3u8_formats = self._extract_m3u8_formats(
-                    m3u8_url, video_id, ext='mp4')
-                for m3u8_format in m3u8_formats:
-                    if m3u8_format not in formats:
-                        formats.append(m3u8_format)
-            elif determine_ext(filed['url']) == 'f4m':
-                # TODO Correct f4m extraction
+        get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
+        for format_id, src in srcs.items():
+            if not isinstance(src, dict):
                 continue
+            src_url = src.get('src')
+            if not src_url:
+                continue
+            ext = determine_ext(src_url, mimetype2ext(src.get('type')))
+            if format_id == 'hls' or ext == 'm3u8':
+                # compat_urllib_parse.urljoin does not work here
+                if src_url.startswith('/'):
+                    src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
+                formats.extend(self._extract_m3u8_formats(
+                    src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
             else:
-                if filed['url'].startswith('/mp4:protected/'):
+                if src_url.startswith('/mp4:protected/'):
                     # TODO Correct extraction for these files
                     continue
-                m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
-                if m_format is not None:
-                    format_id = m_format.group(1)
-                else:
-                    format_id = filed['bitrate']
-                tbr = (
-                    int(filed['bitrate'])
-                    if filed['bitrate'].isdigit()
-                    else None)
+                tbr = int_or_none(self._search_regex(
+                    r'(\d+)k\.mp4', src_url, 'tbr', default=None))
 
                 formats.append({
-                    'url': filed['url'],
-                    'ext': 'mp4',
+                    'url': src_url,
+                    'ext': ext,
                     'tbr': tbr,
                     'format_id': format_id,
                     'quality': get_quality(format_id),
                 })
-
         self._sort_formats(formats)
 
         return {
             'id': video_id,
             'display_id': display_id,
             'formats': formats,
-            'title': data['title'],
-            'thumbnail': data.get('thumb', {}).get('href'),
-            'description': data.get('teaser'),
-            'duration': data.get('duration'),
-            'age_limit': self._family_friendly_search(webpage),
+            'title': record['title'],
+            'thumbnail': record.get('thumb', {}).get('preview'),
+            'description': record.get('teaser'),
+            'duration': parse_duration(record.get('duration')),
+            'timestamp': parse_iso8601(record.get('publishOn')),
         }

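The rewrite replaces the old base64 brute-forcing with two GraphQL calls: one resolving the URL slug to a record (metadata plus numeric id), one fetching that record's stream sources. A minimal sketch of the call shape, assuming the endpoint behaves as the diff suggests (left unexecuted here since it needs network access):

    import json
    from urllib.request import Request, urlopen

    def graphql_call(query):
        # POST the query as JSON to the GraphQL endpoint, as the
        # extractor's _graphql_call does, and unwrap the 'data' node.
        req = Request('http://teamcoco.com/graphql/',
                      data=json.dumps({'query': query}).encode())
        return json.loads(urlopen(req).read().decode())['data']

    slug_query = '{ findSlug(slug: "video/mary-kay-remote") { ... on RecordSlug { record { id } } } }'
    # record_id = graphql_call(slug_query)['findSlug']['record']['id']
    # A second query, findRecordVideoSource(id: record_id), then returns 'src'.
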
From 1344d3e169840f6c9d585648c1597da6a2b00ed1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 10 May 2018 22:01:13 +0700
Subject: [PATCH 043/125] [nickbr] Relax _VALID_URL (#13230)

---
 youtube_dl/extractor/nick.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/nick.py b/youtube_dl/extractor/nick.py
index 256a24d86..5e34d776b 100644
--- a/youtube_dl/extractor/nick.py
+++ b/youtube_dl/extractor/nick.py
@@ -85,7 +85,7 @@ class NickBrIE(MTVServicesInfoExtractor):
                     https?://
                         (?:
                             (?P<domain>(?:www\.)?nickjr|mundonick\.uol)\.com\.br|
-                            (?:www\.)?nickjr\.nl
+                            (?:www\.)?nickjr\.[a-z]{2}
                         )
                         /(?:programas/)?[^/]+/videos/(?:episodios/)?(?P<id>[^/?\#.]+)
                     '''
@@ -98,6 +98,9 @@ class NickBrIE(MTVServicesInfoExtractor):
     }, {
         'url': 'http://www.nickjr.nl/paw-patrol/videos/311-ge-wol-dig-om-terug-te-zijn/',
         'only_matching': True,
+    }, {
+        'url': 'http://www.nickjr.de/blaze-und-die-monster-maschinen/videos/f6caaf8f-e4e8-4cc1-b489-9380d6dcd059/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

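Relaxing nickjr\.nl to nickjr\.[a-z]{2} makes any two-letter country domain match, which is what the new nickjr.de test exercises. A quick check:

    import re

    VALID_URL = re.compile(r'https?://(?:www\.)?nickjr\.[a-z]{2}/')
    for url in ('http://www.nickjr.nl/paw-patrol/videos/',
                'http://www.nickjr.de/blaze-und-die-monster-maschinen/videos/'):
        print(url, bool(VALID_URL.match(url)))  # both True after the relaxation
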
From bc5e4aa57e92201610f9ab79b10a3ae3b316fc3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 10 May 2018 22:22:26 +0700
Subject: [PATCH 044/125] [mixcloud] Bypass throttling for HTTP formats
 (#12579, #16424)

---
 youtube_dl/extractor/mixcloud.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py
index a56b7690f..b7bccb504 100644
--- a/youtube_dl/extractor/mixcloud.py
+++ b/youtube_dl/extractor/mixcloud.py
@@ -179,6 +179,10 @@ class MixcloudIE(InfoExtractor):
                     formats.append({
                         'format_id': 'http',
                         'url': decrypted,
+                        'downloader_options': {
+                            # Mixcloud starts throttling at >~5M
+                            'http_chunk_size': 5242880,
+                        },
                     })
             self._sort_formats(formats)
 

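The downloader_options entry tells youtube-dl's HTTP downloader to fetch the file in ranged chunks so that no single request grows past the point where Mixcloud starts throttling (about 5 MB). The same idea in a standalone sketch using Range headers (error handling omitted; URL hypothetical):

    from urllib.request import Request, urlopen

    CHUNK = 5242880  # 5 MiB, the threshold noted in the patch

    def fetch_chunked(url, chunk_size=CHUNK):
        # Pull the resource in chunk_size Range slices; a short read
        # means the final chunk has been reached.
        data, start = b'', 0
        while True:
            rng = 'bytes=%d-%d' % (start, start + chunk_size - 1)
            chunk = urlopen(Request(url, headers={'Range': rng})).read()
            data += chunk
            if len(chunk) < chunk_size:
                return data
            start += chunk_size
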
From dbd5c502ead468771d45c7893dd5dd14cf99a276 Mon Sep 17 00:00:00 2001
From: llyyr <gopalprasadgd@gmail.com>
Date: Thu, 10 May 2018 21:47:23 +0530
Subject: [PATCH 045/125] [redditr] Relax _VALID_URL (closes #16426)

---
 youtube_dl/extractor/reddit.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/reddit.py b/youtube_dl/extractor/reddit.py
index 8372925be..7b0aa6232 100644
--- a/youtube_dl/extractor/reddit.py
+++ b/youtube_dl/extractor/reddit.py
@@ -47,7 +47,7 @@ class RedditIE(InfoExtractor):
 
 
 class RedditRIE(InfoExtractor):
-    _VALID_URL = r'(?P<url>https?://(?:(?:www|old)\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
+    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
     _TESTS = [{
         'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
         'info_dict': {
@@ -86,6 +86,10 @@ class RedditRIE(InfoExtractor):
         # youtube
         'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
         'only_matching': True,
+    }, {
+        # reddit video @ nm reddit
+        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

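The widened pattern accepts any reddit.com subdomain (nm., old., www., and so on) instead of an enumerated pair:

    import re

    VALID_URL = re.compile(
        r'https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+)')
    m = VALID_URL.match(
        'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself/')
    print(m.group('id'))  # -> 8idvby
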
From 49fa7de301019e23e66c01e5007561eefd51ca47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 11 May 2018 23:20:12 +0700
Subject: [PATCH 046/125] [twitch:clips] Fix extraction (closes #16429)

---
 youtube_dl/extractor/twitch.py | 102 ++++++++++++++++++++++-----------
 1 file changed, 68 insertions(+), 34 deletions(-)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 4c11fd3c3..ec96ae506 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -8,6 +8,7 @@ import random
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
+    compat_kwargs,
     compat_parse_qs,
     compat_str,
     compat_urllib_parse_urlencode,
@@ -16,11 +17,14 @@ from ..compat import (
 from ..utils import (
     clean_html,
     ExtractorError,
+    float_or_none,
     int_or_none,
-    js_to_json,
     orderedSet,
     parse_duration,
     parse_iso8601,
+    qualities,
+    try_get,
+    unified_timestamp,
     update_url_query,
     urlencode_postdata,
     urljoin,
@@ -45,10 +49,11 @@ class TwitchBaseIE(InfoExtractor):
                 '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                 expected=True)
 
-    def _call_api(self, path, item_id, note):
+    def _call_api(self, path, item_id, *args, **kwargs):
+        kwargs.setdefault('headers', {})['Client-ID'] = self._CLIENT_ID
         response = self._download_json(
-            '%s/%s' % (self._API_BASE, path), item_id, note,
-            headers={'Client-ID': self._CLIENT_ID})
+            '%s/%s' % (self._API_BASE, path), item_id,
+            *args, **compat_kwargs(kwargs))
         self._handle_error(response)
         return response
 
@@ -622,21 +627,23 @@ class TwitchStreamIE(TwitchBaseIE):
         }
 
 
-class TwitchClipsIE(InfoExtractor):
+class TwitchClipsIE(TwitchBaseIE):
     IE_NAME = 'twitch:clips'
     _VALID_URL = r'https?://clips\.twitch\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
 
     _TESTS = [{
-        'url': 'https://clips.twitch.tv/ea/AggressiveCobraPoooound',
+        'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
         'md5': '761769e1eafce0ffebfb4089cb3847cd',
         'info_dict': {
-            'id': 'AggressiveCobraPoooound',
+            'id': '42850523',
             'ext': 'mp4',
             'title': 'EA Play 2016 Live from the Novo Theatre',
             'thumbnail': r're:^https?://.*\.jpg',
+            'timestamp': 1465767393,
+            'upload_date': '20160612',
             'creator': 'EA',
             'uploader': 'stereotype_',
-            'uploader_id': 'stereotype_',
+            'uploader_id': '43566419',
         },
     }, {
         # multiple formats
@@ -647,34 +654,61 @@ class TwitchClipsIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, video_id)
+        status = self._download_json(
+            'https://clips.twitch.tv/api/v2/clips/%s/status' % video_id,
+            video_id)
 
-        clip = self._parse_json(
-            self._search_regex(
-                r'(?s)clipInfo\s*=\s*({.+?});', webpage, 'clip info'),
-            video_id, transform_source=js_to_json)
+        formats = []
 
-        title = clip.get('title') or clip.get('channel_title') or self._og_search_title(webpage)
+        for option in status['quality_options']:
+            if not isinstance(option, dict):
+                continue
+            source = option.get('source')
+            if not source or not isinstance(source, compat_str):
+                continue
+            formats.append({
+                'url': source,
+                'format_id': option.get('quality'),
+                'height': int_or_none(option.get('quality')),
+                'fps': int_or_none(option.get('frame_rate')),
+            })
 
-        formats = [{
-            'url': option['source'],
-            'format_id': option.get('quality'),
-            'height': int_or_none(option.get('quality')),
-        } for option in clip.get('quality_options', []) if option.get('source')]
-
-        if not formats:
-            formats = [{
-                'url': clip['clip_video_url'],
-            }]
-
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'thumbnail': self._og_search_thumbnail(webpage),
-            'creator': clip.get('broadcaster_display_name') or clip.get('broadcaster_login'),
-            'uploader': clip.get('curator_login'),
-            'uploader_id': clip.get('curator_display_name'),
+        info = {
             'formats': formats,
         }
+
+        clip = self._call_api(
+            'kraken/clips/%s' % video_id, video_id, fatal=False, headers={
+                'Accept': 'application/vnd.twitchtv.v5+json',
+            })
+
+        if clip:
+            quality_key = qualities(('tiny', 'small', 'medium'))
+            thumbnails = []
+            thumbnails_dict = clip.get('thumbnails')
+            if isinstance(thumbnails_dict, dict):
+                for thumbnail_id, thumbnail_url in thumbnails_dict.items():
+                    thumbnails.append({
+                        'id': thumbnail_id,
+                        'url': thumbnail_url,
+                        'preference': quality_key(thumbnail_id),
+                    })
+
+            info.update({
+                'id': clip.get('tracking_id') or video_id,
+                'title': clip.get('title') or video_id,
+                'duration': float_or_none(clip.get('duration')),
+                'views': int_or_none(clip.get('views')),
+                'timestamp': unified_timestamp(clip.get('created_at')),
+                'thumbnails': thumbnails,
+                'creator': try_get(clip, lambda x: x['broadcaster']['display_name'], compat_str),
+                'uploader': try_get(clip, lambda x: x['curator']['display_name'], compat_str),
+                'uploader_id': try_get(clip, lambda x: x['curator']['id'], compat_str),
+            })
+        else:
+            info.update({
+                'title': video_id,
+                'id': video_id,
+            })
+
+        return info

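The new flow derives formats from the public clip status endpoint and only layers richer metadata on top when the Kraken API call succeeds. Stripped of youtube-dl plumbing, the format-building half looks roughly like this (sample payload invented for illustration):

    def build_formats(status):
        # status mimics the JSON from clips.twitch.tv/api/v2/clips/<id>/status.
        formats = []
        for option in status.get('quality_options', []):
            if not isinstance(option, dict):
                continue
            source = option.get('source')
            if not source:
                continue
            quality = str(option.get('quality') or '')
            formats.append({
                'url': source,
                'format_id': quality or None,
                # Twitch encodes the height ('720') in the quality label.
                'height': int(quality) if quality.isdigit() else None,
                'fps': option.get('frame_rate'),
            })
        return formats

    print(build_formats({'quality_options': [
        {'source': 'https://clips.example/720.mp4', 'quality': '720', 'frame_rate': 60},
    ]}))
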
From 07acdc5afcff3b47b26b26355a75704e3cda670f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 12 May 2018 12:08:54 +0700
Subject: [PATCH 047/125] [twitch:clips] Sort formats

---
 youtube_dl/extractor/twitch.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index ec96ae506..3ee2af52e 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -673,6 +673,8 @@ class TwitchClipsIE(TwitchBaseIE):
                 'fps': int_or_none(option.get('frame_rate')),
             })
 
+        self._sort_formats(formats)
+
         info = {
             'formats': formats,
         }

From 90b633f86b000f8b6a58ce99d9bbbe0fff6d4f62 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 13 May 2018 11:30:21 +0100
Subject: [PATCH 048/125] [nbc] improve info extraction (fixes #16440)

---
 youtube_dl/extractor/nbc.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index 9dc8f9ebc..1b1722cfa 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -9,6 +9,7 @@ from .adobepass import AdobePassIE
 from ..utils import (
     find_xpath_attr,
     smuggle_url,
+    try_get,
     unescapeHTML,
     update_url_query,
     int_or_none,
@@ -78,10 +79,14 @@ class NBCIE(AdobePassIE):
     def _real_extract(self, url):
         permalink, video_id = re.match(self._VALID_URL, url).groups()
         permalink = 'http' + permalink
-        video_data = self._download_json(
+        response = self._download_json(
             'https://api.nbc.com/v3/videos', video_id, query={
                 'filter[permalink]': permalink,
-            })['data'][0]['attributes']
+                'fields[videos]': 'description,entitlement,episodeNumber,guid,keywords,seasonNumber,title,vChipRating',
+                'fields[shows]': 'shortTitle',
+                'include': 'show.shortTitle',
+            })
+        video_data = response['data'][0]['attributes']
         query = {
             'mbr': 'true',
             'manifest': 'm3u',
@@ -103,10 +108,11 @@ class NBCIE(AdobePassIE):
             'title': title,
             'url': theplatform_url,
             'description': video_data.get('description'),
-            'keywords': video_data.get('keywords'),
+            'tags': video_data.get('keywords'),
             'season_number': int_or_none(video_data.get('seasonNumber')),
             'episode_number': int_or_none(video_data.get('episodeNumber')),
-            'series': video_data.get('showName'),
+            'episode': title,
+            'series': try_get(response, lambda x: x['included'][0]['attributes']['shortTitle']),
             'ie_key': 'ThePlatform',
         }
 

From 4c76aa06665621c7689938afd7bbdbc797b5c7ea Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 13 May 2018 13:20:16 +0100
Subject: [PATCH 049/125] [youtube] fix extraction for embed-restricted live
 streams (fixes #16433)

---
 youtube_dl/extractor/youtube.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 04aeb91af..1f29e8a4e 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1537,7 +1537,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
             if ytplayer_config:
                 args = ytplayer_config['args']
-                if args.get('url_encoded_fmt_stream_map'):
+                if args.get('url_encoded_fmt_stream_map') or args.get('hlsvp'):
                     # Convert to the same format returned by compat_parse_qs
                     video_info = dict((k, [v]) for k, v in args.items())
                     add_dash_mpd(video_info)
@@ -1969,9 +1969,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
                 formats.append(a_format)
         else:
-            unavailable_message = extract_unavailable_message()
-            if unavailable_message:
-                raise ExtractorError(unavailable_message, expected=True)
+            error_message = clean_html(video_info.get('reason', [None])[0])
+            if not error_message:
+                error_message = extract_unavailable_message()
+            if error_message:
+                raise ExtractorError(error_message, expected=True)
             raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
 
         # Look for the DASH manifest

From 84a9fef899374d46cfad8d292187ca8d84791c1f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 13 May 2018 22:49:01 +0700
Subject: [PATCH 050/125] [youtube] Make uploader extraction non-fatal (#16444)

---
 youtube_dl/extractor/youtube.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 1f29e8a4e..897398d20 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1697,9 +1697,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         self.report_information_extraction(video_id)
 
         # uploader
-        if 'author' not in video_info:
-            raise ExtractorError('Unable to extract uploader name')
-        video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
+        video_uploader = try_get(video_info, lambda x: x['author'][0], compat_str)
+        if video_uploader:
+            video_uploader = compat_urllib_parse_unquote_plus(video_uploader)
+        else:
+            self._downloader.report_warning('unable to extract uploader name')
 
         # uploader_id
         video_uploader_id = None

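try_get is what turns the old hard lookup (which raised on a missing key) into a soft one that merely produces a warning. A condensed equivalent of the helper (the real youtube_dl.utils.try_get also accepts a list of getters):

    def try_get(src, getter, expected_type=None):
        # Return None on any lookup error or type mismatch instead of raising.
        try:
            v = getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            return None
        return v if expected_type is None or isinstance(v, expected_type) else None

    print(try_get({'author': ['Some Uploader']}, lambda x: x['author'][0], str))  # Some Uploader
    print(try_get({}, lambda x: x['author'][0], str))                             # None -> warn only
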
From c63ca0eef8ac147b3f2a39ba7265ad1b3c11d516 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 14 May 2018 23:27:56 +0700
Subject: [PATCH 051/125] [youtube] Improve format filesize extraction (#16453)

---
 youtube_dl/extractor/youtube.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 897398d20..7f4298c08 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1815,6 +1815,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
         chapters = self._extract_chapters(description_original, video_duration)
 
+        def _extract_filesize(media_url):
+            return int_or_none(self._search_regex(
+                r'\bclen[=/](\d+)', media_url, 'filesize', default=None))
+
         if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
             self.report_rtmp_download()
             formats = [{
@@ -1919,8 +1923,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
                 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
 
+                filesize = int_or_none(url_data.get(
+                    'clen', [None])[0]) or _extract_filesize(url)
+
                 more_fields = {
-                    'filesize': int_or_none(url_data.get('clen', [None])[0]),
+                    'filesize': filesize,
                     'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
                     'width': width,
                     'height': height,
@@ -1994,6 +2001,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     for df in self._extract_mpd_formats(
                             mpd_url, video_id, fatal=dash_mpd_fatal,
                             formats_dict=self._formats):
+                        if not df.get('filesize'):
+                            df['filesize'] = _extract_filesize(df['url'])
                         # Do not overwrite DASH format found in some previous DASH manifest
                         if df['format_id'] not in dash_formats:
                             dash_formats[df['format_id']] = df

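The clen fallback simply pulls the content length that YouTube embeds in media URLs, either as a query parameter (clen=...) or as a /clen/<n>/ path segment in DASH URLs. Isolated:

    import re

    def extract_filesize(media_url):
        m = re.search(r'\bclen[=/](\d+)', media_url)
        return int(m.group(1)) if m else None

    print(extract_filesize('https://r4.example.com/videoplayback?clen=5242880&itag=22'))   # 5242880
    print(extract_filesize('https://r4.example.com/videoplayback/clen/1048576/itag/137/')) # 1048576
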
From 1e4fe5a7cc80f73b92e068515352d7c7124a49c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 14 May 2018 23:42:33 +0700
Subject: [PATCH 052/125] [options] Fix typo (closes #16450)

---
 youtube_dl/options.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index f3f8f23b6..b692c6b3b 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -232,7 +232,7 @@ def parseOpts(overrideArguments=None):
         '--geo-verification-proxy',
         dest='geo_verification_proxy', default=None, metavar='URL',
         help='Use this proxy to verify the IP address for some geo-restricted sites. '
-        'The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading.')
+        'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.')
     geo.add_option(
         '--cn-verification-proxy',
         dest='cn_verification_proxy', default=None, metavar='URL',

From 7f34984e811897b65e1b7e3a25cfdb45bf863dcf Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 08:08:44 +0100
Subject: [PATCH 053/125] [dtube] Add new extractor (closes #15201)

---
 youtube_dl/extractor/dtube.py      | 83 ++++++++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py |  1 +
 2 files changed, 84 insertions(+)
 create mode 100644 youtube_dl/extractor/dtube.py

diff --git a/youtube_dl/extractor/dtube.py b/youtube_dl/extractor/dtube.py
new file mode 100644
index 000000000..4ca97f860
--- /dev/null
+++ b/youtube_dl/extractor/dtube.py
@@ -0,0 +1,83 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+from socket import timeout
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    parse_iso8601,
+)
+
+
+class DTubeIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'
+    _TEST = {
+        'url': 'https://d.tube/#!/v/benswann/zqd630em',
+        'md5': 'a03eaa186618ffa7a3145945543a251e',
+        'info_dict': {
+            'id': 'zqd630em',
+            'ext': 'mp4',
+            'title': 'Reality Check: FDA\'s Disinformation Campaign on Kratom',
+            'description': 'md5:700d164e066b87f9eac057949e4227c2',
+            'uploader_id': 'benswann',
+            'upload_date': '20180222',
+            'timestamp': 1519328958,
+        },
+        'params': {
+            'format': '480p',
+        },
+    }
+
+    def _real_extract(self, url):
+        uploader_id, video_id = re.match(self._VALID_URL, url).groups()
+        result = self._download_json('https://api.steemit.com/', video_id, data=json.dumps({
+            'jsonrpc': '2.0',
+            'method': 'get_content',
+            'params': [uploader_id, video_id],
+        }).encode())['result']
+
+        metadata = json.loads(result['json_metadata'])
+        video = metadata['video']
+        content = video['content']
+        info = video.get('info', {})
+        title = info.get('title') or result['title']
+
+        def canonical_url(h):
+            if not h:
+                return None
+            return 'https://ipfs.io/ipfs/' + h
+
+        formats = []
+        for q in ('240', '480', '720', '1080', ''):
+            video_url = canonical_url(content.get('video%shash' % q))
+            if not video_url:
+                continue
+            format_id = (q + 'p') if q else 'Source'
+            try:
+                self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))
+                self._downloader._opener.open(video_url, timeout=5).close()
+            except timeout as e:
+                self.to_screen(
+                    '%s: %s URL is invalid, skipping' % (video_id, format_id))
+                continue
+            formats.append({
+                'format_id': format_id,
+                'url': video_url,
+                'height': int_or_none(q),
+                'ext': 'mp4',
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': content.get('description'),
+            'thumbnail': canonical_url(info.get('snaphash')),
+            'tags': content.get('tags') or metadata.get('tags'),
+            'duration': info.get('duration'),
+            'formats': formats,
+            'timestamp': parse_iso8601(result.get('created')),
+            'uploader_id': uploader_id,
+        }
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index f03f98a6c..4da477647 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -283,6 +283,7 @@ from .drtv import (
     DRTVIE,
     DRTVLiveIE,
 )
+from .dtube import DTubeIE
 from .dvtv import DVTVIE
 from .dumpert import DumpertIE
 from .defense import DefenseGouvFrIE

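d.tube stores media on IPFS, so the extractor probes each gateway URL and skips hashes that no longer resolve within five seconds. The probing idea on its own, mirroring the patch's narrow handling of socket timeouts:

    from socket import timeout
    from urllib.request import urlopen

    def url_alive(video_url, secs=5):
        # Open (and immediately close) the URL; a socket timeout means
        # the IPFS hash is not being served and the format is skipped.
        try:
            urlopen(video_url, timeout=secs).close()
            return True
        except timeout:
            return False

    # canonical_url('Qm...') -> 'https://ipfs.io/ipfs/Qm...', as in the patch.
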
From fe3a60f040f614d36e99f80ea1e3a8387d995fff Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 11:24:44 +0100
Subject: [PATCH 054/125] [dreisat] improve extraction (closes #15350)

- extract all formats
- extract more format metadata
- improve format sorting
- use native HLS downloader
- detect geo-restriction
- bypass geo-restriction
---
 youtube_dl/extractor/dreisat.py | 141 +++++++++++++++-----------------
 1 file changed, 65 insertions(+), 76 deletions(-)

diff --git a/youtube_dl/extractor/dreisat.py b/youtube_dl/extractor/dreisat.py
index f138025d5..8d31258c1 100644
--- a/youtube_dl/extractor/dreisat.py
+++ b/youtube_dl/extractor/dreisat.py
@@ -8,7 +8,6 @@ from ..utils import (
     unified_strdate,
     xpath_text,
     determine_ext,
-    qualities,
     float_or_none,
     ExtractorError,
 )
@@ -16,7 +15,8 @@ from ..utils import (
 
 class DreiSatIE(InfoExtractor):
     IE_NAME = '3sat'
-    _VALID_URL = r'(?:https?://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
+    _GEO_COUNTRIES = ['DE']
+    _VALID_URL = r'https?://(?:www\.)?3sat\.de/mediathek/(?:(?:index|mediathek)\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)'
     _TESTS = [
         {
             'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
@@ -43,7 +43,8 @@ class DreiSatIE(InfoExtractor):
     def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
         param_groups = {}
         for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)):
-            group_id = param_group.attrib.get(self._xpath_ns('id', 'http://www.w3.org/XML/1998/namespace'))
+            group_id = param_group.get(self._xpath_ns(
+                'id', 'http://www.w3.org/XML/1998/namespace'))
             params = {}
             for param in param_group:
                 params[param.get('name')] = param.get('value')
@@ -54,7 +55,7 @@ class DreiSatIE(InfoExtractor):
             src = video.get('src')
             if not src:
                 continue
-            bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+            bitrate = int_or_none(self._search_regex(r'_(\d+)k', src, 'bitrate', None)) or float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
             group_id = video.get('paramGroup')
             param_group = param_groups[group_id]
             for proto in param_group['protocols'].split(','):
@@ -75,66 +76,36 @@ class DreiSatIE(InfoExtractor):
             note='Downloading video info',
             errnote='Failed to download video info')
 
-        status_code = doc.find('./status/statuscode')
-        if status_code is not None and status_code.text != 'ok':
-            code = status_code.text
-            if code == 'notVisibleAnymore':
+        status_code = xpath_text(doc, './status/statuscode')
+        if status_code and status_code != 'ok':
+            if status_code == 'notVisibleAnymore':
                 message = 'Video %s is not available' % video_id
             else:
-                message = '%s returned error: %s' % (self.IE_NAME, code)
+                message = '%s returned error: %s' % (self.IE_NAME, status_code)
             raise ExtractorError(message, expected=True)
 
-        title = doc.find('.//information/title').text
-        description = xpath_text(doc, './/information/detail', 'description')
-        duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
-        uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
-        uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
-        upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
+        title = xpath_text(doc, './/information/title', 'title', True)
 
-        def xml_to_thumbnails(fnode):
-            thumbnails = []
-            for node in fnode:
-                thumbnail_url = node.text
-                if not thumbnail_url:
-                    continue
-                thumbnail = {
-                    'url': thumbnail_url,
-                }
-                if 'key' in node.attrib:
-                    m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
-                    if m:
-                        thumbnail['width'] = int(m.group(1))
-                        thumbnail['height'] = int(m.group(2))
-                thumbnails.append(thumbnail)
-            return thumbnails
-
-        thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))
-
-        format_nodes = doc.findall('.//formitaeten/formitaet')
-        quality = qualities(['veryhigh', 'high', 'med', 'low'])
-
-        def get_quality(elem):
-            return quality(xpath_text(elem, 'quality'))
-        format_nodes.sort(key=get_quality)
-        format_ids = []
+        urls = []
         formats = []
-        for fnode in format_nodes:
-            video_url = fnode.find('url').text
-            is_available = 'http://www.metafilegenerator' not in video_url
-            if not is_available:
+        for fnode in doc.findall('.//formitaeten/formitaet'):
+            video_url = xpath_text(fnode, 'url')
+            if not video_url or video_url in urls:
                 continue
+            urls.append(video_url)
+
+            is_available = 'http://www.metafilegenerator' not in video_url
+            geoloced = 'static_geoloced_online' in video_url
+            if not is_available or geoloced:
+                continue
+
             format_id = fnode.attrib['basetype']
-            quality = xpath_text(fnode, './quality', 'quality')
             format_m = re.match(r'''(?x)
                 (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
                 (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
             ''', format_id)
 
             ext = determine_ext(video_url, None) or format_m.group('container')
-            if ext not in ('smil', 'f4m', 'm3u8'):
-                format_id = format_id + '-' + quality
-            if format_id in format_ids:
-                continue
 
             if ext == 'meta':
                 continue
@@ -147,24 +118,23 @@ class DreiSatIE(InfoExtractor):
                 if video_url.startswith('https://'):
                     continue
                 formats.extend(self._extract_m3u8_formats(
-                    video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
+                    video_url, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id=format_id, fatal=False))
             elif ext == 'f4m':
                 formats.extend(self._extract_f4m_formats(
                     video_url, video_id, f4m_id=format_id, fatal=False))
             else:
-                proto = format_m.group('proto').lower()
+                quality = xpath_text(fnode, './quality')
+                if quality:
+                    format_id += '-' + quality
 
-                abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
-                vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)
+                abr = int_or_none(xpath_text(fnode, './audioBitrate'), 1000)
+                vbr = int_or_none(xpath_text(fnode, './videoBitrate'), 1000)
 
-                width = int_or_none(xpath_text(fnode, './width', 'width'))
-                height = int_or_none(xpath_text(fnode, './height', 'height'))
-
-                filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))
-
-                format_note = ''
-                if not format_note:
-                    format_note = None
+                tbr = int_or_none(self._search_regex(
+                    r'_(\d+)k', video_url, 'bitrate', None))
+                if tbr and vbr and not abr:
+                    abr = tbr - vbr
 
                 formats.append({
                     'format_id': format_id,
@@ -174,31 +144,50 @@ class DreiSatIE(InfoExtractor):
                     'vcodec': format_m.group('vcodec'),
                     'abr': abr,
                     'vbr': vbr,
-                    'width': width,
-                    'height': height,
-                    'filesize': filesize,
-                    'format_note': format_note,
-                    'protocol': proto,
-                    '_available': is_available,
+                    'tbr': tbr,
+                    'width': int_or_none(xpath_text(fnode, './width')),
+                    'height': int_or_none(xpath_text(fnode, './height')),
+                    'filesize': int_or_none(xpath_text(fnode, './filesize')),
+                    'protocol': format_m.group('proto').lower(),
                 })
-            format_ids.append(format_id)
+
+        geolocation = xpath_text(doc, './/details/geolocation')
+        if not formats and geolocation and geolocation != 'none':
+            self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
 
         self._sort_formats(formats)
 
+        thumbnails = []
+        for node in doc.findall('.//teaserimages/teaserimage'):
+            thumbnail_url = node.text
+            if not thumbnail_url:
+                continue
+            thumbnail = {
+                'url': thumbnail_url,
+            }
+            thumbnail_key = node.get('key')
+            if thumbnail_key:
+                m = re.match('^([0-9]+)x([0-9]+)$', thumbnail_key)
+                if m:
+                    thumbnail['width'] = int(m.group(1))
+                    thumbnail['height'] = int(m.group(2))
+            thumbnails.append(thumbnail)
+
+        upload_date = unified_strdate(xpath_text(doc, './/details/airtime'))
+
         return {
             'id': video_id,
             'title': title,
-            'description': description,
-            'duration': duration,
+            'description': xpath_text(doc, './/information/detail'),
+            'duration': int_or_none(xpath_text(doc, './/details/lengthSec')),
             'thumbnails': thumbnails,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
+            'uploader': xpath_text(doc, './/details/originChannelTitle'),
+            'uploader_id': xpath_text(doc, './/details/originChannelId'),
             'upload_date': upload_date,
             'formats': formats,
         }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
+        video_id = self._match_id(url)
+        details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?id=%s' % video_id
         return self.extract_from_xml_url(video_id, details_url)

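One detail worth noting in the rewrite: when a URL carries a _<n>k total bitrate and the XML supplies only a video bitrate, the audio bitrate is recovered by subtraction (abr = tbr - vbr). For example:

    import re

    def bitrates(video_url, vbr, abr=None):
        # tbr parsed from URLs like ..._1628k.mp4; abr falls back to tbr - vbr.
        m = re.search(r'_(\d+)k', video_url)
        tbr = int(m.group(1)) if m else None
        if tbr and vbr and not abr:
            abr = tbr - vbr
        return tbr, vbr, abr

    print(bitrates('http://example.com/video_1628k.mp4', vbr=1500))  # (1628, 1500, 128)
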
From 997530d9d472285126bdfb642915062f286d38a8 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 12:04:24 +0100
Subject: [PATCH 055/125] [dailymotion] remove fragment part from m3u8
 URLs (closes #8915)

---
 youtube_dl/extractor/dailymotion.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 0e7d587dd..de27fffd4 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -180,9 +180,12 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
                         continue
                     ext = mimetype2ext(type_) or determine_ext(media_url)
                     if ext == 'm3u8':
-                        formats.extend(self._extract_m3u8_formats(
+                        m3u8_formats = self._extract_m3u8_formats(
                             media_url, video_id, 'mp4', preference=-1,
-                            m3u8_id='hls', fatal=False))
+                            m3u8_id='hls', fatal=False)
+                        for f in m3u8_formats:
+                            f['url'] = f['url'].split('#')[0]
+                            formats.append(f)
                     elif ext == 'f4m':
                         formats.extend(self._extract_f4m_formats(
                             media_url, video_id, preference=-1, f4m_id='hds', fatal=False))

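Stripping everything after '#' drops fragment noise so that otherwise-identical HLS URLs compare equal. A one-line check:

    url = 'https://proxy.example/video.m3u8#cell=core'
    print(url.split('#')[0])  # -> https://proxy.example/video.m3u8
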
From 54fc90aabfb71968f28af68dfe3f7a3544cc2f0b Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 16:24:44 +0100
Subject: [PATCH 056/125] [youtube] fix hd720 format position

---
 youtube_dl/extractor/youtube.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 7f4298c08..e4eec7c30 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -37,6 +37,7 @@ from ..utils import (
     orderedSet,
     parse_codecs,
     parse_duration,
+    qualities,
     remove_quotes,
     remove_start,
     smuggle_url,
@@ -1844,6 +1845,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                                 'width': int_or_none(width_height[0]),
                                 'height': int_or_none(width_height[1]),
                             }
+            q = qualities(['small', 'medium', 'hd720'])
             formats = []
             for url_data_str in encoded_url_map.split(','):
                 url_data = compat_parse_qs(url_data_str)
@@ -1926,13 +1928,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 filesize = int_or_none(url_data.get(
                     'clen', [None])[0]) or _extract_filesize(url)
 
+                quality = url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0]
+
                 more_fields = {
                     'filesize': filesize,
                     'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
                     'width': width,
                     'height': height,
                     'fps': int_or_none(url_data.get('fps', [None])[0]),
-                    'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
+                    'format_note': quality,
+                    'quality': q(quality),
                 }
                 for key, value in more_fields.items():
                     if value:

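qualities() turns an ordered list of labels into a comparable rank, so hd720 streams sort above medium regardless of discovery order. A stand-in with the same behaviour as youtube_dl.utils.qualities:

    def qualities(quality_ids):
        # Rank = index in the preference list; unknown labels rank lowest.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    q = qualities(['small', 'medium', 'hd720'])
    print(sorted(['medium', 'hd720', 'small'], key=q))  # ['small', 'medium', 'hd720']
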
From 6843ac5b1395157608324be71dc84803b3495857 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 17:49:35 +0100
Subject: [PATCH 057/125] add support for paramountnetwork.com and
 bellator.com (fixes #15418)

---
 youtube_dl/extractor/extractors.py |  5 ++-
 youtube_dl/extractor/spike.py      | 63 +++++++++++++-----------------
 2 files changed, 31 insertions(+), 37 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 4da477647..48e3da9c4 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1016,7 +1016,10 @@ from .spankbang import SpankBangIE
 from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE, SpiegelArticleIE
 from .spiegeltv import SpiegeltvIE
-from .spike import SpikeIE
+from .spike import (
+    BellatorIE,
+    ParamountNetworkIE,
+)
 from .stitcher import StitcherIE
 from .sport5 import Sport5IE
 from .sportbox import SportBoxEmbedIE
diff --git a/youtube_dl/extractor/spike.py b/youtube_dl/extractor/spike.py
index a7b1b3b5f..e76522b45 100644
--- a/youtube_dl/extractor/spike.py
+++ b/youtube_dl/extractor/spike.py
@@ -1,55 +1,46 @@
 from __future__ import unicode_literals
 
-import re
-
 from .mtv import MTVServicesInfoExtractor
 
 
-class SpikeIE(MTVServicesInfoExtractor):
-    _VALID_URL = r'https?://(?:[^/]+\.)?spike\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
+class BellatorIE(MTVServicesInfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?bellator\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
     _TESTS = [{
-        'url': 'http://www.spike.com/video-clips/lhtu8m/auction-hunters-can-allen-ride-a-hundred-year-old-motorcycle',
-        'md5': '1a9265f32b0c375793d6c4ce45255256',
+        'url': 'http://www.bellator.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
         'info_dict': {
-            'id': 'b9c8221a-4e50-479a-b86d-3333323e38ba',
+            'id': 'b55e434e-fde1-4a98-b7cc-92003a034de4',
             'ext': 'mp4',
-            'title': 'Auction Hunters|December 27, 2013|4|414|Can Allen Ride A Hundred Year-Old Motorcycle?',
-            'description': 'md5:fbed7e82ed5fad493615b3094a9499cb',
-            'timestamp': 1388120400,
-            'upload_date': '20131227',
+            'title': 'Douglas Lima vs. Paul Daley - Round 1',
+            'description': 'md5:805a8dd29310fd611d32baba2f767885',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
         },
     }, {
-        'url': 'http://www.spike.com/full-episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-209',
-        'md5': 'b25c6f16418aefb9ad5a6cae2559321f',
+        'url': 'http://www.bellator.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
+        'only_matching': True,
+    }]
+
+    _FEED_URL = 'http://www.spike.com/feeds/mrss/'
+    _GEO_COUNTRIES = ['US']
+
+
+class ParamountNetworkIE(MTVServicesInfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?paramountnetwork\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
+    _TESTS = [{
+        'url': 'http://www.paramountnetwork.com/episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-13',
         'info_dict': {
             'id': '37ace3a8-1df6-48be-85b8-38df8229e241',
             'ext': 'mp4',
             'title': 'Lip Sync Battle|April 28, 2016|2|209|Joel McHale Vs. Jim Rash|Act 1',
             'description': 'md5:a739ca8f978a7802f67f8016d27ce114',
         },
-    }, {
-        'url': 'http://www.spike.com/video-clips/lhtu8m/',
-        'only_matching': True,
-    }, {
-        'url': 'http://www.spike.com/video-clips/lhtu8m',
-        'only_matching': True,
-    }, {
-        'url': 'http://bellator.spike.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
-        'only_matching': True,
-    }, {
-        'url': 'http://bellator.spike.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
-        'only_matching': True,
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
     }]
 
-    _FEED_URL = 'http://www.spike.com/feeds/mrss/'
-    _MOBILE_TEMPLATE = 'http://m.spike.com/videos/video.rbml?id=%s'
-    _CUSTOM_URL_REGEX = re.compile(r'spikenetworkapp://([^/]+/[-a-fA-F0-9]+)')
+    _FEED_URL = 'http://www.paramountnetwork.com/feeds/mrss/'
     _GEO_COUNTRIES = ['US']
-
-    def _extract_mgid(self, webpage):
-        mgid = super(SpikeIE, self)._extract_mgid(webpage)
-        if mgid is None:
-            url_parts = self._search_regex(self._CUSTOM_URL_REGEX, webpage, 'episode_id')
-            video_type, episode_id = url_parts.split('/', 1)
-            mgid = 'mgid:arc:{0}:spike.com:{1}'.format(video_type, episode_id)
-        return mgid

From eea2fafcf506336e37ca514f72757acf8ee004af Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 18:34:25 +0100
Subject: [PATCH 058/125] [pbs] fix embed data extraction (fixes #16474)

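Pages that embed via window.videoBridge do not define PBS.videoData, so the
video data lookup becomes a list of patterns that _search_regex tries in
order, returning the first hit. A standalone illustration of that
first-match-wins behaviour (plain re instead of the InfoExtractor helper;
the sample page is made up):

    import re

    def first_match(patterns, text):
        # try each pattern in order, like _search_regex does with a list
        for pattern in patterns:
            m = re.search(pattern, text)
            if m:
                return m.group(1)

    page = 'window.videoBridge = {"id": "2365006249"};'
    print(first_match([
        r'(?s)PBS\.videoData\s*=\s*({.+?});\n',
        r'window\.videoBridge\s*=\s*({.+?});',
    ], page))
    # {"id": "2365006249"}
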
---
 youtube_dl/extractor/pbs.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py
index f11d5da52..a28ee17ca 100644
--- a/youtube_dl/extractor/pbs.py
+++ b/youtube_dl/extractor/pbs.py
@@ -505,7 +505,7 @@ class PBSIE(InfoExtractor):
             if player:
                 video_info = self._parse_json(
                     self._search_regex(
-                        r'(?s)PBS\.videoData\s*=\s*({.+?});\n',
+                        [r'(?s)PBS\.videoData\s*=\s*({.+?});\n', r'window\.videoBridge\s*=\s*({.+?});'],
                         player, '%s video data' % page, default='{}'),
                     display_id, transform_source=js_to_json, fatal=False)
                 if video_info:
@@ -513,10 +513,14 @@ class PBSIE(InfoExtractor):
                     if not info:
                         info = video_info
                 if not chapters:
-                    for chapter_data in re.findall(r'(?s)chapters\.push\(({.*?})\)', player):
-                        chapter = self._parse_json(chapter_data, video_id, js_to_json, fatal=False)
-                        if not chapter:
-                            continue
+                    raw_chapters = video_info.get('chapters') or []
+                    if not raw_chapters:
+                        for chapter_data in re.findall(r'(?s)chapters\.push\(({.*?})\)', player):
+                            chapter = self._parse_json(chapter_data, video_id, js_to_json, fatal=False)
+                            if not chapter:
+                                continue
+                            raw_chapters.append(chapter)
+                    for chapter in raw_chapters:
                         start_time = float_or_none(chapter.get('start_time'), 1000)
                         duration = float_or_none(chapter.get('duration'), 1000)
                         if start_time is None or duration is None:

From 58a68d8fdae5358273ee52d05d77fe42094e128e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 18:44:33 +0100
Subject: [PATCH 059/125] [moniker] Remove extractor (closes #15336)

---
 youtube_dl/extractor/extractors.py |   1 -
 youtube_dl/extractor/moniker.py    | 116 -----------------------------
 2 files changed, 117 deletions(-)
 delete mode 100644 youtube_dl/extractor/moniker.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 48e3da9c4..24c23646c 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -625,7 +625,6 @@ from .mnet import MnetIE
 from .moevideo import MoeVideoIE
 from .mofosex import MofosexIE
 from .mojvideo import MojvideoIE
-from .moniker import MonikerIE
 from .morningstar import MorningstarIE
 from .motherless import (
     MotherlessIE,
diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py
deleted file mode 100644
index b208820fe..000000000
--- a/youtube_dl/extractor/moniker.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import os.path
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    remove_start,
-    sanitized_Request,
-    urlencode_postdata,
-)
-
-
-class MonikerIE(InfoExtractor):
-    IE_DESC = 'allmyvideos.net and vidspot.net'
-    _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?:(?:2|v)/v-)?(?P<id>[a-zA-Z0-9_-]+)'
-
-    _TESTS = [{
-        'url': 'http://allmyvideos.net/jih3nce3x6wn',
-        'md5': '710883dee1bfc370ecf9fa6a89307c88',
-        'info_dict': {
-            'id': 'jih3nce3x6wn',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video',
-        },
-    }, {
-        'url': 'http://allmyvideos.net/embed-jih3nce3x6wn',
-        'md5': '710883dee1bfc370ecf9fa6a89307c88',
-        'info_dict': {
-            'id': 'jih3nce3x6wn',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video',
-        },
-    }, {
-        'url': 'http://vidspot.net/l2ngsmhs8ci5',
-        'md5': '710883dee1bfc370ecf9fa6a89307c88',
-        'info_dict': {
-            'id': 'l2ngsmhs8ci5',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video',
-        },
-    }, {
-        'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
-        'only_matching': True,
-    }, {
-        'url': 'http://vidspot.net/2/v-ywDf99',
-        'md5': '5f8254ce12df30479428b0152fb8e7ba',
-        'info_dict': {
-            'id': 'ywDf99',
-            'ext': 'mp4',
-            'title': 'IL FAIT LE MALIN EN PORSHE CAYENNE ( mais pas pour longtemps)',
-            'description': 'IL FAIT LE MALIN EN PORSHE CAYENNE.',
-        },
-    }, {
-        'url': 'http://allmyvideos.net/v/v-HXZm5t',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        orig_video_id = self._match_id(url)
-        video_id = remove_start(orig_video_id, 'embed-')
-        url = url.replace(orig_video_id, video_id)
-        assert re.match(self._VALID_URL, url) is not None
-        orig_webpage = self._download_webpage(url, video_id)
-
-        if '>File Not Found<' in orig_webpage:
-            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-
-        error = self._search_regex(
-            r'class="err">([^<]+)<', orig_webpage, 'error', default=None)
-        if error:
-            raise ExtractorError(
-                '%s returned error: %s' % (self.IE_NAME, error), expected=True)
-
-        builtin_url = self._search_regex(
-            r'<iframe[^>]+src=(["\'])(?P<url>.+?/builtin-.+?)\1',
-            orig_webpage, 'builtin URL', default=None, group='url')
-
-        if builtin_url:
-            req = sanitized_Request(builtin_url)
-            req.add_header('Referer', url)
-            webpage = self._download_webpage(req, video_id, 'Downloading builtin page')
-            title = self._og_search_title(orig_webpage).strip()
-            description = self._og_search_description(orig_webpage).strip()
-        else:
-            fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
-            data = dict(fields)
-
-            post = urlencode_postdata(data)
-            headers = {
-                b'Content-Type': b'application/x-www-form-urlencoded',
-            }
-            req = sanitized_Request(url, post, headers)
-            webpage = self._download_webpage(
-                req, video_id, note='Downloading video page ...')
-
-            title = os.path.splitext(data['fname'])[0]
-            description = None
-
-        # Could be several links with different quality
-        links = re.findall(r'"file" : "?(.+?)",', webpage)
-        # Assume the links are ordered in quality
-        formats = [{
-            'url': l,
-            'quality': i,
-        } for i, l in enumerate(links)]
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'formats': formats,
-        }

From 1306f5ed726b9f8778a5cc0586436b555f64c2ff Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 16 May 2018 19:11:48 +0100
Subject: [PATCH 060/125] [mychannels] add support for mychannels.com (closes
 #15334)

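Besides the rename, the hand-rolled codec splitting in minoto.py is replaced
with parse_codecs from utils, which maps a "video,audio" codecs string to
vcodec/acodec values. A quick sketch of the behaviour being relied on (the
codec string is just an example):

    from youtube_dl.utils import parse_codecs

    print(parse_codecs('avc1.64001f,mp4a.40.2'))
    # expected: {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
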
---
 youtube_dl/extractor/extractors.py            |  2 +-
 youtube_dl/extractor/minoto.py                | 19 +++++++-----------
 .../{makerschannel.py => mychannels.py}       | 20 +++++++++----------
 3 files changed, 18 insertions(+), 23 deletions(-)
 rename youtube_dl/extractor/{makerschannel.py => mychannels.py} (59%)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 24c23646c..7d5927131 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -582,7 +582,6 @@ from .mailru import (
     MailRuMusicIE,
     MailRuMusicSearchIE,
 )
-from .makerschannel import MakersChannelIE
 from .makertv import MakerTVIE
 from .mangomolo import (
     MangomoloVideoIE,
@@ -645,6 +644,7 @@ from .mtv import (
 from .muenchentv import MuenchenTVIE
 from .musicplayon import MusicPlayOnIE
 from .mwave import MwaveIE, MwaveMeetGreetIE
+from .mychannels import MyChannelsIE
 from .myspace import MySpaceIE, MySpaceAlbumIE
 from .myspass import MySpassIE
 from .myvi import (
diff --git a/youtube_dl/extractor/minoto.py b/youtube_dl/extractor/minoto.py
index 959a10589..636731195 100644
--- a/youtube_dl/extractor/minoto.py
+++ b/youtube_dl/extractor/minoto.py
@@ -4,7 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    parse_codecs,
+)
 
 
 class MinotoIE(InfoExtractor):
@@ -26,7 +29,7 @@ class MinotoIE(InfoExtractor):
                 formats.extend(self._extract_m3u8_formats(fmt_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
             else:
                 fmt_profile = fmt.get('profile') or {}
-                f = {
+                formats.append({
                     'format_id': fmt_profile.get('name-short'),
                     'format_note': fmt_profile.get('name'),
                     'url': fmt_url,
@@ -35,16 +38,8 @@ class MinotoIE(InfoExtractor):
                     'filesize': int_or_none(fmt.get('filesize')),
                     'width': int_or_none(fmt.get('width')),
                     'height': int_or_none(fmt.get('height')),
-                }
-                codecs = fmt.get('codecs')
-                if codecs:
-                    codecs = codecs.split(',')
-                    if len(codecs) == 2:
-                        f.update({
-                            'vcodec': codecs[0],
-                            'acodec': codecs[1],
-                        })
-                formats.append(f)
+                    'codecs': parse_codecs(fmt.get('codecs')),
+                })
         self._sort_formats(formats)
 
         return {
diff --git a/youtube_dl/extractor/makerschannel.py b/youtube_dl/extractor/mychannels.py
similarity index 59%
rename from youtube_dl/extractor/makerschannel.py
rename to youtube_dl/extractor/mychannels.py
index f5d00e61d..b1ffe7848 100644
--- a/youtube_dl/extractor/makerschannel.py
+++ b/youtube_dl/extractor/mychannels.py
@@ -6,17 +6,17 @@ import re
 from .common import InfoExtractor
 
 
-class MakersChannelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?makerschannel\.com/.*(?P<id_type>video|production)_id=(?P<id>[0-9]+)'
+class MyChannelsIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?mychannels\.com/.*(?P<id_type>video|production)_id=(?P<id>[0-9]+)'
     _TEST = {
-        'url': 'http://makerschannel.com/en/zoomin/community-highlights?video_id=849',
-        'md5': '624a512c6969236b5967bf9286345ad1',
+        'url': 'https://mychannels.com/missholland/miss-holland?production_id=3416',
+        'md5': 'b8993daad4262dd68d89d651c0c52c45',
         'info_dict': {
-            'id': '849',
+            'id': 'wUUDZZep6vQD',
             'ext': 'mp4',
-            'title': 'Landing a bus on a plane is an epic win',
-            'uploader': 'ZoomIn',
-            'description': 'md5:cd9cca2ea7b69b78be81d07020c97139',
+            'title': 'Miss Holland joins VOTE LEAVE',
+            'description': 'Miss Holland | #13 Not a potato',
+            'uploader': 'Miss Holland',
         }
     }
 
@@ -27,12 +27,12 @@ class MakersChannelIE(InfoExtractor):
 
         def extract_data_val(attr, fatal=False):
             return self._html_search_regex(r'data-%s\s*=\s*"([^"]+)"' % attr, video_data, attr, fatal=fatal)
-        minoto_id = self._search_regex(r'/id/([a-zA-Z0-9]+)', extract_data_val('video-src', True), 'minoto id')
+        minoto_id = extract_data_val('minoto-id') or self._search_regex(r'/id/([a-zA-Z0-9]+)', extract_data_val('video-src', True), 'minoto id')
 
         return {
             '_type': 'url_transparent',
             'url': 'minoto:%s' % minoto_id,
-            'id': extract_data_val('video-id', True),
+            'id': url_id,
             'title': extract_data_val('title', True),
             'description': extract_data_val('description'),
             'thumbnail': extract_data_val('image'),

From a3f86160fa15f9e65789a73208cb50b0d82d715f Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 17 May 2018 13:46:05 +0100
Subject: [PATCH 061/125] [pluralsight] fix clip id extraction (fixes #16460)

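The rename untangles two different things: clip_idx is the positional index
taken from the clip query parameter of the URL (and sent to the API as
clipIndex), while clip_id is the stable identifier of the resolved clip
(clipName, name or clipId) and is what now ends up as the info dict id.
Roughly, for a made-up course URL:

    from youtube_dl.compat import (
        compat_parse_qs,
        compat_urllib_parse_urlparse,
    )

    url = ('https://app.pluralsight.com/training/player'
           '?author=someone&name=some-module&clip=0&course=some-course')
    qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
    clip_idx = qs.get('clip', [None])[0]
    print(clip_idx)  # '0' - an index, not an id; the id comes from course data
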
---
 youtube_dl/extractor/pluralsight.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py
index aacc5d4bb..3c508c9ca 100644
--- a/youtube_dl/extractor/pluralsight.py
+++ b/youtube_dl/extractor/pluralsight.py
@@ -140,10 +140,10 @@ class PluralsightIE(PluralsightBaseIE):
 
             raise ExtractorError('Unable to log in')
 
-    def _get_subtitles(self, author, clip_id, lang, name, duration, video_id):
+    def _get_subtitles(self, author, clip_idx, lang, name, duration, video_id):
         captions_post = {
             'a': author,
-            'cn': clip_id,
+            'cn': clip_idx,
             'lc': lang,
             'm': name,
         }
@@ -195,13 +195,13 @@ class PluralsightIE(PluralsightBaseIE):
 
         author = qs.get('author', [None])[0]
         name = qs.get('name', [None])[0]
-        clip_id = qs.get('clip', [None])[0]
+        clip_idx = qs.get('clip', [None])[0]
         course_name = qs.get('course', [None])[0]
 
-        if any(not f for f in (author, name, clip_id, course_name,)):
+        if any(not f for f in (author, name, clip_idx, course_name,)):
             raise ExtractorError('Invalid URL', expected=True)
 
-        display_id = '%s-%s' % (name, clip_id)
+        display_id = '%s-%s' % (name, clip_idx)
 
         course = self._download_course(course_name, url, display_id)
 
@@ -217,7 +217,7 @@ class PluralsightIE(PluralsightBaseIE):
                         clip_index = clip_.get('index')
                     if clip_index is None:
                         continue
-                    if compat_str(clip_index) == clip_id:
+                    if compat_str(clip_index) == clip_idx:
                         clip = clip_
                         break
 
@@ -225,6 +225,7 @@ class PluralsightIE(PluralsightBaseIE):
             raise ExtractorError('Unable to resolve clip')
 
         title = clip['title']
+        clip_id = clip.get('clipName') or clip.get('name') or clip['clipId']
 
         QUALITIES = {
             'low': {'width': 640, 'height': 480},
@@ -277,7 +278,7 @@ class PluralsightIE(PluralsightBaseIE):
                 clip_post = {
                     'author': author,
                     'includeCaptions': False,
-                    'clipIndex': int(clip_id),
+                    'clipIndex': int(clip_idx),
                     'courseName': course_name,
                     'locale': 'en',
                     'moduleName': name,
@@ -330,10 +331,10 @@ class PluralsightIE(PluralsightBaseIE):
 
         # TODO: other languages?
         subtitles = self.extract_subtitles(
-            author, clip_id, 'en', name, duration, display_id)
+            author, clip_idx, 'en', name, duration, display_id)
 
         return {
-            'id': clip.get('clipName') or clip['name'],
+            'id': clip_id,
             'title': title,
             'duration': duration,
             'creator': author,

From 361a965b5cd83b725560f740570d208c2a6886ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 17 May 2018 23:21:40 +0700
Subject: [PATCH 062/125] [vimeo:likes] Relax _VALID_URL and fix single page
 likes extraction (closes #16475)

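Vimeo profile slugs are not limited to user<digits> (custom URLs such as
/stormlapse exist), so the id group is relaxed to [^/]+, and the page count
regex now defaults to 1 when the pagination markup is absent on single-page
likes. A quick check of the relaxed pattern:

    import re

    _VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
    for u in ('https://vimeo.com/user755559/likes/',
              'https://vimeo.com/stormlapse/likes'):
        print(re.match(_VALID_URL, u).group('id'))
    # user755559
    # stormlapse
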
---
 youtube_dl/extractor/vimeo.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index a026526b2..8dfd8891c 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -989,10 +989,10 @@ class VimeoWatchLaterIE(VimeoChannelIE):
 
 
 class VimeoLikesIE(InfoExtractor):
-    _VALID_URL = r'https://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
+    _VALID_URL = r'https://(?:www\.)?vimeo\.com/(?P<id>[^/]+)/likes/?(?:$|[?#]|sort:)'
     IE_NAME = 'vimeo:likes'
     IE_DESC = 'Vimeo user likes'
-    _TEST = {
+    _TESTS = [{
         'url': 'https://vimeo.com/user755559/likes/',
         'playlist_mincount': 293,
         'info_dict': {
@@ -1000,7 +1000,10 @@ class VimeoLikesIE(InfoExtractor):
             'description': 'See all the videos urza likes',
             'title': 'Videos urza likes',
         },
-    }
+    }, {
+        'url': 'https://vimeo.com/stormlapse/likes',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         user_id = self._match_id(url)
@@ -1009,7 +1012,7 @@ class VimeoLikesIE(InfoExtractor):
             self._search_regex(
                 r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                     .*?</a></li>\s*<li\s+class="pagination_next">
-                ''', webpage, 'page count'),
+                ''', webpage, 'page count', default=1),
             'page count', fatal=True)
         PAGE_SIZE = 12
         title = self._html_search_regex(
@@ -1017,7 +1020,7 @@ class VimeoLikesIE(InfoExtractor):
         description = self._html_search_meta('description', webpage)
 
         def _get_page(idx):
-            page_url = 'https://vimeo.com/user%s/likes/page:%d/sort:date' % (
+            page_url = 'https://vimeo.com/%s/likes/page:%d/sort:date' % (
                 user_id, idx + 1)
             webpage = self._download_webpage(
                 page_url, user_id,
@@ -1037,7 +1040,7 @@ class VimeoLikesIE(InfoExtractor):
 
         return {
             '_type': 'playlist',
-            'id': 'user%s_likes' % user_id,
+            'id': '%s_likes' % user_id,
             'title': title,
             'description': description,
             'entries': pl,

From 58197205d32ae7164303b8ac37ad1d1191a91a8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 18 May 2018 00:30:41 +0700
Subject: [PATCH 063/125] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index ef6cc3850..37dba892e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,34 @@
+version <unreleased>
+
+Extractors
+* [vimeo:likes] Relax URL regular expression and fix single page likes
+  extraction (#16475)
+* [pluralsight] Fix clip id extraction (#16460)
++ [mychannels] Add support for mychannels.com (#15334)
+- [moniker] Remove extractor (#15336)
+* [pbs] Fix embed data extraction (#16474)
++ [mtv] Add support for paramountnetwork.com and bellator.com (#15418)
+* [youtube] Fix hd720 format position
+* [dailymotion] Remove fragment part from m3u8 URLs (#8915)
+* [3sat] Improve extraction (#15350)
+    * Extract all formats
+    * Extract more format metadata
+    * Improve format sorting
+    * Use hls native downloader
+    * Detect and bypass geo-restriction
++ [dtube] Add support for d.tube (#15201)
+* [options] Fix typo (#16450)
+* [youtube] Improve format filesize extraction (#16453)
+* [youtube] Make uploader extraction non-fatal (#16444)
+* [youtube] Fix extraction for embed restricted live streams (#16433)
+* [nbc] Improve info extraction (#16440)
+* [twitch:clips] Fix extraction (#16429)
+* [redditr] Relax URL regular expression (#16426, #16427)
+* [mixcloud] Bypass throttling for HTTP formats (#12579, #16424)
++ [nick] Add support for nickjr.de (#13230)
+* [teamcoco] Fix extraction (#16374)
+
+
 version 2018.05.09
 
 Core

From 7550ea501a94ed9060220cf4c8f696e514862c1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 18 May 2018 00:32:51 +0700
Subject: [PATCH 064/125] release 2018.05.18

---
 .github/ISSUE_TEMPLATE.md | 6 +++---
 ChangeLog                 | 2 +-
 README.md                 | 2 +-
 docs/supportedsites.md    | 7 ++++---
 youtube_dl/version.py     | 2 +-
 5 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index b2bfa9ec5..7d9de5171 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.09*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.09**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.18*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.18**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.05.09
+[debug] youtube-dl version 2018.05.18
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/ChangeLog b/ChangeLog
index 37dba892e..08233cd5b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2018.05.18
 
 Extractors
 * [vimeo:likes] Relax URL regular expression and fix single page likes
diff --git a/README.md b/README.md
index d9fe2350a..20982b0f1 100644
--- a/README.md
+++ b/README.md
@@ -106,7 +106,7 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
     --geo-verification-proxy URL     Use this proxy to verify the IP address for
                                      some geo-restricted sites. The default
                                      proxy specified by --proxy (or none, if the
-                                     options is not present) is used for the
+                                     option is not present) is used for the
                                      actual downloading.
     --geo-bypass                     Bypass geographic restriction via faking
                                      X-Forwarded-For HTTP header (experimental)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 88fac6e90..c1048cc4c 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -100,6 +100,7 @@
  - **Beatport**
  - **Beeg**
  - **BehindKink**
+ - **Bellator**
  - **BellMedia**
  - **Bet**
  - **Bigflix**
@@ -234,6 +235,7 @@
  - **DrTuber**
  - **drtv**
  - **drtv:live**
+ - **DTube**
  - **Dumpert**
  - **dvtv**: http://video.aktualne.cz/
  - **dw**
@@ -448,7 +450,6 @@
  - **mailru**: Видео@Mail.Ru
  - **mailru:music**: Музыка@Mail.Ru
  - **mailru:music:search**: Музыка@Mail.Ru
- - **MakersChannel**
  - **MakerTV**
  - **mangomolo:live**
  - **mangomolo:video**
@@ -486,7 +487,6 @@
  - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
  - **Mofosex**
  - **Mojvideo**
- - **Moniker**: allmyvideos.net and vidspot.net
  - **Morningstar**: morningstar.com
  - **Motherless**
  - **MotherlessGroup**
@@ -508,6 +508,7 @@
  - **mva:course**: Microsoft Virtual Academy courses
  - **Mwave**
  - **MwaveMeetGreet**
+ - **MyChannels**
  - **MySpace**
  - **MySpace:album**
  - **MySpass**
@@ -618,6 +619,7 @@
  - **PacktPubCourse**
  - **PandaTV**: 熊猫TV
  - **pandora.tv**: 판도라TV
+ - **ParamountNetwork**
  - **parliamentlive.tv**: UK parliament videos
  - **Patreon**
  - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET  (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
@@ -789,7 +791,6 @@
  - **Spiegel**
  - **Spiegel:Article**: Articles on spiegel.de
  - **Spiegeltv**
- - **Spike**
  - **Sport5**
  - **SportBoxEmbed**
  - **SportDeutschland**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 6f47b1795..a43eec860 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.05.09'
+__version__ = '2018.05.18'

From 0167f0dbfe792355e793ea82791d61fc1d05f1f9 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 19 May 2018 10:15:11 +0100
Subject: [PATCH 065/125] [imdb] improve extraction (fixes #4085, fixes #14557)

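The per-format player pages with imdb-player-data JSON are gone; everything
now comes from a single videoplayer page that pushes its metadata into
window.IMDbReactInitialState. A stripped-down sketch of pulling it out, using
the same regex and dict shape as this patch (the sample payload is made up):

    import json
    import re

    webpage = ('<script>window.IMDbReactInitialState.push({"videos":'
               ' {"videoMetadata": {"vi123": {"title": "Trailer"}}}});</script>')
    video_id = '123'
    metadata = json.loads(re.search(
        r'window\.IMDbReactInitialState\.push\(({.+?})\);',
        webpage).group(1))['videos']['videoMetadata']['vi' + video_id]
    print(metadata['title'])
    # Trailer
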
---
 youtube_dl/extractor/imdb.py | 107 ++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 59 deletions(-)

diff --git a/youtube_dl/extractor/imdb.py b/youtube_dl/extractor/imdb.py
index 425421968..926c2c388 100644
--- a/youtube_dl/extractor/imdb.py
+++ b/youtube_dl/extractor/imdb.py
@@ -7,23 +7,23 @@ from ..compat import compat_str
 from ..utils import (
     determine_ext,
     mimetype2ext,
+    parse_duration,
     qualities,
-    remove_end,
 )
 
 
 class ImdbIE(InfoExtractor):
     IE_NAME = 'imdb'
     IE_DESC = 'Internet Movie Database trailers'
-    _VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video|title).+?[/-]vi(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video|title|list).+?[/-]vi(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://www.imdb.com/video/imdb/vi2524815897',
         'info_dict': {
             'id': '2524815897',
             'ext': 'mp4',
-            'title': 'Ice Age: Continental Drift Trailer (No. 2)',
-            'description': 'md5:9061c2219254e5d14e03c25c98e96a81',
+            'title': 'No. 2 from Ice Age: Continental Drift (2012)',
+            'description': 'md5:87bd0bdc61e351f21f20d2d7441cb4e7',
         }
     }, {
         'url': 'http://www.imdb.com/video/_/vi2524815897',
@@ -40,82 +40,67 @@ class ImdbIE(InfoExtractor):
     }, {
         'url': 'http://www.imdb.com/title/tt4218696/videoplayer/vi2608641561',
         'only_matching': True,
+    }, {
+        'url': 'https://www.imdb.com/list/ls009921623/videoplayer/vi260482329',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
-        descr = self._html_search_regex(
-            r'(?s)<span itemprop="description">(.*?)</span>',
-            webpage, 'description', fatal=False)
-        player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id
-        player_page = self._download_webpage(
-            player_url, video_id, 'Downloading player page')
-        # the player page contains the info for the default format, we have to
-        # fetch other pages for the rest of the formats
-        extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page)
-        format_pages = [
-            self._download_webpage(
-                f_url, video_id, 'Downloading info for %s format' % f_name)
-            for f_url, f_name in extra_formats]
-        format_pages.append(player_page)
+        webpage = self._download_webpage(
+            'https://www.imdb.com/videoplayer/vi' + video_id, video_id)
+        video_metadata = self._parse_json(self._search_regex(
+            r'window\.IMDbReactInitialState\.push\(({.+?})\);', webpage,
+            'video metadata'), video_id)['videos']['videoMetadata']['vi' + video_id]
+        title = self._html_search_meta(
+            ['og:title', 'twitter:title'], webpage) or self._html_search_regex(
+            r'<title>(.+?)</title>', webpage, 'title', fatal=False) or video_metadata['title']
 
         quality = qualities(('SD', '480p', '720p', '1080p'))
         formats = []
-        for format_page in format_pages:
-            json_data = self._search_regex(
-                r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
-                format_page, 'json data', flags=re.DOTALL)
-            info = self._parse_json(json_data, video_id, fatal=False)
-            if not info:
+        for encoding in video_metadata.get('encodings', []):
+            if not encoding or not isinstance(encoding, dict):
                 continue
-            format_info = info.get('videoPlayerObject', {}).get('video', {})
-            if not format_info:
+            video_url = encoding.get('videoUrl')
+            if not video_url or not isinstance(video_url, compat_str):
                 continue
-            video_info_list = format_info.get('videoInfoList')
-            if not video_info_list or not isinstance(video_info_list, list):
+            ext = determine_ext(video_url, mimetype2ext(encoding.get('mimeType')))
+            if ext == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    video_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
                 continue
-            for video_info in video_info_list:
-                if not video_info or not isinstance(video_info, dict):
-                    continue
-                video_url = video_info.get('videoUrl')
-                if not video_url or not isinstance(video_url, compat_str):
-                    continue
-                if (video_info.get('videoMimeType') == 'application/x-mpegURL' or
-                        determine_ext(video_url) == 'm3u8'):
-                    formats.extend(self._extract_m3u8_formats(
-                        video_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                        m3u8_id='hls', fatal=False))
-                    continue
-                format_id = format_info.get('ffname')
-                formats.append({
-                    'format_id': format_id,
-                    'url': video_url,
-                    'ext': mimetype2ext(video_info.get('videoMimeType')),
-                    'quality': quality(format_id),
-                })
+            format_id = encoding.get('definition')
+            formats.append({
+                'format_id': format_id,
+                'url': video_url,
+                'ext': ext,
+                'quality': quality(format_id),
+            })
         self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'title': remove_end(self._og_search_title(webpage), ' - IMDb'),
+            'title': title,
             'formats': formats,
-            'description': descr,
-            'thumbnail': format_info.get('slate'),
+            'description': video_metadata.get('description'),
+            'thumbnail': video_metadata.get('slate', {}).get('url'),
+            'duration': parse_duration(video_metadata.get('duration')),
         }
 
 
 class ImdbListIE(InfoExtractor):
     IE_NAME = 'imdb:list'
     IE_DESC = 'Internet Movie Database lists'
-    _VALID_URL = r'https?://(?:www\.)?imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
+    _VALID_URL = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d+)(?!/videoplayer/vi\d+)'
     _TEST = {
-        'url': 'http://www.imdb.com/list/JFs9NWw6XI0',
+        'url': 'https://www.imdb.com/list/ls009921623/',
         'info_dict': {
-            'id': 'JFs9NWw6XI0',
-            'title': 'March 23, 2012 Releases',
+            'id': '009921623',
+            'title': 'The Bourne Legacy',
+            'description': 'A list of trailers, clips, and more from The Bourne Legacy, starring Jeremy Renner and Rachel Weisz.',
         },
-        'playlist_count': 7,
+        'playlist_count': 8,
     }
 
     def _real_extract(self, url):
@@ -123,9 +108,13 @@ class ImdbListIE(InfoExtractor):
         webpage = self._download_webpage(url, list_id)
         entries = [
             self.url_result('http://www.imdb.com' + m, 'Imdb')
-            for m in re.findall(r'href="(/video/imdb/vi[^"]+)"\s+data-type="playlist"', webpage)]
+            for m in re.findall(r'href="(/list/ls%s/videoplayer/vi[^"]+)"' % list_id, webpage)]
 
         list_title = self._html_search_regex(
-            r'<h1 class="header">(.*?)</h1>', webpage, 'list title')
+            r'<h1[^>]+class="[^"]*header[^"]*"[^>]*>(.*?)</h1>',
+            webpage, 'list title')
+        list_description = self._html_search_regex(
+            r'<div[^>]+class="[^"]*list-description[^"]*"[^>]*><p>(.*?)</p>',
+            webpage, 'list description')
 
-        return self.playlist_result(entries, list_id, list_title)
+        return self.playlist_result(entries, list_id, list_title, list_description)

From 27694fe7ad77d5f99d7b46fa7395f4ccbb378777 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 19 May 2018 11:04:08 +0100
Subject: [PATCH 066/125] [imdb:list] fix _VALID_URL regex

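List IDs are exactly nine digits, and the bound matters for the negative
lookahead: with \d+ the engine can backtrack by one digit so that
(?!/videoplayer/vi\d+) no longer starts at the slash, letting ImdbListIE
swallow player URLs that belong to ImdbIE. With \d{9} there is nothing to
backtrack into:

    import re

    OLD = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d+)(?!/videoplayer/vi\d+)'
    NEW = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d{9})(?!/videoplayer/vi\d+)'
    url = 'https://www.imdb.com/list/ls009921623/videoplayer/vi260482329'
    print(re.match(OLD, url).group('id'))  # 00992162 - lookahead defeated
    print(re.match(NEW, url))              # None - left to ImdbIE
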
---
 youtube_dl/extractor/imdb.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/imdb.py b/youtube_dl/extractor/imdb.py
index 926c2c388..4bafa54a2 100644
--- a/youtube_dl/extractor/imdb.py
+++ b/youtube_dl/extractor/imdb.py
@@ -92,7 +92,7 @@ class ImdbIE(InfoExtractor):
 class ImdbListIE(InfoExtractor):
     IE_NAME = 'imdb:list'
     IE_DESC = 'Internet Movie Database lists'
-    _VALID_URL = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d+)(?!/videoplayer/vi\d+)'
+    _VALID_URL = r'https?://(?:www\.)?imdb\.com/list/ls(?P<id>\d{9})(?!/videoplayer/vi\d+)'
     _TEST = {
         'url': 'https://www.imdb.com/list/ls009921623/',
         'info_dict': {

From acd620c930a92511c2e2099a4fc82d41825fdf93 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 19 May 2018 12:19:05 +0100
Subject: [PATCH 067/125] [teamcoco] improve _VALID_URL regex (#16484)

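Slugs such as the-conan-audiencey-awards-for-04/25/18 contain slashes, so the
id group gains an optional ([^/]+/)* prefix. A quick check:

    import re

    _VALID_URL = r'https?://teamcoco\.com/video/(?P<id>([^/]+/)*[^/?#]+)'
    print(re.match(
        _VALID_URL,
        'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18'
    ).group('id'))
    # the-conan-audiencey-awards-for-04/25/18
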
---
 youtube_dl/extractor/teamcoco.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py
index f06e5b19a..64235b0f6 100644
--- a/youtube_dl/extractor/teamcoco.py
+++ b/youtube_dl/extractor/teamcoco.py
@@ -16,7 +16,7 @@ from ..utils import (
 
 
 class TeamcocoIE(InfoExtractor):
-    _VALID_URL = r'https?://teamcoco\.com/video/(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://teamcoco\.com/video/(?P<id>([^/]+/)*[^/?#]+)'
     _TESTS = [
         {
             'url': 'http://teamcoco.com/video/mary-kay-remote',
@@ -67,6 +67,9 @@ class TeamcocoIE(InfoExtractor):
                 'skip_download': True,  # m3u8 downloads
             },
             'skip': 'This video is no longer available.',
+        }, {
+            'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18',
+            'only_matching': True,
         }
     ]
 

From f2b1fa07ec063ca63373e8558223e7af544f2cf8 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 19 May 2018 13:05:51 +0100
Subject: [PATCH 068/125] [teamcoco] relax _VALID_URL regex and add a fallback
 for format extraction (fixes #16484)

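Two guards are added: the RecordVideoSource GraphQL response is no longer
assumed to carry src (hence the "or {}" plus .get chain), and if no formats
come out of it the record's own file.url, an HLS manifest, is handed to
_extract_m3u8_formats. The defensive pattern in isolation:

    response = None                    # GraphQL call yielded nothing usable
    video_sources = response or {}
    srcs = video_sources.get('src', {})
    print(list(srcs.items()))          # [] - no TypeError/KeyError either way
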
---
 youtube_dl/extractor/teamcoco.py | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py
index 64235b0f6..63fd4fe1c 100644
--- a/youtube_dl/extractor/teamcoco.py
+++ b/youtube_dl/extractor/teamcoco.py
@@ -16,7 +16,7 @@ from ..utils import (
 
 
 class TeamcocoIE(InfoExtractor):
-    _VALID_URL = r'https?://teamcoco\.com/video/(?P<id>([^/]+/)*[^/?#]+)'
+    _VALID_URL = r'https?://teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)'
     _TESTS = [
         {
             'url': 'http://teamcoco.com/video/mary-kay-remote',
@@ -70,6 +70,15 @@ class TeamcocoIE(InfoExtractor):
         }, {
             'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18',
             'only_matching': True,
+        }, {
+            'url': 'http://teamcoco.com/italy/conan-jordan-schlansky-hit-the-streets-of-florence',
+            'only_matching': True,
+        }, {
+            'url': 'http://teamcoco.com/haiti/conan-s-haitian-history-lesson',
+            'only_matching': True,
+        }, {
+            'url': 'http://teamcoco.com/israel/conan-hits-the-streets-beaches-of-tel-aviv',
+            'only_matching': True,
         }
     ]
 
@@ -84,7 +93,7 @@ class TeamcocoIE(InfoExtractor):
         display_id = self._match_id(url)
 
         response = self._graphql_call('''{
-  %s(slug: "video/%s") {
+  %s(slug: "%s") {
     ... on RecordSlug {
       record {
         id
@@ -94,6 +103,9 @@ class TeamcocoIE(InfoExtractor):
         thumb {
           preview
         }
+        file {
+          url
+        }
         tags {
           name
         }
@@ -111,15 +123,15 @@ class TeamcocoIE(InfoExtractor):
         record = response['record']
         video_id = record['id']
 
-        srcs = self._graphql_call('''{
+        video_sources = self._graphql_call('''{
   %s(id: "%s") {
     src
   }
-}''', 'RecordVideoSource', video_id)['src']
+}''', 'RecordVideoSource', video_id) or {}
 
         formats = []
         get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
-        for format_id, src in srcs.items():
+        for format_id, src in video_sources.get('src', {}).items():
             if not isinstance(src, dict):
                 continue
             src_url = src.get('src')
@@ -146,6 +158,9 @@ class TeamcocoIE(InfoExtractor):
                     'format_id': format_id,
                     'quality': get_quality(format_id),
                 })
+        if not formats:
+            formats = self._extract_m3u8_formats(
+                record['file']['url'], video_id, 'mp4', fatal=False)
         self._sort_formats(formats)
 
         return {

From 504f20dd302189db2cfe1cb5ee9a622c39ee693c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 19 May 2018 23:53:24 +0700
Subject: [PATCH 069/125] Remove experimental mark for some options

---
 youtube_dl/YoutubeDL.py         | 10 +++++-----
 youtube_dl/downloader/common.py |  1 -
 youtube_dl/extractor/common.py  |  9 +++------
 youtube_dl/options.py           | 12 ++++++------
 4 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 046e03247..2a405c5ca 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -211,7 +211,7 @@ class YoutubeDL(object):
                        At the moment, this is only supported by YouTube.
     proxy:             URL of the proxy server to use
     geo_verification_proxy:  URL of the proxy to use for IP address verification
-                       on geo-restricted sites. (Experimental)
+                       on geo-restricted sites.
     socket_timeout:    Time to wait for unresponsive hosts, in seconds
     bidi_workaround:   Work around buggy terminals without bidirectional text
                        support, using fribidi
@@ -259,7 +259,7 @@ class YoutubeDL(object):
                        - "warn": only emit a warning
                        - "detect_or_warn": check whether we can do anything
                                            about it, warn otherwise (default)
-    source_address:    (Experimental) Client-side IP address to bind to.
+    source_address:    Client-side IP address to bind to.
     call_home:         Boolean, true iff we are allowed to contact the
                        youtube-dl servers for debugging.
     sleep_interval:    Number of seconds to sleep before each download when
@@ -281,14 +281,14 @@ class YoutubeDL(object):
                        match_filter_func in utils.py is one example for this.
     no_color:          Do not emit color codes in output.
     geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
-                       HTTP header (experimental)
+                       HTTP header
     geo_bypass_country:
                        Two-letter ISO 3166-2 country code that will be used for
                        explicit geographic restriction bypassing via faking
-                       X-Forwarded-For HTTP header (experimental)
+                       X-Forwarded-For HTTP header
     geo_bypass_ip_block:
                        IP range in CIDR notation that will be used similarly to
-                       geo_bypass_country (experimental)
+                       geo_bypass_country
 
     The following options determine which downloader is picked:
     external_downloader: Executable of the external downloader to call.
diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py
index edd125ee2..5979833c0 100644
--- a/youtube_dl/downloader/common.py
+++ b/youtube_dl/downloader/common.py
@@ -45,7 +45,6 @@ class FileDownloader(object):
     min_filesize:       Skip files smaller than this size
     max_filesize:       Skip files larger than this size
     xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
-                        (experimental)
     external_downloader_args:  A list of additional command-line arguments for the
                         external downloader.
     hls_use_mpegts:     Use the mpegts container for HLS videos.
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 3ef5af13c..a2548dba3 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -339,20 +339,17 @@ class InfoExtractor(object):
     _GEO_BYPASS attribute may be set to False in order to disable
     geo restriction bypass mechanisms for a particular extractor.
     Though it won't disable explicit geo restriction bypass based on
-    country code provided with geo_bypass_country. (experimental)
+    country code provided with geo_bypass_country.
 
     _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
     countries for this extractor. One of these countries will be used by
     geo restriction bypass mechanism right away in order to bypass
-    geo restriction, of course, if the mechanism is not disabled. (experimental)
+    geo restriction, of course, if the mechanism is not disabled.
 
     _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
     IP blocks in CIDR notation for this extractor. One of these IP blocks
     will be used by geo restriction bypass mechanism similarly
-    to _GEO_COUNTRIES. (experimental)
-
-    NB: both these geo attributes are experimental and may change in future
-    or be completely removed.
+    to _GEO_COUNTRIES.
 
     Finally, the _WORKING attribute should be set to False for broken IEs
     in order to warn the users and skip the tests.
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index b692c6b3b..e83d546a0 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -203,7 +203,7 @@ def parseOpts(overrideArguments=None):
     network.add_option(
         '--proxy', dest='proxy',
         default=None, metavar='URL',
-        help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable experimental '
+        help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable '
              'SOCKS proxy, specify a proper scheme. For example '
              'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
              'for direct connection')
@@ -240,19 +240,19 @@ def parseOpts(overrideArguments=None):
     geo.add_option(
         '--geo-bypass',
         action='store_true', dest='geo_bypass', default=True,
-        help='Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)')
+        help='Bypass geographic restriction via faking X-Forwarded-For HTTP header')
     geo.add_option(
         '--no-geo-bypass',
         action='store_false', dest='geo_bypass', default=True,
-        help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)')
+        help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header')
     geo.add_option(
         '--geo-bypass-country', metavar='CODE',
         dest='geo_bypass_country', default=None,
-        help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code (experimental)')
+        help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code')
     geo.add_option(
         '--geo-bypass-ip-block', metavar='IP_BLOCK',
         dest='geo_bypass_ip_block', default=None,
-        help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation (experimental)')
+        help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation')
 
     selection = optparse.OptionGroup(parser, 'Video Selection')
     selection.add_option(
@@ -502,7 +502,7 @@ def parseOpts(overrideArguments=None):
     downloader.add_option(
         '--xattr-set-filesize',
         dest='xattr_set_filesize', action='store_true',
-        help='Set file xattribute ytdl.filesize with expected file size (experimental)')
+        help='Set file xattribute ytdl.filesize with expected file size')
     downloader.add_option(
         '--hls-prefer-native',
         dest='hls_prefer_native', action='store_true', default=None,

From 5c766952dc6de9065060344342184c4037403409 Mon Sep 17 00:00:00 2001
From: huichen90 <35417991+huichen90@users.noreply.github.com>
Date: Wed, 16 May 2018 17:29:25 +0800
Subject: [PATCH 070/125] Update leeco.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes youtube_dl.utils.ExtractorError: An extractor error has occurred. (caused by KeyError('location')) by requesting the flash playJson data with splatid 105 instead of 101.
---
 youtube_dl/extractor/leeco.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/leeco.py b/youtube_dl/extractor/leeco.py
index ffe10154b..8dd1ce0d0 100644
--- a/youtube_dl/extractor/leeco.py
+++ b/youtube_dl/extractor/leeco.py
@@ -130,7 +130,7 @@ class LeIE(InfoExtractor):
             media_id, 'Downloading flash playJson data', query={
                 'id': media_id,
                 'platid': 1,
-                'splatid': 101,
+                'splatid': 105,
                 'format': 1,
                 'source': 1000,
                 'tkey': self.calc_time_key(int(time.time())),

From db2058f63e64ff59ffad0e1e8ad5e18d18d3da71 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 21 May 2018 14:53:02 +0100
Subject: [PATCH 071/125] [globo] improve extraction (closes #4189)

- add support for authentication
- simplify URL signing (see the sketch below)
- extract DASH and MSS formats
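
The hand-written port of the ActionScript MD5 routines goes away; with the
flash player no longer targeted, URL signing reduces to stdlib hashlib and
base64. A minimal sketch of the MD5-plus-urlsafe-base64 signing this patch
switches to (function name and inputs are illustrative, not real tokens):

    import base64
    import hashlib

    def sign(received_md5, padding):
        md5_data = (received_md5 + padding + '0xFF01DD').encode()
        return base64.urlsafe_b64encode(
            hashlib.md5(md5_data).digest()).decode().strip('=')

    print(sign('d41d8cd98f00b204e9800998ecf8427e', '0000012345'))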
---
 youtube_dl/extractor/globo.py | 321 +++++-----------------------------
 1 file changed, 41 insertions(+), 280 deletions(-)

diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index dc7b2661c..730deda6b 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -1,16 +1,14 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import base64
+import hashlib
+import json
 import random
 import re
-import math
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_chr,
-    compat_ord,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -22,12 +20,7 @@ from ..utils import (
 
 class GloboIE(InfoExtractor):
     _VALID_URL = r'(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})'
-
-    _API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
-    _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s'
-
-    _RESIGN_EXPIRATION = 86400
-
+    _LOGGED_IN = False
     _TESTS = [{
         'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
         'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
@@ -70,287 +63,49 @@ class GloboIE(InfoExtractor):
         'only_matching': True,
     }]
 
-    class MD5(object):
-        HEX_FORMAT_LOWERCASE = 0
-        HEX_FORMAT_UPPERCASE = 1
-        BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
-        BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
-        PADDING = '=0xFF01DD'
-        hexcase = 0
-        b64pad = ''
+    def _real_initialize(self):
+        if self._LOGGED_IN:
+            return
 
-        def __init__(self):
-            pass
+        email, password = self._get_login_info()
+        if email is None:
+            return
 
-        class JSArray(list):
-            def __getitem__(self, y):
-                try:
-                    return list.__getitem__(self, y)
-                except IndexError:
-                    return 0
-
-            def __setitem__(self, i, y):
-                try:
-                    return list.__setitem__(self, i, y)
-                except IndexError:
-                    self.extend([0] * (i - len(self) + 1))
-                    self[-1] = y
-
-        @classmethod
-        def hex_md5(cls, param1):
-            return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
-
-        @classmethod
-        def b64_md5(cls, param1, param2=None):
-            return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
-
-        @classmethod
-        def any_md5(cls, param1, param2):
-            return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
-
-        @classmethod
-        def rstr_md5(cls, param1):
-            return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
-
-        @classmethod
-        def rstr2hex(cls, param1):
-            _loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
-            _loc_3 = ''
-            for _loc_5 in range(0, len(param1)):
-                _loc_4 = compat_ord(param1[_loc_5])
-                _loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
-            return _loc_3
-
-        @classmethod
-        def rstr2b64(cls, param1):
-            _loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
-            _loc_3 = ''
-            _loc_4 = len(param1)
-            for _loc_5 in range(0, _loc_4, 3):
-                _loc_6_1 = compat_ord(param1[_loc_5]) << 16
-                _loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
-                _loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
-                _loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
-                for _loc_7 in range(0, 4):
-                    if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
-                        _loc_3 += cls.b64pad
-                    else:
-                        _loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
-            return _loc_3
-
-        @staticmethod
-        def rstr2any(param1, param2):
-            _loc_3 = len(param2)
-            _loc_4 = []
-            _loc_9 = [0] * ((len(param1) >> 2) + 1)
-            for _loc_5 in range(0, len(_loc_9)):
-                _loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
-
-            while len(_loc_9) > 0:
-                _loc_8 = []
-                _loc_7 = 0
-                for _loc_5 in range(0, len(_loc_9)):
-                    _loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
-                    _loc_6 = math.floor(_loc_7 / _loc_3)
-                    _loc_7 -= _loc_6 * _loc_3
-                    if len(_loc_8) > 0 or _loc_6 > 0:
-                        _loc_8[len(_loc_8)] = _loc_6
-
-                _loc_4[len(_loc_4)] = _loc_7
-                _loc_9 = _loc_8
-
-            _loc_10 = ''
-            _loc_5 = len(_loc_4) - 1
-            while _loc_5 >= 0:
-                _loc_10 += param2[_loc_4[_loc_5]]
-                _loc_5 -= 1
-
-            return _loc_10
-
-        @classmethod
-        def str2rstr_utf8(cls, param1, param2=None):
-            _loc_3 = ''
-            _loc_4 = -1
-            if not param2:
-                param2 = cls.PADDING
-            param1 = param1 + param2[1:9]
-            while True:
-                _loc_4 += 1
-                if _loc_4 >= len(param1):
-                    break
-                _loc_5 = compat_ord(param1[_loc_4])
-                _loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
-                if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
-                    _loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
-                    _loc_4 += 1
-                if _loc_5 <= 127:
-                    _loc_3 += compat_chr(_loc_5)
-                    continue
-                if _loc_5 <= 2047:
-                    _loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
-                    continue
-                if _loc_5 <= 65535:
-                    _loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
-                        128 | _loc_5 & 63)
-                    continue
-                if _loc_5 <= 2097151:
-                    _loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
-                        128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
-            return _loc_3
-
-        @staticmethod
-        def rstr2binl(param1):
-            _loc_2 = [0] * ((len(param1) >> 2) + 1)
-            for _loc_3 in range(0, len(_loc_2)):
-                _loc_2[_loc_3] = 0
-            for _loc_3 in range(0, len(param1) * 8, 8):
-                _loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
-            return _loc_2
-
-        @staticmethod
-        def binl2rstr(param1):
-            _loc_2 = ''
-            for _loc_3 in range(0, len(param1) * 32, 8):
-                _loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
-            return _loc_2
-
-        @classmethod
-        def binl_md5(cls, param1, param2):
-            param1 = cls.JSArray(param1)
-            param1[param2 >> 5] |= 128 << param2 % 32
-            param1[(param2 + 64 >> 9 << 4) + 14] = param2
-            _loc_3 = 1732584193
-            _loc_4 = -271733879
-            _loc_5 = -1732584194
-            _loc_6 = 271733878
-            for _loc_7 in range(0, len(param1), 16):
-                _loc_8 = _loc_3
-                _loc_9 = _loc_4
-                _loc_10 = _loc_5
-                _loc_11 = _loc_6
-                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 7, -680876936)
-                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 1], 12, -389564586)
-                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 17, 606105819)
-                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 3], 22, -1044525330)
-                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 7, -176418897)
-                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 5], 12, 1200080426)
-                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 17, -1473231341)
-                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 7], 22, -45705983)
-                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 7, 1770035416)
-                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 9], 12, -1958414417)
-                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 17, -42063)
-                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 11], 22, -1990404162)
-                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 7, 1804603682)
-                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 13], 12, -40341101)
-                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 17, -1502002290)
-                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 15], 22, 1236535329)
-                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 5, -165796510)
-                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 6], 9, -1069501632)
-                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 14, 643717713)
-                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 0], 20, -373897302)
-                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 5, -701558691)
-                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 10], 9, 38016083)
-                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 14, -660478335)
-                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 4], 20, -405537848)
-                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 5, 568446438)
-                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 14], 9, -1019803690)
-                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 14, -187363961)
-                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 8], 20, 1163531501)
-                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 5, -1444681467)
-                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 2], 9, -51403784)
-                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 14, 1735328473)
-                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 12], 20, -1926607734)
-                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 4, -378558)
-                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 8], 11, -2022574463)
-                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 16, 1839030562)
-                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 14], 23, -35309556)
-                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 4, -1530992060)
-                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 4], 11, 1272893353)
-                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 16, -155497632)
-                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 10], 23, -1094730640)
-                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 4, 681279174)
-                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 0], 11, -358537222)
-                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 16, -722521979)
-                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 6], 23, 76029189)
-                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 4, -640364487)
-                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 12], 11, -421815835)
-                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 16, 530742520)
-                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 2], 23, -995338651)
-                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 6, -198630844)
-                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 7], 10, 1126891415)
-                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 15, -1416354905)
-                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 5], 21, -57434055)
-                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 6, 1700485571)
-                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 3], 10, -1894986606)
-                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 15, -1051523)
-                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 1], 21, -2054922799)
-                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 6, 1873313359)
-                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 15], 10, -30611744)
-                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 15, -1560198380)
-                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 13], 21, 1309151649)
-                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 6, -145523070)
-                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 11], 10, -1120210379)
-                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 15, 718787259)
-                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 9], 21, -343485551)
-                _loc_3 = cls.safe_add(_loc_3, _loc_8)
-                _loc_4 = cls.safe_add(_loc_4, _loc_9)
-                _loc_5 = cls.safe_add(_loc_5, _loc_10)
-                _loc_6 = cls.safe_add(_loc_6, _loc_11)
-            return [_loc_3, _loc_4, _loc_5, _loc_6]
-
-        @classmethod
-        def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
-            return cls.safe_add(
-                cls.bit_rol(cls.safe_add(cls.safe_add(param2, param1), cls.safe_add(param4, param6)), param5), param3)
-
-        @classmethod
-        def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
-            return cls.md5_cmn(param2 & param3 | ~param2 & param4, param1, param2, param5, param6, param7)
-
-        @classmethod
-        def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
-            return cls.md5_cmn(param2 & param4 | param3 & ~param4, param1, param2, param5, param6, param7)
-
-        @classmethod
-        def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
-            return cls.md5_cmn(param2 ^ param3 ^ param4, param1, param2, param5, param6, param7)
-
-        @classmethod
-        def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
-            return cls.md5_cmn(param3 ^ (param2 | ~param4), param1, param2, param5, param6, param7)
-
-        @classmethod
-        def safe_add(cls, param1, param2):
-            _loc_3 = (param1 & 65535) + (param2 & 65535)
-            _loc_4 = (param1 >> 16) + (param2 >> 16) + (_loc_3 >> 16)
-            return cls.lshift(_loc_4, 16) | _loc_3 & 65535
-
-        @classmethod
-        def bit_rol(cls, param1, param2):
-            return cls.lshift(param1, param2) | (param1 & 0xFFFFFFFF) >> (32 - param2)
-
-        @staticmethod
-        def lshift(value, count):
-            r = (0xFFFFFFFF & value) << count
-            return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r
+        self._download_json(
+            'https://login.globo.com/api/authentication', None, data=json.dumps({
+                'payload': {
+                    'email': email,
+                    'password': password,
+                    'serviceId': 4654,
+                },
+            }).encode(), headers={
+                'Content-Type': 'application/json; charset=utf-8',
+            })
+        self._LOGGED_IN = True
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
         video = self._download_json(
-            self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
+            'http://api.globovideos.com/videos/%s/playlist' % video_id,
+            video_id)['videos'][0]
 
         title = video['title']
 
         formats = []
         for resource in video['resources']:
             resource_id = resource.get('_id')
-            if not resource_id or resource_id.endswith('manifest'):
+            resource_url = resource.get('url')
+            if not resource_id or not resource_url:
                 continue
 
             security = self._download_json(
-                self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
-                video_id, 'Downloading security hash for %s' % resource_id)
+                'http://security.video.globo.com/videos/%s/hash' % video_id,
+                video_id, 'Downloading security hash for %s' % resource_id, query={
+                    'player': 'flash',
+                    'version': '17.0.0.132',
+                    'resource_id': resource_id,
+                })
 
             security_hash = security.get('hash')
             if not security_hash:
@@ -365,18 +120,24 @@ class GloboIE(InfoExtractor):
             received_random = security_hash[12:22]
             received_md5 = security_hash[22:]
 
-            sign_time = received_time + self._RESIGN_EXPIRATION
+            sign_time = received_time + 86400
             padding = '%010d' % random.randint(1, 10000000000)
 
-            signed_md5 = self.MD5.b64_md5(received_md5 + compat_str(sign_time) + padding)
+            md5_data = (received_md5 + str(sign_time) + padding + '0xFF01DD').encode()
+            signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=')
             signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5
 
-            resource_url = resource['url']
             signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash')
             if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'):
                 formats.extend(self._extract_m3u8_formats(
                     signed_url, resource_id, 'mp4', entry_protocol='m3u8_native',
                     m3u8_id='hls', fatal=False))
+            elif resource_id.endswith('mpd') or resource_url.endswith('.mpd'):
+                formats.extend(self._extract_mpd_formats(
+                    signed_url, resource_id, mpd_id='dash', fatal=False))
+            elif resource_id.endswith('manifest') or resource_url.endswith('/manifest'):
+                formats.extend(self._extract_ism_formats(
+                    signed_url, resource_id, ism_id='mss', fatal=False))
             else:
                 formats.append({
                     'url': signed_url,

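The net effect of this patch is that the bundled, ActionScript-derived MD5 class is replaced by two stdlib calls: hashlib.md5 for the digest and base64.urlsafe_b64encode, with its '=' padding stripped, for the web-safe encoding the old b64_md5 produced. Note the '0xFF01DD' suffix is the same padding the removed class applied via PADDING[1:9]. A minimal standalone sketch of the resulting signing step (the helper name is illustrative, not part of the patch):

    import base64
    import hashlib
    import random

    def sign_security_hash(security_hash):
        # Split the server-provided hash exactly as the extractor does.
        hash_code = security_hash[:2]
        received_time = int(security_hash[2:12])
        received_random = security_hash[12:22]
        received_md5 = security_hash[22:]

        # Re-sign with a 24-hour (86400 s) validity window.
        sign_time = received_time + 86400
        padding = '%010d' % random.randint(1, 10000000000)

        md5_data = (received_md5 + str(sign_time) + padding + '0xFF01DD').encode()
        signed_md5 = base64.urlsafe_b64encode(
            hashlib.md5(md5_data).digest()).decode().strip('=')
        return (hash_code + str(received_time) + received_random +
                str(sign_time) + padding + signed_md5)
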
From e5187493002f1d089d450fc3b2b4af64c996dc71 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 21 May 2018 15:07:24 +0100
Subject: [PATCH 072/125] [globo] handle login errors

---
 youtube_dl/extractor/globo.py | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 730deda6b..9c2360464 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -8,7 +8,10 @@ import random
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
+from ..compat import (
+    compat_HTTPError,
+    compat_str,
+)
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -71,16 +74,22 @@ class GloboIE(InfoExtractor):
         if email is None:
             return
 
-        self._download_json(
-            'https://login.globo.com/api/authentication', None, data=json.dumps({
-                'payload': {
-                    'email': email,
-                    'password': password,
-                    'serviceId': 4654,
-                },
-            }).encode(), headers={
-                'Content-Type': 'application/json; charset=utf-8',
-            })
+        try:
+            self._download_json(
+                'https://login.globo.com/api/authentication', None, data=json.dumps({
+                    'payload': {
+                        'email': email,
+                        'password': password,
+                        'serviceId': 4654,
+                    },
+                }).encode(), headers={
+                    'Content-Type': 'application/json; charset=utf-8',
+                })
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+                resp = self._parse_json(e.cause.read(), None)
+                raise ExtractorError(resp.get('userMessage') or resp['id'], expected=True)
+            raise
         self._LOGGED_IN = True
 
     def _real_extract(self, url):

From d81ffc3aa0f7b4114cec68cac9e347689a6d5462 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 21 May 2018 15:39:02 +0100
Subject: [PATCH 073/125] [globo] Add entry for netrc authentication

---
 youtube_dl/extractor/globo.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 9c2360464..8e6c38742 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -24,6 +24,7 @@ from ..utils import (
 class GloboIE(InfoExtractor):
     _VALID_URL = r'(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})'
     _LOGGED_IN = False
+    _NETRC_MACHINE = 'globo'
     _TESTS = [{
         'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
         'md5': 'b3ccc801f75cd04a914d51dadb83a78d',

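With _NETRC_MACHINE set, _get_login_info() can pull credentials from a standard ~/.netrc file in addition to --username/--password. A hypothetical entry (placeholder credentials):

    machine globo login user@example.com password hunter2
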
From b89ac534555692c3c29a57d97ec0bda3bef3b086 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 21 May 2018 17:46:52 +0100
Subject: [PATCH 074/125] [globo] use compat_str

---
 youtube_dl/extractor/globo.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 8e6c38742..81d6d36d3 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -126,16 +126,16 @@ class GloboIE(InfoExtractor):
                 continue
 
             hash_code = security_hash[:2]
-            received_time = int(security_hash[2:12])
+            received_time = security_hash[2:12]
             received_random = security_hash[12:22]
             received_md5 = security_hash[22:]
 
-            sign_time = received_time + 86400
+            sign_time = compat_str(int(received_time) + 86400)
             padding = '%010d' % random.randint(1, 10000000000)
 
-            md5_data = (received_md5 + str(sign_time) + padding + '0xFF01DD').encode()
+            md5_data = (received_md5 + sign_time + padding + '0xFF01DD').encode()
             signed_md5 = base64.urlsafe_b64encode(hashlib.md5(md5_data).digest()).decode().strip('=')
-            signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5
+            signed_hash = hash_code + received_time + received_random + sign_time + padding + signed_md5
 
             signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash')
             if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'):

From 57d6792024f2670a21f923dfbd81614a1ee6b735 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 23 May 2018 11:27:36 +0100
Subject: [PATCH 075/125] [viewlift] fix extraction for snagfilms.com (closes
 #15766)

---
 youtube_dl/extractor/viewlift.py | 164 ++++++++++++++++++++++---------
 1 file changed, 115 insertions(+), 49 deletions(-)

diff --git a/youtube_dl/extractor/viewlift.py b/youtube_dl/extractor/viewlift.py
index 1f29c273f..e466156f6 100644
--- a/youtube_dl/extractor/viewlift.py
+++ b/youtube_dl/extractor/viewlift.py
@@ -1,24 +1,27 @@
 from __future__ import unicode_literals
 
+import base64
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     ExtractorError,
     clean_html,
     determine_ext,
     int_or_none,
     js_to_json,
+    parse_age_limit,
     parse_duration,
 )
 
 
 class ViewLiftBaseIE(InfoExtractor):
-    _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|monumentalsportsnetwork|vayafilm)\.com|kesari\.tv'
+    _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm)\.com'
 
 
 class ViewLiftEmbedIE(ViewLiftBaseIE):
-    _VALID_URL = r'https?://(?:(?:www|embed)\.)?(?:%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f-]{36})' % ViewLiftBaseIE._DOMAINS_REGEX
+    _VALID_URL = r'https?://(?:(?:www|embed)\.)?(?:%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' % ViewLiftBaseIE._DOMAINS_REGEX
     _TESTS = [{
         'url': 'http://embed.snagfilms.com/embed/player?filmId=74849a00-85a9-11e1-9660-123139220831&w=500',
         'md5': '2924e9215c6eff7a55ed35b72276bd93',
@@ -60,8 +63,10 @@ class ViewLiftEmbedIE(ViewLiftBaseIE):
 
         formats = []
         has_bitrate = False
-        for source in self._parse_json(js_to_json(self._search_regex(
-                r'(?s)sources:\s*(\[.+?\]),', webpage, 'json')), video_id):
+        sources = self._parse_json(self._search_regex(
+            r'(?s)sources:\s*(\[.+?\]),', webpage,
+            'sources', default='[]'), video_id, js_to_json)
+        for source in sources:
             file_ = source.get('file')
             if not file_:
                 continue
@@ -70,7 +75,8 @@ class ViewLiftEmbedIE(ViewLiftBaseIE):
             format_id = source.get('label') or ext
             if all(v in ('m3u8', 'hls') for v in (type_, ext)):
                 formats.extend(self._extract_m3u8_formats(
-                    file_, video_id, 'mp4', m3u8_id='hls'))
+                    file_, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id='hls', fatal=False))
             else:
                 bitrate = int_or_none(self._search_regex(
                     [r'(\d+)kbps', r'_\d{1,2}x\d{1,2}_(\d{3,})\.%s' % ext],
@@ -85,6 +91,13 @@ class ViewLiftEmbedIE(ViewLiftBaseIE):
                     'tbr': bitrate,
                     'height': height,
                 })
+        if not formats:
+            hls_url = self._parse_json(self._search_regex(
+                r'filmInfo\.src\s*=\s*({.+?});',
+                webpage, 'src'), video_id, js_to_json)['src']
+            formats = self._extract_m3u8_formats(
+                hls_url, video_id, 'mp4', 'm3u8_native',
+                m3u8_id='hls', fatal=False)
         field_preference = None if has_bitrate else ('height', 'tbr', 'format_id')
         self._sort_formats(formats, field_preference)
 
@@ -109,10 +122,13 @@ class ViewLiftIE(ViewLiftBaseIE):
             'display_id': 'lost_for_life',
             'ext': 'mp4',
             'title': 'Lost for Life',
-            'description': 'md5:fbdacc8bb6b455e464aaf98bc02e1c82',
+            'description': 'md5:ea10b5a50405ae1f7b5269a6ec594102',
             'thumbnail': r're:^https?://.*\.jpg',
             'duration': 4489,
-            'categories': ['Documentary', 'Crime', 'Award Winning', 'Festivals']
+            'categories': 'mincount:3',
+            'age_limit': 14,
+            'upload_date': '20150421',
+            'timestamp': 1429656819,
         }
     }, {
         'url': 'http://www.snagfilms.com/show/the_world_cut_project/india',
@@ -125,7 +141,9 @@ class ViewLiftIE(ViewLiftBaseIE):
             'description': 'md5:5c168c5a8f4719c146aad2e0dfac6f5f',
             'thumbnail': r're:^https?://.*\.jpg',
             'duration': 979,
-            'categories': ['Documentary', 'Sports', 'Politics']
+            'categories': 'mincount:2',
+            'timestamp': 1399478279,
+            'upload_date': '20140507',
         }
     }, {
         # Film is not playable in your area.
@@ -138,9 +156,6 @@ class ViewLiftIE(ViewLiftBaseIE):
     }, {
         'url': 'http://www.winnersview.com/videos/the-good-son',
         'only_matching': True,
-    }, {
-        'url': 'http://www.kesari.tv/news/video/1461919076414',
-        'only_matching': True,
     }, {
         # Was once Kaltura embed
         'url': 'https://www.monumentalsportsnetwork.com/videos/john-carlson-postgame-2-25-15',
@@ -156,45 +171,96 @@ class ViewLiftIE(ViewLiftBaseIE):
             raise ExtractorError(
                 'Film %s is not available.' % display_id, expected=True)
 
-        film_id = self._search_regex(r'filmId=([\da-f-]{36})"', webpage, 'film id')
+        initial_store_state = self._search_regex(
+            r"window\.initialStoreState\s*=.*?JSON\.parse\(unescape\(atob\('([^']+)'\)\)\)",
+            webpage, 'Initial Store State', default=None)
+        if initial_store_state:
+            modules = self._parse_json(compat_urllib_parse_unquote(base64.b64decode(
+                initial_store_state).decode()), display_id)['page']['data']['modules']
+            content_data = next(m['contentData'][0] for m in modules if m.get('moduleType') == 'VideoDetailModule')
+            gist = content_data['gist']
+            film_id = gist['id']
+            title = gist['title']
+            video_assets = content_data['streamingInfo']['videoAssets']
 
-        snag = self._parse_json(
-            self._search_regex(
-                r'Snag\.page\.data\s*=\s*(\[.+?\]);', webpage, 'snag'),
-            display_id)
+            formats = []
+            mpeg_video_assets = video_assets.get('mpeg') or []
+            for video_asset in mpeg_video_assets:
+                video_asset_url = video_asset.get('url')
+                if not video_asset_url:
+                    continue
+                bitrate = int_or_none(video_asset.get('bitrate'))
+                height = int_or_none(self._search_regex(
+                    r'^_?(\d+)[pP]$', video_asset.get('renditionValue'),
+                    'height', default=None))
+                formats.append({
+                    'url': video_asset_url,
+                    'format_id': 'http%s' % ('-%d' % bitrate if bitrate else ''),
+                    'tbr': bitrate,
+                    'height': height,
+                    'vcodec': video_asset.get('codec'),
+                })
 
-        for item in snag:
-            if item.get('data', {}).get('film', {}).get('id') == film_id:
-                data = item['data']['film']
-                title = data['title']
-                description = clean_html(data.get('synopsis'))
-                thumbnail = data.get('image')
-                duration = int_or_none(data.get('duration') or data.get('runtime'))
-                categories = [
-                    category['title'] for category in data.get('categories', [])
-                    if category.get('title')]
-                break
+            hls_url = video_assets.get('hls')
+            if hls_url:
+                formats.extend(self._extract_m3u8_formats(
+                    hls_url, film_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+            self._sort_formats(formats, ('height', 'tbr', 'format_id'))
+
+            info = {
+                'id': film_id,
+                'display_id': display_id,
+                'title': title,
+                'description': gist.get('description'),
+                'thumbnail': gist.get('videoImageUrl'),
+                'duration': int_or_none(gist.get('runtime')),
+                'age_limit': parse_age_limit(content_data.get('parentalRating', '').replace('_', '-')),
+                'timestamp': int_or_none(gist.get('publishDate'), 1000),
+                'formats': formats,
+            }
+            for k in ('categories', 'tags'):
+                info[k] = [v['title'] for v in content_data.get(k, []) if v.get('title')]
+            return info
         else:
-            title = self._search_regex(
-                r'itemprop="title">([^<]+)<', webpage, 'title')
-            description = self._html_search_regex(
-                r'(?s)<div itemprop="description" class="film-synopsis-inner ">(.+?)</div>',
-                webpage, 'description', default=None) or self._og_search_description(webpage)
-            thumbnail = self._og_search_thumbnail(webpage)
-            duration = parse_duration(self._search_regex(
-                r'<span itemprop="duration" class="film-duration strong">([^<]+)<',
-                webpage, 'duration', fatal=False))
-            categories = re.findall(r'<a href="/movies/[^"]+">([^<]+)</a>', webpage)
+            film_id = self._search_regex(r'filmId=([\da-f-]{36})"', webpage, 'film id')
 
-        return {
-            '_type': 'url_transparent',
-            'url': 'http://%s/embed/player?filmId=%s' % (domain, film_id),
-            'id': film_id,
-            'display_id': display_id,
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-            'duration': duration,
-            'categories': categories,
-            'ie_key': 'ViewLiftEmbed',
-        }
+            snag = self._parse_json(
+                self._search_regex(
+                    r'Snag\.page\.data\s*=\s*(\[.+?\]);', webpage, 'snag', default='[]'),
+                display_id)
+
+            for item in snag:
+                if item.get('data', {}).get('film', {}).get('id') == film_id:
+                    data = item['data']['film']
+                    title = data['title']
+                    description = clean_html(data.get('synopsis'))
+                    thumbnail = data.get('image')
+                    duration = int_or_none(data.get('duration') or data.get('runtime'))
+                    categories = [
+                        category['title'] for category in data.get('categories', [])
+                        if category.get('title')]
+                    break
+            else:
+                title = self._search_regex(
+                    r'itemprop="title">([^<]+)<', webpage, 'title')
+                description = self._html_search_regex(
+                    r'(?s)<div itemprop="description" class="film-synopsis-inner ">(.+?)</div>',
+                    webpage, 'description', default=None) or self._og_search_description(webpage)
+                thumbnail = self._og_search_thumbnail(webpage)
+                duration = parse_duration(self._search_regex(
+                    r'<span itemprop="duration" class="film-duration strong">([^<]+)<',
+                    webpage, 'duration', fatal=False))
+                categories = re.findall(r'<a href="/movies/[^"]+">([^<]+)</a>', webpage)
+
+            return {
+                '_type': 'url_transparent',
+                'url': 'http://%s/embed/player?filmId=%s' % (domain, film_id),
+                'id': film_id,
+                'display_id': display_id,
+                'title': title,
+                'description': description,
+                'thumbnail': thumbnail,
+                'duration': duration,
+                'categories': categories,
+                'ie_key': 'ViewLiftEmbed',
+            }

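The new primary code path decodes window.initialStoreState, which the page emits as btoa(escape(JSON.stringify(state))), i.e. base64 over a JS escape()-percent-encoded JSON blob; compat_urllib_parse_unquote undoes the escape() step. A self-contained sketch of the decode, assuming Python 3 (where compat_urllib_parse_unquote is urllib.parse.unquote; unquote only approximates JS unescape() for non-ASCII %uXXXX sequences, which suffices here):

    import base64
    import json
    from urllib.parse import unquote

    def decode_initial_store_state(blob):
        # base64-decode, undo the escape() percent-encoding, parse the JSON.
        return json.loads(unquote(base64.b64decode(blob).decode()))

    # Hypothetical round trip: '{"page":{}}' escaped and base64-encoded.
    blob = base64.b64encode(b'%7B%22page%22%3A%7B%7D%7D').decode()
    assert decode_initial_store_state(blob) == {'page': {}}
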
From b836118724122a639a1cb78d55d91724bf1e7251 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 23 May 2018 12:12:20 +0100
Subject: [PATCH 076/125] [utils] Relax TV Parental Guidelines matching

---
 youtube_dl/utils.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index f9ca63c58..d61af8837 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2253,12 +2253,12 @@ US_RATINGS = {
 
 
 TV_PARENTAL_GUIDELINES = {
-    'TV-Y': 0,
-    'TV-Y7': 7,
-    'TV-G': 0,
-    'TV-PG': 0,
-    'TV-14': 14,
-    'TV-MA': 17,
+    'Y': 0,
+    'Y7': 7,
+    'G': 0,
+    'PG': 0,
+    '14': 14,
+    'MA': 17,
 }
 
 
@@ -2272,7 +2272,10 @@ def parse_age_limit(s):
         return int(m.group('age'))
     if s in US_RATINGS:
         return US_RATINGS[s]
-    return TV_PARENTAL_GUIDELINES.get(s)
+    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(TV_PARENTAL_GUIDELINES.keys()), s)
+    if m:
+        return TV_PARENTAL_GUIDELINES[m.group(1)]
+    return None
 
 
 def strip_jsonp(code):

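With the table keyed on bare suffixes, a single anchored regex now accepts 'TV-14', 'TV_14' and 'TV14' alike. Restating the new matching outside utils.py for illustration:

    import re

    TV_PARENTAL_GUIDELINES = {'Y': 0, 'Y7': 7, 'G': 0, 'PG': 0, '14': 14, 'MA': 17}

    def tv_age_limit(s):
        # 'TV', an optional '-' or '_' separator, then a known suffix.
        m = re.match(r'^TV[_-]?(%s)$' % '|'.join(TV_PARENTAL_GUIDELINES.keys()), s)
        return TV_PARENTAL_GUIDELINES[m.group(1)] if m else None

    assert tv_age_limit('TV-14') == 14
    assert tv_age_limit('TV_G') == 0
    assert tv_age_limit('TV14') == 14
    assert tv_age_limit('TVMA') == 17
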
From 670dcba8c73ee69545513522676b2c480bc48662 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 23 May 2018 12:13:44 +0100
Subject: [PATCH 077/125] [viewlift] Remove rating format transformation

---
 youtube_dl/extractor/viewlift.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/viewlift.py b/youtube_dl/extractor/viewlift.py
index e466156f6..51a002b11 100644
--- a/youtube_dl/extractor/viewlift.py
+++ b/youtube_dl/extractor/viewlift.py
@@ -214,7 +214,7 @@ class ViewLiftIE(ViewLiftBaseIE):
                 'description': gist.get('description'),
                 'thumbnail': gist.get('videoImageUrl'),
                 'duration': int_or_none(gist.get('runtime')),
-                'age_limit': parse_age_limit(content_data.get('parentalRating', '').replace('_', '-')),
+                'age_limit': parse_age_limit(content_data.get('parentalRating')),
                 'timestamp': int_or_none(gist.get('publishDate'), 1000),
                 'formats': formats,
             }

From 268e132dec96ea9e8a9a3cafb788baf39a498c7d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 23 May 2018 12:15:21 +0100
Subject: [PATCH 078/125] [go90] extract age limit and detect DRM
 protection (#10127)

---
 youtube_dl/extractor/go90.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/youtube_dl/extractor/go90.py b/youtube_dl/extractor/go90.py
index 9b2e1c164..35dde42d0 100644
--- a/youtube_dl/extractor/go90.py
+++ b/youtube_dl/extractor/go90.py
@@ -6,7 +6,9 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
+    ExtractorError,
     int_or_none,
+    parse_age_limit,
     parse_iso8601,
 )
 
@@ -23,6 +25,7 @@ class Go90IE(InfoExtractor):
             'description': 'VICE\'s Karley Sciortino meets with activists who discuss the state\'s strong anti-porn stance. Then, VICE Sports explains NFL contracts.',
             'timestamp': 1491868800,
             'upload_date': '20170411',
+            'age_limit': 14,
         }
     }
 
@@ -33,6 +36,8 @@ class Go90IE(InfoExtractor):
             video_id, headers={
                 'Content-Type': 'application/json; charset=utf-8',
             }, data=b'{"client":"web","device_type":"pc"}')
+        if video_data.get('requires_drm'):
+            raise ExtractorError('This video is DRM protected.', expected=True)
         main_video_asset = video_data['main_video_asset']
 
         episode_number = int_or_none(video_data.get('episode_number'))
@@ -123,4 +128,5 @@ class Go90IE(InfoExtractor):
             'season_number': season_number,
             'episode_number': episode_number,
             'subtitles': subtitles,
+            'age_limit': parse_age_limit(video_data.get('rating')),
         }

From 3bb3ff38a15ccf00686c75af8d6635903632ee87 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 23 May 2018 12:20:05 +0100
Subject: [PATCH 079/125] [test_utils] add tests for
 b836118724122a639a1cb78d55d91724bf1e7251

---
 test/test_utils.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/test_utils.py b/test/test_utils.py
index 14503ab53..f2b51131c 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -519,6 +519,8 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_age_limit('PG-13'), 13)
         self.assertEqual(parse_age_limit('TV-14'), 14)
         self.assertEqual(parse_age_limit('TV-MA'), 17)
+        self.assertEqual(parse_age_limit('TV14'), 14)
+        self.assertEqual(parse_age_limit('TV_G'), 0)
 
     def test_parse_duration(self):
         self.assertEqual(parse_duration(None), None)

From ca0aef42d4fa77123c56c19ef3fe2673645391a2 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 23 May 2018 23:04:12 +0100
Subject: [PATCH 080/125] [viewlift] add support for hoichoi.tv (closes #16536)

---
 youtube_dl/extractor/viewlift.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/viewlift.py b/youtube_dl/extractor/viewlift.py
index 51a002b11..c43d1a1e8 100644
--- a/youtube_dl/extractor/viewlift.py
+++ b/youtube_dl/extractor/viewlift.py
@@ -17,7 +17,7 @@ from ..utils import (
 
 
 class ViewLiftBaseIE(InfoExtractor):
-    _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm)\.com'
+    _DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm)\.com|hoichoi\.tv'
 
 
 class ViewLiftEmbedIE(ViewLiftBaseIE):

From 1139935db78b610d15ade2b667e2a07b4df0ecf0 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 24 May 2018 02:51:47 +0100
Subject: [PATCH 081/125] [nbc] add support for stream.nbcsports.com (closes
 #13911)

---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/nbc.py        | 62 +++++++++++++++++++++++++++++-
 2 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 7d5927131..52e330955 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -666,6 +666,7 @@ from .nbc import (
     NBCOlympicsIE,
     NBCOlympicsStreamIE,
     NBCSportsIE,
+    NBCSportsStreamIE,
     NBCSportsVPlayerIE,
 )
 from .ndr import (
diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index 1b1722cfa..c843f8649 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -1,7 +1,8 @@
 from __future__ import unicode_literals
 
-import re
 import base64
+import json
+import re
 
 from .common import InfoExtractor
 from .theplatform import ThePlatformIE
@@ -175,6 +176,65 @@ class NBCSportsIE(InfoExtractor):
             NBCSportsVPlayerIE._extract_url(webpage), 'NBCSportsVPlayer')
 
 
+class NBCSportsStreamIE(AdobePassIE):
+    _VALID_URL = r'https?://stream\.nbcsports\.com/.+?\bpid=(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://stream.nbcsports.com/nbcsn/generic?pid=206559',
+        'info_dict': {
+            'id': '206559',
+            'ext': 'mp4',
+            'title': 'Amgen Tour of California Women\'s Recap',
+            'description': 'md5:66520066b3b5281ada7698d0ea2aa894',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+        'skip': 'Requires Adobe Pass Authentication',
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        live_source = self._download_json(
+            'http://stream.nbcsports.com/data/live_sources_%s.json' % video_id,
+            video_id)
+        video_source = live_source['videoSources'][0]
+        title = video_source['title']
+        source_url = None
+        for k in ('source', 'msl4source', 'iossource', 'hlsv4'):
+            sk = k + 'Url'
+            source_url = video_source.get(sk) or video_source.get(sk + 'Alt')
+            if source_url:
+                break
+        else:
+            source_url = video_source['ottStreamUrl']
+        is_live = video_source.get('type') == 'live' or video_source.get('status') == 'Live'
+        resource = self._get_mvpd_resource('nbcsports', title, video_id, '')
+        token = self._extract_mvpd_auth(url, video_id, 'nbcsports', resource)
+        tokenized_url = self._download_json(
+            'https://token.playmakerservices.com/cdn',
+            video_id, data=json.dumps({
+                'requestorId': 'nbcsports',
+                'pid': video_id,
+                'application': 'NBCSports',
+                'version': 'v1',
+                'platform': 'desktop',
+                'cdn': 'akamai',
+                'url': video_source['sourceUrl'],
+                'token': base64.b64encode(token.encode()).decode(),
+                'resourceId': base64.b64encode(resource.encode()).decode(),
+            }).encode())['tokenizedUrl']
+        formats = self._extract_m3u8_formats(tokenized_url, video_id, 'mp4')
+        self._sort_formats(formats)
+        return {
+            'id': video_id,
+            'title': self._live_title(title) if is_live else title,
+            'description': live_source.get('description'),
+            'formats': formats,
+            'is_live': is_live,
+        }
+
+
 class CSNNEIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?csnne\.com/video/(?P<id>[0-9a-z-]+)'
 

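Worth noting in the source-URL selection above: Python's for/else runs the else branch only when the loop finishes without a break, so ottStreamUrl is consulted only after all four preferred keys (and their 'Alt' variants) come up empty. A standalone restatement with illustrative input:

    def pick_source_url(video_source):
        for k in ('source', 'msl4source', 'iossource', 'hlsv4'):
            sk = k + 'Url'
            source_url = video_source.get(sk) or video_source.get(sk + 'Alt')
            if source_url:
                break  # a preferred source was found; the else is skipped
        else:
            # no break happened: fall back to the OTT stream
            source_url = video_source['ottStreamUrl']
        return source_url

    assert pick_source_url(
        {'hlsv4UrlAlt': 'https://example.com/master.m3u8'}
    ) == 'https://example.com/master.m3u8'
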
From e8e58c22786918f93e6928d86b878fdc56461c4d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 24 May 2018 11:53:42 +0100
Subject: [PATCH 082/125] [hidive] add support for authentication (closes
 #16534)

---
 youtube_dl/extractor/hidive.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/youtube_dl/extractor/hidive.py b/youtube_dl/extractor/hidive.py
index eee517071..d8f2e682f 100644
--- a/youtube_dl/extractor/hidive.py
+++ b/youtube_dl/extractor/hidive.py
@@ -17,6 +17,9 @@ class HiDiveIE(InfoExtractor):
     # Using X-Forwarded-For results in 403 HTTP error for HLS fragments,
     # so disabling geo bypass completely
     _GEO_BYPASS = False
+    _NETRC_MACHINE = 'hidive'
+    _LOGGED_IN = False
+    _LOGIN_URL = 'https://www.hidive.com/account/login'
 
     _TESTS = [{
         'url': 'https://www.hidive.com/stream/the-comic-artist-and-his-assistants/s01e001',
@@ -31,8 +34,30 @@ class HiDiveIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+        'skip': 'Requires Authentication',
     }]
 
+    def _real_initialize(self):
+        if self._LOGGED_IN:
+            return
+
+        (email, password) = self._get_login_info()
+        if email is None:
+            return
+
+        webpage = self._download_webpage(self._LOGIN_URL, None)
+        form = self._search_regex(
+            r'(?s)<form[^>]+action="/account/login"[^>]*>(.+?)</form>',
+            webpage, 'login form')
+        data = self._hidden_inputs(form)
+        data.update({
+            'Email': email,
+            'Password': password,
+        })
+        self._download_webpage(
+            self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))
+        self._LOGGED_IN = True
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         title, key = mobj.group('title', 'key')
@@ -43,6 +68,7 @@ class HiDiveIE(InfoExtractor):
             data=urlencode_postdata({
                 'Title': title,
                 'Key': key,
+                'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
             }))
 
         restriction = settings.get('restrictionReason')
@@ -79,6 +105,7 @@ class HiDiveIE(InfoExtractor):
                 subtitles.setdefault(cc_lang, []).append({
                     'url': cc_url,
                 })
+        self._sort_formats(formats)
 
         season_number = int_or_none(self._search_regex(
             r's(\d+)', key, 'season number', default=None))

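The login flow above scrapes the login form, carries its hidden inputs (request-verification tokens and the like) over into the POST body, and only then overrides Email and Password. _hidden_inputs() is the real helper; below is a simplified regex-based stand-in to show what it collects (it assumes the attribute order used in the sample markup, which the real helper does not):

    import re

    def hidden_inputs(form_html):
        # Collect name/value pairs of <input type="hidden"> fields so
        # anti-forgery tokens survive the login round trip.
        return dict(re.findall(
            r'<input[^>]+type=["\']hidden["\'][^>]+name=["\']([^"\']+)["\']'
            r'[^>]+value=["\']([^"\']*)["\']',
            form_html))

    form = '<input type="hidden" name="__RequestVerificationToken" value="abc123"/>'
    data = hidden_inputs(form)
    data.update({'Email': 'user@example.com', 'Password': 'hunter2'})
    # data is then urlencoded and POSTed back to _LOGIN_URL.
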
From 3d2a643fdcba126b209b758f2e403742ee631cf3 Mon Sep 17 00:00:00 2001
From: Jakub Wilk <jwilk@jwilk.net>
Date: Thu, 24 May 2018 11:15:03 +0200
Subject: [PATCH 083/125] [imgur] Fix extraction

---
 youtube_dl/extractor/imgur.py | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py
index 67c24a51c..2901960a5 100644
--- a/youtube_dl/extractor/imgur.py
+++ b/youtube_dl/extractor/imgur.py
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     int_or_none,
     js_to_json,
@@ -21,7 +20,7 @@ class ImgurIE(InfoExtractor):
             'id': 'A61SaA1',
             'ext': 'mp4',
             'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
-            'description': 'Imgur: The most awesome images on the Internet.',
+            'description': 'Imgur: The magic of the Internet',
         },
     }, {
         'url': 'https://imgur.com/A61SaA1',
@@ -29,7 +28,7 @@ class ImgurIE(InfoExtractor):
             'id': 'A61SaA1',
             'ext': 'mp4',
             'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
-            'description': 'Imgur: The most awesome images on the Internet.',
+            'description': 'Imgur: The magic of the Internet',
         },
     }, {
         'url': 'https://imgur.com/gallery/YcAQlkx',
@@ -37,8 +36,6 @@ class ImgurIE(InfoExtractor):
             'id': 'YcAQlkx',
             'ext': 'mp4',
             'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
-            'description': 'Imgur: The most awesome images on the Internet.'
-
         }
     }, {
         'url': 'http://imgur.com/topic/Funny/N8rOudd',
@@ -50,8 +47,8 @@ class ImgurIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(
-            compat_urlparse.urljoin(url, video_id), video_id)
+        gifv_url = 'https://i.imgur.com/{id}.gifv'.format(id=video_id)
+        webpage = self._download_webpage(gifv_url, video_id)
 
         width = int_or_none(self._og_search_property(
             'video:width', webpage, default=None))
@@ -107,7 +104,7 @@ class ImgurIE(InfoExtractor):
         return {
             'id': video_id,
             'formats': formats,
-            'description': self._og_search_description(webpage),
+            'description': self._og_search_description(webpage, default=None),
             'title': self._og_search_title(webpage),
         }
 

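The old code joined the requested URL with the video id, which yields different pages depending on the input URL's shape; the new code always lands on the canonical i.imgur.com gifv page. A quick comparison (urljoin here is urllib.parse.urljoin, the Python 3 spelling of compat_urlparse.urljoin):

    from urllib.parse import urljoin

    # A relative join just replaces the last path segment, so gallery and
    # topic URLs resolve back to themselves:
    urljoin('http://imgur.com/topic/Funny/N8rOudd', 'N8rOudd')
    # -> 'http://imgur.com/topic/Funny/N8rOudd'

    # The direct template is shape-independent:
    'https://i.imgur.com/{id}.gifv'.format(id='N8rOudd')
    # -> 'https://i.imgur.com/N8rOudd.gifv'
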
From c561b75c82247188e010b6b53c118bb26b4daaf0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 00:09:15 +0700
Subject: [PATCH 084/125] [peertube] Add extractor (closes #16301, closes
 #16329)

---
 youtube_dl/extractor/extractors.py |   1 +
 youtube_dl/extractor/peertube.py   | 210 +++++++++++++++++++++++++++++
 2 files changed, 211 insertions(+)
 create mode 100644 youtube_dl/extractor/peertube.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 52e330955..374aa185c 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -811,6 +811,7 @@ from .parliamentliveuk import ParliamentLiveUKIE
 from .patreon import PatreonIE
 from .pbs import PBSIE
 from .pearvideo import PearVideoIE
+from .peertube import PeerTubeIE
 from .people import PeopleIE
 from .performgroup import PerformGroupIE
 from .periscope import (
diff --git a/youtube_dl/extractor/peertube.py b/youtube_dl/extractor/peertube.py
new file mode 100644
index 000000000..b086f6f5a
--- /dev/null
+++ b/youtube_dl/extractor/peertube.py
@@ -0,0 +1,210 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    int_or_none,
+    parse_resolution,
+    try_get,
+    unified_timestamp,
+    urljoin,
+)
+
+
+class PeerTubeIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:
+                            # Taken from https://instances.joinpeertube.org/instances
+                            tube\.openalgeria\.org|
+                            peertube\.pointsecu\.fr|
+                            peertube\.nogafa\.org|
+                            peertube\.pl|
+                            megatube\.lilomoino\.fr|
+                            peertube\.tamanoir\.foucry\.net|
+                            peertube\.inapurna\.org|
+                            peertube\.netzspielplatz\.de|
+                            video\.deadsuperhero\.com|
+                            peertube\.devosi\.org|
+                            peertube\.1312\.media|
+                            tube\.worldofhauru\.xyz|
+                            tube\.bootlicker\.party|
+                            skeptikon\.fr|
+                            peertube\.geekshell\.fr|
+                            tube\.opportunis\.me|
+                            peertube\.peshane\.net|
+                            video\.blueline\.mg|
+                            tube\.homecomputing\.fr|
+                            videos\.cloudfrancois\.fr|
+                            peertube\.viviers-fibre\.net|
+                            tube\.ouahpiti\.info|
+                            video\.tedomum\.net|
+                            video\.g3l\.org|
+                            fontube\.fr|
+                            peertube\.gaialabs\.ch|
+                            peertube\.extremely\.online|
+                            peertube\.public-infrastructure\.eu|
+                            tube\.kher\.nl|
+                            peertube\.qtg\.fr|
+                            tube\.22decembre\.eu|
+                            facegirl\.me|
+                            video\.migennes\.net|
+                            janny\.moe|
+                            tube\.p2p\.legal|
+                            video\.atlanti\.se|
+                            troll\.tv|
+                            peertube\.geekael\.fr|
+                            vid\.leotindall\.com|
+                            video\.anormallostpod\.ovh|
+                            p-tube\.h3z\.jp|
+                            tube\.darfweb\.eu|
+                            videos\.iut-orsay\.fr|
+                            peertube\.solidev\.net|
+                            videos\.symphonie-of-code\.fr|
+                            testtube\.ortg\.de|
+                            videos\.cemea\.org|
+                            peertube\.gwendalavir\.eu|
+                            video\.passageenseine\.fr|
+                            videos\.festivalparminous\.org|
+                            peertube\.touhoppai\.moe|
+                            peertube\.duckdns\.org|
+                            sikke\.fi|
+                            peertube\.mastodon\.host|
+                            firedragonvideos\.com|
+                            vidz\.dou\.bet|
+                            peertube\.koehn\.com|
+                            peer\.hostux\.social|
+                            share\.tube|
+                            peertube\.walkingmountains\.fr|
+                            medias\.libox\.fr|
+                            peertube\.moe|
+                            peertube\.xyz|
+                            jp\.peertube\.network|
+                            videos\.benpro\.fr|
+                            tube\.otter\.sh|
+                            peertube\.angristan\.xyz|
+                            peertube\.parleur\.net|
+                            peer\.ecutsa\.fr|
+                            peertube\.heraut\.eu|
+                            peertube\.tifox\.fr|
+                            peertube\.maly\.io|
+                            vod\.mochi\.academy|
+                            exode\.me|
+                            coste\.video|
+                            tube\.aquilenet\.fr|
+                            peertube\.gegeweb\.eu|
+                            framatube\.org|
+                            thinkerview\.video|
+                            tube\.conferences-gesticulees\.net|
+                            peertube\.datagueule\.tv|
+                            video\.lqdn\.fr|
+                            meilleurtube\.delire\.party|
+                            tube\.mochi\.academy|
+                            peertube\.dav\.li|
+                            media\.zat\.im|
+                            pytu\.be|
+                            peertube\.valvin\.fr|
+                            peertube\.nsa\.ovh|
+                            video\.colibris-outilslibres\.org|
+                            video\.hispagatos\.org|
+                            tube\.svnet\.fr|
+                            peertube\.video|
+                            videos\.lecygnenoir\.info|
+                            peertube3\.cpy\.re|
+                            peertube2\.cpy\.re|
+                            videos\.tcit\.fr|
+                            peertube\.cpy\.re
+                        )
+                        /videos/watch/(?P<id>[^/?#&]+)
+                    '''
+    _TESTS = [{
+        'url': 'https://peertube.moe/videos/watch/2790feb0-8120-4e63-9af3-c943c69f5e6c',
+        'md5': '80f24ff364cc9d333529506a263e7feb',
+        'info_dict': {
+            'id': '2790feb0-8120-4e63-9af3-c943c69f5e6c',
+            'ext': 'mp4',
+            'title': 'wow',
+            'description': 'wow such video, so gif',
+            'thumbnail': r're:https?://.*\.(?:jpg|png)',
+            'timestamp': 1519297480,
+            'upload_date': '20180222',
+            'uploader': 'Luclu7',
+            'uploader_id': '7fc42640-efdb-4505-a45d-a15b1a5496f1',
+            'uploader_url': 'https://peertube.nsa.ovh/accounts/luclu7',
+            'license': 'Unknown',
+            'duration': 3,
+            'view_count': int,
+            'like_count': int,
+            'dislike_count': int,
+            'tags': list,
+            'categories': list,
+        }
+    }, {
+        'url': 'https://peertube.tamanoir.foucry.net/videos/watch/0b04f13d-1e18-4f1d-814e-4979aa7c9c44',
+        'only_matching': True,
+    }, {
+        # nsfw
+        'url': 'https://tube.22decembre.eu/videos/watch/9bb88cd3-9959-46d9-9ab9-33d2bb704c39',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        video = self._download_json(
+            urljoin(url, '/api/v1/videos/%s' % video_id), video_id)
+
+        title = video['name']
+
+        formats = []
+        for file_ in video['files']:
+            if not isinstance(file_, dict):
+                continue
+            file_url = file_.get('fileUrl')
+            if not file_url or not isinstance(file_url, compat_str):
+                continue
+            file_size = int_or_none(file_.get('size'))
+            format_id = try_get(
+                file_, lambda x: x['resolution']['label'], compat_str)
+            f = parse_resolution(format_id)
+            f.update({
+                'url': file_url,
+                'format_id': format_id,
+                'filesize': file_size,
+            })
+            formats.append(f)
+        self._sort_formats(formats)
+
+        def account_data(field):
+            return try_get(video, lambda x: x['account'][field], compat_str)
+
+        category = try_get(video, lambda x: x['category']['label'], compat_str)
+        categories = [category] if category else None
+
+        nsfw = video.get('nsfw')
+        if isinstance(nsfw, bool):
+            age_limit = 18 if nsfw else 0
+        else:
+            age_limit = None
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': video.get('description'),
+            'thumbnail': urljoin(url, video.get('thumbnailPath')),
+            'timestamp': unified_timestamp(video.get('publishedAt')),
+            'uploader': account_data('displayName'),
+            'uploader_id': account_data('uuid'),
+            'uploader_url': account_data('url'),
+            'license': try_get(
+                video, lambda x: x['licence']['label'], compat_str),
+            'duration': int_or_none(video.get('duration')),
+            'view_count': int_or_none(video.get('views')),
+            'like_count': int_or_none(video.get('likes')),
+            'dislike_count': int_or_none(video.get('dislikes')),
+            'age_limit': age_limit,
+            'tags': try_get(video, lambda x: x['tags'], list),
+            'categories': categories,
+            'formats': formats,
+        }

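Two helpers do the heavy lifting in the format loop above: try_get guards the nested ['resolution']['label'] lookup, and parse_resolution turns a label like '720p' into format fields. Illustrative data shaped like a PeerTube v1 API file entry (only the fields the extractor reads are shown):

    from youtube_dl.utils import parse_resolution

    file_ = {
        'fileUrl': 'https://peertube.example/static/webseed/xyz-720.mp4',
        'size': 1048576,
        'resolution': {'label': '720p'},
    }

    f = parse_resolution(file_['resolution']['label'])  # {'height': 720}
    f.update({
        'url': file_['fileUrl'],
        'format_id': file_['resolution']['label'],
        'filesize': file_['size'],
    })
    # f carries the parsed height plus the raw label as format_id.
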
From f2fc63a5a873391b9ac15642507a2eae71e42906 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 00:15:38 +0700
Subject: [PATCH 085/125] [peertube] Add support for embed and API URLs

---
 youtube_dl/extractor/peertube.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/peertube.py b/youtube_dl/extractor/peertube.py
index b086f6f5a..61c41add0 100644
--- a/youtube_dl/extractor/peertube.py
+++ b/youtube_dl/extractor/peertube.py
@@ -116,7 +116,8 @@ class PeerTubeIE(InfoExtractor):
                             videos\.tcit\.fr|
                             peertube\.cpy\.re
                         )
-                        /videos/watch/(?P<id>[^/?#&]+)
+                        /(?:videos/(?:watch|embed)|api/v\d/videos)/
+                        (?P<id>[^/?#&]+)
                     '''
     _TESTS = [{
         'url': 'https://peertube.moe/videos/watch/2790feb0-8120-4e63-9af3-c943c69f5e6c',
@@ -147,6 +148,12 @@ class PeerTubeIE(InfoExtractor):
         # nsfw
         'url': 'https://tube.22decembre.eu/videos/watch/9bb88cd3-9959-46d9-9ab9-33d2bb704c39',
         'only_matching': True,
+    }, {
+        'url': 'https://tube.22decembre.eu/videos/embed/fed67262-6edb-4d1c-833b-daa9085c71d7',
+        'only_matching': True,
+    }, {
+        'url': 'https://tube.openalgeria.org/api/v1/videos/c1875674-97d0-4c94-a058-3f7e64c962e8',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

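The widened path alternation above makes watch pages, embed pages and API
endpoints all resolve to the same video id. Tested in isolation:

    import re

    # Path portion of the new _VALID_URL (host alternation omitted).
    path_re = r'/(?:videos/(?:watch|embed)|api/v\d/videos)/(?P<id>[^/?#&]+)'

    for path in ('/videos/watch/abc', '/videos/embed/abc', '/api/v1/videos/abc'):
        print(re.search(path_re, path).group('id'))
    # prints 'abc' three times
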
From 6bd499e8ca769cf69c4b24fa2d7a751d7869b679 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 00:28:30 +0700
Subject: [PATCH 086/125] [peertube] Add support for generic embeds

---
 youtube_dl/extractor/generic.py  | 15 +++++++++++++++
 youtube_dl/extractor/peertube.py | 23 +++++++++++++++++------
 2 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 76852f9dc..47ac139c9 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -108,6 +108,7 @@ from .yapfiles import YapFilesIE
 from .vice import ViceIE
 from .xfileshare import XFileShareIE
 from .cloudflarestream import CloudflareStreamIE
+from .peertube import PeerTubeIE
 
 
 class GenericIE(InfoExtractor):
@@ -2012,6 +2013,15 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            # PeerTube embed
+            'url': 'https://joinpeertube.org/fr/home/',
+            'info_dict': {
+                'id': 'home',
+                'title': 'Reprenez le contrôle de vos vidéos ! #JoinPeertube',
+            },
+            'playlist_count': 2,
+        },
         {
             'url': 'http://share-videos.se/auto/video/83645793?uid=13',
             'md5': 'b68d276de422ab07ee1d49388103f457',
@@ -3029,6 +3039,11 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
 
+        peertube_urls = PeerTubeIE._extract_urls(webpage)
+        if peertube_urls:
+            return self.playlist_from_matches(
+                peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
+
         sharevideos_urls = [mobj.group('url') for mobj in re.finditer(
             r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
             webpage)]
diff --git a/youtube_dl/extractor/peertube.py b/youtube_dl/extractor/peertube.py
index 61c41add0..a481b3151 100644
--- a/youtube_dl/extractor/peertube.py
+++ b/youtube_dl/extractor/peertube.py
@@ -1,6 +1,8 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
@@ -13,9 +15,7 @@ from ..utils import (
 
 
 class PeerTubeIE(InfoExtractor):
-    _VALID_URL = r'''(?x)
-                    https?://
-                        (?:
+    _INSTANCES_RE = r'''(?:
                             # Taken from https://instances.joinpeertube.org/instances
                             tube\.openalgeria\.org|
                             peertube\.pointsecu\.fr|
@@ -115,10 +115,13 @@ class PeerTubeIE(InfoExtractor):
                             peertube2\.cpy\.re|
                             videos\.tcit\.fr|
                             peertube\.cpy\.re
-                        )
+                        )'''
+    _VALID_URL = r'''(?x)
+                    https?://
+                        %s
                         /(?:videos/(?:watch|embed)|api/v\d/videos)/
-                        (?P<id>[^/?#&]+)
-                    '''
+                        (?P<id>[^/?\#&]+)
+                    ''' % _INSTANCES_RE
     _TESTS = [{
         'url': 'https://peertube.moe/videos/watch/2790feb0-8120-4e63-9af3-c943c69f5e6c',
         'md5': '80f24ff364cc9d333529506a263e7feb',
@@ -156,6 +159,14 @@ class PeerTubeIE(InfoExtractor):
         'only_matching': True,
     }]
 
+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'''(?x)<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//%s/videos/embed/[^/?\#&]+)\1'''
+                % PeerTubeIE._INSTANCES_RE, webpage)]
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 

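The _extract_urls() hook added here is what the generic extractor calls on
arbitrary HTML. Roughly, it behaves like the following self-contained sketch;
the hostname alternation is abbreviated to two instances, whereas the real
_INSTANCES_RE enumerates every known one:

    import re

    INSTANCES_RE = r'(?:peertube\.cpy\.re|videos\.tcit\.fr)'  # abbreviated stand-in
    EMBED_RE = (
        r'(?x)<iframe[^>]+\bsrc=(["\'])'
        r'(?P<url>(?:https?:)?//%s/videos/embed/[^/?\#&]+)\1' % INSTANCES_RE
    )

    webpage = '<iframe src="https://peertube.cpy.re/videos/embed/abc-123"></iframe>'
    print([m.group('url') for m in re.finditer(EMBED_RE, webpage)])
    # ['https://peertube.cpy.re/videos/embed/abc-123']
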
From b39f42ee92a3cd669da24db9798e1dc9b574720f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A1s=20Veres-Szentkir=C3=A1lyi?= <vsza@vsza.hu>
Date: Fri, 25 May 2018 19:46:05 +0200
Subject: [PATCH 087/125] [indavideo] Sign download URLs

---
 youtube_dl/extractor/indavideo.py | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py
index 11cf3c609..15b766fb2 100644
--- a/youtube_dl/extractor/indavideo.py
+++ b/youtube_dl/extractor/indavideo.py
@@ -2,10 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     int_or_none,
     parse_age_limit,
     parse_iso8601,
+    update_url_query,
 )
 
 
@@ -58,11 +60,10 @@ class IndavideoEmbedIE(InfoExtractor):
             if flv_url not in video_urls:
                 video_urls.append(flv_url)
 
-        formats = [{
-            'url': video_url,
-            'height': int_or_none(self._search_regex(
-                r'\.(\d{3,4})\.mp4(?:\?|$)', video_url, 'height', default=None)),
-        } for video_url in video_urls]
+        filesh = video.get('filesh')
+        formats = [
+            self.video_url_to_format(video_url, filesh)
+            for video_url in video_urls]
         self._sort_formats(formats)
 
         timestamp = video.get('date')
@@ -90,6 +91,18 @@ class IndavideoEmbedIE(InfoExtractor):
             'formats': formats,
         }
 
+    def video_url_to_format(self, video_url, filesh):
+        height = int_or_none(self._search_regex(
+            r'\.(\d{3,4})\.mp4(?:\?|$)', video_url, 'height', default=None))
+        if height and filesh:
+            token = filesh.get(compat_str(height))
+            if token is not None:
+                video_url = update_url_query(video_url, {'token': token})
+        return {
+            'url': video_url,
+            'height': height,
+        }
+
 
 class IndavideoIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)'

From 2a7c6befc16f72df5368cb4adccd1cd84fd432d7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 01:09:44 +0700
Subject: [PATCH 088/125] [indavideo] Fix extraction (closes #11221)

---
 youtube_dl/extractor/indavideo.py | 48 +++++++++++++++++++------------
 1 file changed, 29 insertions(+), 19 deletions(-)

diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py
index 15b766fb2..2946c7b84 100644
--- a/youtube_dl/extractor/indavideo.py
+++ b/youtube_dl/extractor/indavideo.py
@@ -15,7 +15,7 @@ class IndavideoEmbedIE(InfoExtractor):
     _VALID_URL = r'https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)'
     _TESTS = [{
         'url': 'http://indavideo.hu/player/video/1bdc3c6d80/',
-        'md5': 'f79b009c66194acacd40712a6778acfa',
+        'md5': 'c8a507a1c7410685f83a06eaeeaafeab',
         'info_dict': {
             'id': '1837039',
             'ext': 'mp4',
@@ -47,7 +47,14 @@ class IndavideoEmbedIE(InfoExtractor):
 
         title = video['title']
 
-        video_urls = video.get('video_files', [])
+        video_urls = []
+
+        video_files = video.get('video_files')
+        if isinstance(video_files, list):
+            video_urls.extend(video_files)
+        elif isinstance(video_files, dict):
+            video_urls.extend(video_files.values())
+
         video_file = video.get('video_file')
         if video_file:
             video_urls.append(video_file)
@@ -61,9 +68,22 @@ class IndavideoEmbedIE(InfoExtractor):
                 video_urls.append(flv_url)
 
         filesh = video.get('filesh')
-        formats = [
-            self.video_url_to_format(video_url, filesh)
-            for video_url in video_urls]
+
+        formats = []
+        for video_url in video_urls:
+            height = int_or_none(self._search_regex(
+                r'\.(\d{3,4})\.mp4(?:\?|$)', video_url, 'height', default=None))
+            if filesh:
+                if not height:
+                    continue
+                token = filesh.get(compat_str(height))
+                if token is None:
+                    continue
+                video_url = update_url_query(video_url, {'token': token})
+            formats.append({
+                'url': video_url,
+                'height': height,
+            })
         self._sort_formats(formats)
 
         timestamp = video.get('date')
@@ -91,18 +111,6 @@ class IndavideoEmbedIE(InfoExtractor):
             'formats': formats,
         }
 
-    def video_url_to_format(self, video_url, filesh):
-        height = int_or_none(self._search_regex(
-            r'\.(\d{3,4})\.mp4(?:\?|$)', video_url, 'height', default=None))
-        if height and filesh:
-            token = filesh.get(compat_str(height))
-            if token is not None:
-                video_url = update_url_query(video_url, {'token': token})
-        return {
-            'url': video_url,
-            'height': height,
-        }
-
 
 class IndavideoIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)'
@@ -122,7 +130,7 @@ class IndavideoIE(InfoExtractor):
             'upload_date': '20140127',
             'duration': 7,
             'age_limit': 0,
-            'tags': ['vicces', 'macska', 'cica', 'ügyes', 'nevetés', 'játszik', 'Cukiság', 'Jet_Pack'],
+            'tags': list,
         },
     }, {
         'url': 'http://index.indavideo.hu/video/2015_0728_beregszasz',
@@ -146,7 +154,9 @@ class IndavideoIE(InfoExtractor):
 
         webpage = self._download_webpage(url, display_id)
         embed_url = self._search_regex(
-            r'<link[^>]+rel="video_src"[^>]+href="(.+?)"', webpage, 'embed url')
+            (r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.indavideo\.hu/player/video/.+?)\1',
+             r'<link[^>]+rel="video_src"[^>]+href="(?P<url>.+?)"'),
+            webpage, 'embed url', group='url')
 
         return {
             '_type': 'url_transparent',

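The heart of this fix is the signing step: when the API returns a filesh map,
a URL only survives if its detected height has a matching token, which then
gets appended as a query parameter. The same logic condensed, with made-up
inputs:

    import re

    from youtube_dl.utils import update_url_query

    filesh = {'360': 'tok360', '720': 'tok720'}  # hypothetical token map
    video_url = 'https://example.indavideo.hu/video.360.mp4?x=1'

    m = re.search(r'\.(\d{3,4})\.mp4(?:\?|$)', video_url)
    height = m and m.group(1)          # kept as a string: filesh keys are strings
    token = filesh.get(height)
    if token is not None:
        video_url = update_url_query(video_url, {'token': token})
    # -> https://example.indavideo.hu/video.360.mp4?x=1&token=tok360
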
From aee36ca832ec3a5696c40707098d97be0353e997 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 01:25:40 +0700
Subject: [PATCH 089/125] [indavideo] Add support for generic embeds (closes
 #11989)

---
 youtube_dl/extractor/extractors.py |  5 +--
 youtube_dl/extractor/generic.py    | 24 ++++++++++
 youtube_dl/extractor/indavideo.py  | 70 +++++++-----------------------
 3 files changed, 41 insertions(+), 58 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 374aa185c..c9b49a0cd 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -469,10 +469,7 @@ from .imgur import (
 )
 from .ina import InaIE
 from .inc import IncIE
-from .indavideo import (
-    IndavideoIE,
-    IndavideoEmbedIE,
-)
+from .indavideo import IndavideoEmbedIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE, InstagramUserIE
 from .internazionale import InternazionaleIE
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 47ac139c9..0292e0458 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -109,6 +109,7 @@ from .vice import ViceIE
 from .xfileshare import XFileShareIE
 from .cloudflarestream import CloudflareStreamIE
 from .peertube import PeerTubeIE
+from .indavideo import IndavideoEmbedIE
 
 
 class GenericIE(InfoExtractor):
@@ -2022,6 +2023,24 @@ class GenericIE(InfoExtractor):
             },
             'playlist_count': 2,
         },
+        {
+            # Indavideo embed
+            'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
+            'info_dict': {
+                'id': '1693903',
+                'ext': 'mp4',
+                'title': 'Így kell otthon hamburgert sütni',
+                'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
+                'timestamp': 1426330212,
+                'upload_date': '20150314',
+                'uploader': 'StreetKitchen',
+                'uploader_id': '546363',
+            },
+            'add_ie': [IndavideoEmbedIE.ie_key()],
+            'params': {
+                'skip_download': True,
+            },
+        },
         {
             'url': 'http://share-videos.se/auto/video/83645793?uid=13',
             'md5': 'b68d276de422ab07ee1d49388103f457',
@@ -3044,6 +3063,11 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
 
+        indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
+        if indavideo_urls:
+            return self.playlist_from_matches(
+                indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
+
         sharevideos_urls = [mobj.group('url') for mobj in re.finditer(
             r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
             webpage)]
diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py
index 2946c7b84..2b5b2b5b0 100644
--- a/youtube_dl/extractor/indavideo.py
+++ b/youtube_dl/extractor/indavideo.py
@@ -1,6 +1,8 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
@@ -38,6 +40,20 @@ class IndavideoEmbedIE(InfoExtractor):
         'only_matching': True,
     }]
 
+    # Some example URLs covered by generic extractor:
+    #   http://indavideo.hu/video/Vicces_cica_1
+    #   http://index.indavideo.hu/video/2015_0728_beregszasz
+    #   http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko
+    #   http://erotika.indavideo.hu/video/Amator_tini_punci
+    #   http://film.indavideo.hu/video/f_hrom_nagymamm_volt
+    #   http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes
+
+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//embed\.indavideo\.hu/player/video/[\da-f]+)',
+            webpage)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
@@ -110,57 +126,3 @@ class IndavideoEmbedIE(InfoExtractor):
             'tags': tags,
             'formats': formats,
         }
-
-
-class IndavideoIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)'
-    _TESTS = [{
-        'url': 'http://indavideo.hu/video/Vicces_cica_1',
-        'md5': '8c82244ba85d2a2310275b318eb51eac',
-        'info_dict': {
-            'id': '1335611',
-            'display_id': 'Vicces_cica_1',
-            'ext': 'mp4',
-            'title': 'Vicces cica',
-            'description': 'Játszik a tablettel. :D',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'uploader': 'Jet_Pack',
-            'uploader_id': '491217',
-            'timestamp': 1390821212,
-            'upload_date': '20140127',
-            'duration': 7,
-            'age_limit': 0,
-            'tags': list,
-        },
-    }, {
-        'url': 'http://index.indavideo.hu/video/2015_0728_beregszasz',
-        'only_matching': True,
-    }, {
-        'url': 'http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko',
-        'only_matching': True,
-    }, {
-        'url': 'http://erotika.indavideo.hu/video/Amator_tini_punci',
-        'only_matching': True,
-    }, {
-        'url': 'http://film.indavideo.hu/video/f_hrom_nagymamm_volt',
-        'only_matching': True,
-    }, {
-        'url': 'http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-
-        webpage = self._download_webpage(url, display_id)
-        embed_url = self._search_regex(
-            (r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.indavideo\.hu/player/video/.+?)\1',
-             r'<link[^>]+rel="video_src"[^>]+href="(?P<url>.+?)"'),
-            webpage, 'embed url', group='url')
-
-        return {
-            '_type': 'url_transparent',
-            'ie_key': 'IndavideoEmbed',
-            'url': embed_url,
-            'display_id': display_id,
-        }

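One subtlety in the new IndavideoEmbedIE._extract_urls(): because the pattern
contains exactly one capturing group, re.findall() returns that group's text
directly, so the result is already the list of embed URLs:

    import re

    webpage = '<iframe src="//embed.indavideo.hu/player/video/1bdc3c6d80"></iframe>'
    print(re.findall(
        r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//embed\.indavideo\.hu/player/video/[\da-f]+)',
        webpage))
    # ['//embed.indavideo.hu/player/video/1bdc3c6d80']
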
From f4d261b765a17ef2beccec78680ec693c7df014c Mon Sep 17 00:00:00 2001
From: Enes <enessolak99@gmail.com>
Date: Tue, 24 Apr 2018 22:48:40 +0300
Subject: [PATCH 090/125] [izlesene] Fix extraction (closes #16233)

---
 youtube_dl/extractor/izlesene.py | 33 ++++++++++----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)

diff --git a/youtube_dl/extractor/izlesene.py b/youtube_dl/extractor/izlesene.py
index b1d72177d..5b2095490 100644
--- a/youtube_dl/extractor/izlesene.py
+++ b/youtube_dl/extractor/izlesene.py
@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..compat import compat_urllib_parse_unquote
 from ..utils import (
@@ -72,7 +70,7 @@ class IzleseneIE(InfoExtractor):
             'uploadDate', webpage, 'upload date'))
 
         duration = float_or_none(self._html_search_regex(
-            r'"videoduration"\s*:\s*"([^"]+)"',
+            r'videoduration\s*=\s*\'([^\']+)\'',
             webpage, 'duration', fatal=False), scale=1000)
 
         view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
@@ -80,29 +78,18 @@ class IzleseneIE(InfoExtractor):
             r'comment_count\s*=\s*\'([^\']+)\';',
             webpage, 'comment_count', fatal=False)
 
-        content_url = self._html_search_meta(
-            'contentURL', webpage, 'content URL', fatal=False)
-        ext = determine_ext(content_url, 'mp4')
-
-        # Might be empty for some videos.
-        streams = self._html_search_regex(
-            r'"qualitylevel"\s*:\s*"([^"]+)"', webpage, 'streams', default='')
+        streams_json = self._html_search_regex(
+            r'_videoObj\s*=\s*(.+);', webpage, 'streams')
+        streams = self._parse_json(streams_json, video_id)
 
         formats = []
-        if streams:
-            for stream in streams.split('|'):
-                quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
-                formats.append({
-                    'format_id': '%sp' % quality if quality else 'sd',
-                    'url': compat_urllib_parse_unquote(url),
-                    'ext': ext,
-                })
-        else:
-            stream_url = self._search_regex(
-                r'"streamurl"\s*:\s*"([^"]+)"', webpage, 'stream URL')
+        for stream in streams.get('media').get('level'):
+            url = stream.get('source')
+            ext = determine_ext(url, 'mp4')
+            quality = stream.get('value')
             formats.append({
-                'format_id': 'sd',
-                'url': compat_urllib_parse_unquote(stream_url),
+                'format_id': '%sp' % quality,
+                'url': compat_urllib_parse_unquote(url),
                 'ext': ext,
             })
 

From 03fad17cb6ae24259808078a165c287c23d77f77 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 01:51:38 +0700
Subject: [PATCH 091/125] [izlesene] Improve extraction and fix issues (closes
 #16407, closes #16271)

---
 youtube_dl/extractor/izlesene.py | 55 +++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 23 deletions(-)

diff --git a/youtube_dl/extractor/izlesene.py b/youtube_dl/extractor/izlesene.py
index 5b2095490..f8fca6c8f 100644
--- a/youtube_dl/extractor/izlesene.py
+++ b/youtube_dl/extractor/izlesene.py
@@ -2,7 +2,10 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote
+from ..compat import (
+    compat_str,
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
     determine_ext,
     float_or_none,
@@ -55,12 +58,33 @@ class IzleseneIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        url = 'http://www.izlesene.com/video/%s' % video_id
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage('http://www.izlesene.com/video/%s' % video_id, video_id)
+
+        video = self._parse_json(
+            self._search_regex(
+                r'videoObj\s*=\s*({.+?})\s*;\s*\n', webpage, 'streams'),
+            video_id)
+
+        title = video.get('videoTitle') or self._og_search_title(webpage)
+
+        formats = []
+        for stream in video['media']['level']:
+            source_url = stream.get('source')
+            if not source_url or not isinstance(source_url, compat_str):
+                continue
+            ext = determine_ext(source_url, 'mp4')
+            quality = stream.get('value')
+            height = int_or_none(quality)
+            formats.append({
+                'format_id': '%sp' % quality if quality else 'sd',
+                'url': compat_urllib_parse_unquote(source_url),
+                'ext': ext,
+                'height': height,
+            })
+        self._sort_formats(formats)
 
-        title = self._og_search_title(webpage)
         description = self._og_search_description(webpage, default=None)
-        thumbnail = self._proto_relative_url(
+        thumbnail = video.get('posterURL') or self._proto_relative_url(
             self._og_search_thumbnail(webpage), scheme='http:')
 
         uploader = self._html_search_regex(
@@ -69,30 +93,15 @@ class IzleseneIE(InfoExtractor):
         timestamp = parse_iso8601(self._html_search_meta(
             'uploadDate', webpage, 'upload date'))
 
-        duration = float_or_none(self._html_search_regex(
-            r'videoduration\s*=\s*\'([^\']+)\'',
-            webpage, 'duration', fatal=False), scale=1000)
+        duration = float_or_none(video.get('duration') or self._html_search_regex(
+            r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
+            webpage, 'duration', fatal=False, group='value'), scale=1000)
 
         view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
         comment_count = self._html_search_regex(
             r'comment_count\s*=\s*\'([^\']+)\';',
             webpage, 'comment_count', fatal=False)
 
-        streams_json = self._html_search_regex(
-            r'_videoObj\s*=\s*(.+);', webpage, 'streams')
-        streams = self._parse_json(streams_json, video_id)
-
-        formats = []
-        for stream in streams.get('media').get('level'):
-            url = stream.get('source')
-            ext = determine_ext(url, 'mp4')
-            quality = stream.get('value')
-            formats.append({
-                'format_id': '%sp' % quality,
-                'url': compat_urllib_parse_unquote(url),
-                'ext': ext,
-            })
-
         return {
             'id': video_id,
             'title': title,

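The hardened duration regex deserves a note: it captures the opening quote and
backreferences it, and the tempered dot (?:(?!\1).)+ stops the value at the
matching close quote regardless of quote style. Standalone:

    import re

    pattern = r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'

    print(re.search(pattern, "videoduration = '123000';").group('value'))
    # 123000 (milliseconds; scale=1000 then converts to seconds)
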
From 9ef5cdb5cb637660decbc82117d5d6790c48ad99 Mon Sep 17 00:00:00 2001
From: rhhayward <rhhayward@att.net>
Date: Fri, 25 May 2018 14:13:29 -0500
Subject: [PATCH 092/125] [audiomack] Stringify video id (closes #15310)

---
 youtube_dl/extractor/audiomack.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/audiomack.py b/youtube_dl/extractor/audiomack.py
index f3bd4d444..62049b921 100644
--- a/youtube_dl/extractor/audiomack.py
+++ b/youtube_dl/extractor/audiomack.py
@@ -65,7 +65,7 @@ class AudiomackIE(InfoExtractor):
             return {'_type': 'url', 'url': api_response['url'], 'ie_key': 'Soundcloud'}
 
         return {
-            'id': api_response.get('id', album_url_tag),
+            'id': compat_str(api_response.get('id', album_url_tag)),
             'uploader': api_response.get('artist'),
             'title': api_response.get('title'),
             'url': api_response['url'],

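The change above matters because Audiomack's API may return the id as a JSON
number, while youtube-dl expects info-dict ids to be strings; the album URL
tag fallback is already a string. Illustrated with a fabricated response:

    from youtube_dl.compat import compat_str

    api_response = {'id': 15310}       # id can arrive as a number
    album_url_tag = 'some-track-slug'  # fallback used when 'id' is absent

    print(compat_str(api_response.get('id', album_url_tag)))
    # '15310' -- a string either way
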
From bdbcc8eecb6d498e5c33dcbfb330d7d82021b3f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Petr=20Nov=C3=A1k?= <petr.novak@cosmoboy.cz>
Date: Fri, 25 May 2018 21:15:50 +0200
Subject: [PATCH 093/125] [dvtv] Remove dead test

---
 youtube_dl/extractor/dvtv.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/youtube_dl/extractor/dvtv.py b/youtube_dl/extractor/dvtv.py
index 3f760888e..20996962a 100644
--- a/youtube_dl/extractor/dvtv.py
+++ b/youtube_dl/extractor/dvtv.py
@@ -91,17 +91,6 @@ class DVTVIE(InfoExtractor):
     }, {
         'url': 'http://video.aktualne.cz/v-cechach-poprve-zazni-zelenkova-zrestaurovana-mse/r~45b4b00483ec11e4883b002590604f2e/',
         'only_matching': True,
-    }, {
-        'url': 'https://video.aktualne.cz/dvtv/babis-a-zeman-nesou-vinu-za-to-ze-nemame-jasno-v-tom-kdo-bud/r~026afb54fad711e79704ac1f6b220ee8/',
-        'md5': '87defe16681b1429c91f7a74809823c6',
-        'info_dict': {
-            'id': 'f5ae72f6fad611e794dbac1f6b220ee8',
-            'ext': 'mp4',
-            'title': 'Babiš a Zeman nesou vinu za to, že nemáme jasno v tom, kdo bude vládnout, říká Pekarová Adamová',
-        },
-        'params': {
-            'skip_download': True,
-        },
     }]
 
     def _parse_video_metadata(self, js, video_id, live_js=None):

From 5a16c9d9d37389d163b0004f1c9332764a50ef83 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 25 May 2018 23:12:18 +0100
Subject: [PATCH 094/125] [utils] Keep the original TV_PARENTAL_GUIDELINES dict

---
 youtube_dl/utils.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index d61af8837..7b4fd882f 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2253,12 +2253,12 @@ US_RATINGS = {
 
 
 TV_PARENTAL_GUIDELINES = {
-    'Y': 0,
-    'Y7': 7,
-    'G': 0,
-    'PG': 0,
-    '14': 14,
-    'MA': 17,
+    'TV-Y': 0,
+    'TV-Y7': 7,
+    'TV-G': 0,
+    'TV-PG': 0,
+    'TV-14': 14,
+    'TV-MA': 17,
 }
 
 
@@ -2272,9 +2272,9 @@ def parse_age_limit(s):
         return int(m.group('age'))
     if s in US_RATINGS:
         return US_RATINGS[s]
-    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(TV_PARENTAL_GUIDELINES.keys()), s)
+    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
     if m:
-        return TV_PARENTAL_GUIDELINES[m.group(1)]
+        return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
     return None
 
 

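After this patch the table keys carry the full 'TV-' prefix (so other call
sites can look ratings up verbatim), while parse_age_limit() still tolerates
'TV-', 'TV_' and bare 'TV' spellings by reassembling the canonical key.
Expected behavior, as a quick sanity sketch:

    from youtube_dl.utils import parse_age_limit

    print(parse_age_limit('TV-MA'))  # 17
    print(parse_age_limit('TV_Y7'))  # 7
    print(parse_age_limit('TV14'))   # 14
    print(parse_age_limit('PG-13'))  # 13, via the separate US_RATINGS table
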
From 38e4e8ab80b784f59b3a3ef6d313a70e13f17cd3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 12:58:34 +0700
Subject: [PATCH 095/125] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 08233cd5b..9d0264bf7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,31 @@
+version <unreleased>
+
+Core
+* [utils] Improve parse_age_limit
+
+Extractors
+* [audiomack] Stringify video id (#15310)
+* [izlesene] Fix extraction (#16233, #16271, #16407)
++ [indavideo] Add support for generic embeds (#11989)
+* [indavideo] Fix extraction (#11221)
+* [indavideo] Sign download URLs (#16174)
++ [peertube] Add support for PeerTube based sites (#16301, #16329)
+* [imgur] Fix extraction (#16537)
++ [hidive] Add support for authentication (#16534)
++ [nbc] Add support for stream.nbcsports.com (#13911)
++ [viewlift] Add support for hoichoi.tv (#16536)
+* [go90] Extract age limit and detect DRM protection (#10127)
+* [viewlift] Fix extraction for snagfilms.com (#15766)
+* [globo] Improve extraction (#4189)
+    * Add support for authentication
+    * Simplify URL signing
+    * Extract DASH and MSS formats
+* [leeco] Fix extraction (#16464)
+* [teamcoco] Add fallback for format extraction (#16484)
+* [teamcoco] Improve URL regular expression (#16484)
+* [imdb] Improve extraction (#4085, #14557)
+
+
 version 2018.05.18
 
 Extractors

From 0934c9d4faadbfd2b076d13c7e24f4bf039cdc79 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 13:02:21 +0700
Subject: [PATCH 096/125] release 2018.05.26

---
 .github/ISSUE_TEMPLATE.md |  6 +++---
 ChangeLog                 |  2 +-
 README.md                 | 13 ++++++-------
 docs/supportedsites.md    |  3 ++-
 youtube_dl/version.py     |  2 +-
 5 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 7d9de5171..c4d4e534e 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.18*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.18**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.26*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.26**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.05.18
+[debug] youtube-dl version 2018.05.26
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/ChangeLog b/ChangeLog
index 9d0264bf7..280390ea0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2018.05.26
 
 Core
 * [utils] Improve parse_age_limit
diff --git a/README.md b/README.md
index 20982b0f1..499a0c206 100644
--- a/README.md
+++ b/README.md
@@ -93,8 +93,8 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
 
 ## Network Options:
     --proxy URL                      Use the specified HTTP/HTTPS/SOCKS proxy.
-                                     To enable experimental SOCKS proxy, specify
-                                     a proper scheme. For example
+                                     To enable SOCKS proxy, specify a proper
+                                     scheme. For example
                                      socks5://127.0.0.1:1080/. Pass in an empty
                                      string (--proxy "") for direct connection
     --socket-timeout SECONDS         Time to wait before giving up, in seconds
@@ -109,16 +109,15 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
                                      option is not present) is used for the
                                      actual downloading.
     --geo-bypass                     Bypass geographic restriction via faking
-                                     X-Forwarded-For HTTP header (experimental)
+                                     X-Forwarded-For HTTP header
     --no-geo-bypass                  Do not bypass geographic restriction via
                                      faking X-Forwarded-For HTTP header
-                                     (experimental)
     --geo-bypass-country CODE        Force bypass geographic restriction with
                                      explicitly provided two-letter ISO 3166-2
-                                     country code (experimental)
+                                     country code
     --geo-bypass-ip-block IP_BLOCK   Force bypass geographic restriction with
                                      explicitly provided IP block in CIDR
-                                     notation (experimental)
+                                     notation
 
 ## Video Selection:
     --playlist-start NUMBER          Playlist video to start at (default is 1)
@@ -209,7 +208,7 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
     --playlist-reverse               Download playlist videos in reverse order
     --playlist-random                Download playlist videos in random order
     --xattr-set-filesize             Set file xattribute ytdl.filesize with
-                                     expected file size (experimental)
+                                     expected file size
     --hls-prefer-native              Use the native HLS downloader instead of
                                      ffmpeg
     --hls-prefer-ffmpeg              Use ffmpeg instead of the native HLS
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index c1048cc4c..b60f2ff23 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -365,7 +365,6 @@
  - **ImgurAlbum**
  - **Ina**
  - **Inc**
- - **Indavideo**
  - **IndavideoEmbed**
  - **InfoQ**
  - **Instagram**
@@ -526,6 +525,7 @@
  - **nbcolympics**
  - **nbcolympics:stream**
  - **NBCSports**
+ - **NBCSportsStream**
  - **NBCSportsVPlayer**
  - **ndr**: NDR.de - Norddeutscher Rundfunk
  - **ndr:embed**
@@ -625,6 +625,7 @@
  - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET  (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
  - **pcmag**
  - **PearVideo**
+ - **PeerTube**
  - **People**
  - **PerformGroup**
  - **periscope**: Periscope
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index a43eec860..2253da927 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.05.18'
+__version__ = '2018.05.26'

From c678192af3f004205b18a16b7418cbd937c1b584 Mon Sep 17 00:00:00 2001
From: Zack Fernandes <zack@ohnoco.com>
Date: Sun, 31 Dec 2017 13:55:35 -0800
Subject: [PATCH 097/125] [tumblr] Add support for authentication

---
 youtube_dl/extractor/tumblr.py | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 786143525..58ac66755 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -4,11 +4,18 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+    sanitized_Request,
+    urlencode_postdata
+)
 
 
 class TumblrIE(InfoExtractor):
     _VALID_URL = r'https?://(?P<blog_name>[^/?#&]+)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
+    _NETRC_MACHINE = 'tumblr'
+    _LOGIN_URL = 'https://www.tumblr.com/login'
     _TESTS = [{
         'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
         'md5': '479bb068e5b16462f5176a6828829767',
@@ -97,6 +104,31 @@ class TumblrIE(InfoExtractor):
         'add_ie': ['Instagram'],
     }]
 
+    def _real_initialize(self):
+        self._login()
+
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+        self.report_login()
+        webpage = self._download_webpage(self._LOGIN_URL, None, False)
+        form = self._hidden_inputs(webpage)
+        form.update({
+            'user[email]': username,
+            'user[password]': password
+        })
+        login_response = self._download_webpage(
+            sanitized_Request(self._LOGIN_URL, urlencode_postdata(form), {
+                'Content-Type': 'application/x-www-form-urlencoded',
+                'Referer': self._LOGIN_URL
+            }), None, False, 'Wrong login info')
+
+        # Check the login response from Tumblr for an error message and fail the extraction if we find one.
+        login_errors = self._search_regex(r'Tumblr\.RegistrationForm\.errors\s*=\s*\[[\"|\'](.+)[\"|\']\]', login_response, 'login errors', False)
+        if login_errors:
+            raise ExtractorError('Error logging in: %s' % login_errors, expected=True)
+
     def _real_extract(self, url):
         m_url = re.match(self._VALID_URL, url)
         video_id = m_url.group('id')

From 56cd31f32015cce131fb40a112d323da57fdda8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 19:53:32 +0700
Subject: [PATCH 098/125] [tumblr] Improve authentication (closes #15133)

---
 youtube_dl/extractor/tumblr.py | 39 ++++++++++++++++++++++------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 58ac66755..758ccbb44 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -7,7 +7,6 @@ from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
-    sanitized_Request,
     urlencode_postdata
 )
 
@@ -111,23 +110,37 @@ class TumblrIE(InfoExtractor):
         (username, password) = self._get_login_info()
         if username is None:
             return
-        self.report_login()
-        webpage = self._download_webpage(self._LOGIN_URL, None, False)
-        form = self._hidden_inputs(webpage)
-        form.update({
+
+        login_page = self._download_webpage(
+            self._LOGIN_URL, None, 'Downloading login page')
+
+        login_form = self._hidden_inputs(login_page)
+        login_form.update({
             'user[email]': username,
             'user[password]': password
         })
-        login_response = self._download_webpage(
-            sanitized_Request(self._LOGIN_URL, urlencode_postdata(form), {
-                'Content-Type': 'application/x-www-form-urlencoded',
-                'Referer': self._LOGIN_URL
-            }), None, False, 'Wrong login info')
 
-        # Check the login response from Tumblr for an error message and fail the extraction if we find one.
-        login_errors = self._search_regex(r'Tumblr\.RegistrationForm\.errors\s*=\s*\[[\"|\'](.+)[\"|\']\]', login_response, 'login errors', False)
+        response, urlh = self._download_webpage_handle(
+            self._LOGIN_URL, None, 'Logging in',
+            data=urlencode_postdata(login_form), headers={
+                'Content-Type': 'application/x-www-form-urlencoded',
+                'Referer': self._LOGIN_URL,
+            })
+
+        # Successful login
+        if '/dashboard' in urlh.geturl():
+            return
+
+        login_errors = self._parse_json(
+            self._search_regex(
+                r'RegistrationForm\.errors\s*=\s*(\[.+?\])\s*;', response,
+                'login errors', default='[]'),
+            None, fatal=False)
         if login_errors:
-            raise ExtractorError('Error logging in: %s' % login_errors, expected=True)
+            raise ExtractorError(
+                'Unable to login: %s' % login_errors[0], expected=True)
+
+        self.report_warning('Login has probably failed')
 
     def _real_extract(self, url):
         m_url = re.match(self._VALID_URL, url)

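The reworked login treats the post-login redirect to /dashboard as the success
signal and only then falls back to scraping Tumblr's error array. That error
path, reduced to a self-contained sketch with a hypothetical response body:

    import json
    import re

    response = 'RegistrationForm.errors = ["Incorrect e-mail or password."] ;'
    raw = re.search(
        r'RegistrationForm\.errors\s*=\s*(\[.+?\])\s*;', response).group(1)
    errors = json.loads(raw)   # the array is valid JSON, so _parse_json works
    if errors:
        print('Unable to login: %s' % errors[0])
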
From 97b01144bd9771f224749ffca10156a1cd7e9c1f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 20:00:00 +0700
Subject: [PATCH 099/125] [tumblr] Detect and report sensitive media (closes
 #13829)

---
 youtube_dl/extractor/tumblr.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 758ccbb44..89e6eb5ab 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -150,11 +151,19 @@ class TumblrIE(InfoExtractor):
         url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
         webpage, urlh = self._download_webpage_handle(url, video_id)
 
+        redirect_url = compat_str(urlh.geturl())
+        if 'tumblr.com/safe-mode' in redirect_url or redirect_url.startswith('/safe-mode'):
+            raise ExtractorError(
+                'This Tumblr may contain sensitive media. '
+                'Disable safe mode in your account settings '
+                'at https://www.tumblr.com/settings/account#safe_mode',
+                expected=True)
+
         iframe_url = self._search_regex(
             r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
             webpage, 'iframe url', default=None)
         if iframe_url is None:
-            return self.url_result(urlh.geturl(), 'Generic')
+            return self.url_result(redirect_url, 'Generic')
 
         iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page')
 

From 986c0b0215b127713825fa1523966ac66e03157b Mon Sep 17 00:00:00 2001
From: Parmjit Virk <pvirk@mts.net>
Date: Sat, 26 May 2018 08:05:54 -0500
Subject: [PATCH 100/125] [cbc] Fix playlist title extraction (closes #16502)

---
 youtube_dl/extractor/cbc.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/cbc.py b/youtube_dl/extractor/cbc.py
index 54b4b9be9..ce8e3d346 100644
--- a/youtube_dl/extractor/cbc.py
+++ b/youtube_dl/extractor/cbc.py
@@ -20,6 +20,7 @@ from ..utils import (
     parse_duration,
     parse_iso8601,
     parse_age_limit,
+    strip_or_none,
     int_or_none,
     ExtractorError,
 )
@@ -129,6 +130,9 @@ class CBCIE(InfoExtractor):
     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
+        title = self._og_search_title(webpage, default=None) or self._html_search_meta(
+            'twitter:title', webpage, 'title', default=None) or self._html_search_regex(
+                r'<title>([^<]+)</title>', webpage, 'title', fatal=False)
         entries = [
             self._extract_player_init(player_init, display_id)
             for player_init in re.findall(r'CBC\.APP\.Caffeine\.initInstance\(({.+?})\);', webpage)]
@@ -136,8 +140,7 @@ class CBCIE(InfoExtractor):
             self.url_result('cbcplayer:%s' % media_id, 'CBCPlayer', media_id)
             for media_id in re.findall(r'<iframe[^>]+src="[^"]+?mediaId=(\d+)"', webpage)])
         return self.playlist_result(
-            entries, display_id,
-            self._og_search_title(webpage, fatal=False),
+            entries, display_id, strip_or_none(title),
             self._og_search_description(webpage))
 
 

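The title lookup now chains three progressively weaker sources with 'or', each
returning None instead of raising, and strip_or_none() guards the case where
every source failed. The helper's contract:

    from youtube_dl.utils import strip_or_none

    print(strip_or_none('  Some playlist title\n'))  # 'Some playlist title'
    print(strip_or_none(None))                       # None -- safe after a total miss
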
From c0fd20abcad16bb2e377b6342a894a374c219763 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 26 May 2018 14:34:13 +0100
Subject: [PATCH 101/125] [soundcloud] Detect format extension (closes #16549)

---
 youtube_dl/extractor/soundcloud.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 46332e5c2..81c81c8d5 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -181,7 +181,6 @@ class SoundcloudIE(InfoExtractor):
         thumbnail = info.get('artwork_url') or info.get('user', {}).get('avatar_url')
         if isinstance(thumbnail, compat_str):
             thumbnail = thumbnail.replace('-large', '-t500x500')
-        ext = 'mp3'
         result = {
             'id': track_id,
             'uploader': info.get('user', {}).get('username'),
@@ -215,8 +214,11 @@ class SoundcloudIE(InfoExtractor):
             track_id, 'Downloading track url', query=query)
 
         for key, stream_url in format_dict.items():
-            abr = int_or_none(self._search_regex(
-                r'_(\d+)_url', key, 'audio bitrate', default=None))
+            ext, abr = 'mp3', None
+            mobj = re.search(r'_([^_]+)_(\d+)_url', key)
+            if mobj:
+                ext, abr = mobj.groups()
+                abr = int(abr)
             if key.startswith('http'):
                 stream_formats = [{
                     'format_id': key,
@@ -234,13 +236,14 @@ class SoundcloudIE(InfoExtractor):
                 }]
             elif key.startswith('hls'):
                 stream_formats = self._extract_m3u8_formats(
-                    stream_url, track_id, 'mp3', entry_protocol='m3u8_native',
+                    stream_url, track_id, ext, entry_protocol='m3u8_native',
                     m3u8_id=key, fatal=False)
             else:
                 continue
 
-            for f in stream_formats:
-                f['abr'] = abr
+            if abr:
+                for f in stream_formats:
+                    f['abr'] = abr
 
             formats.extend(stream_formats)
 
@@ -250,7 +253,7 @@ class SoundcloudIE(InfoExtractor):
             formats.append({
                 'format_id': 'fallback',
                 'url': update_url_query(info['stream_url'], query),
-                'ext': ext,
+                'ext': 'mp3',
             })
 
         for f in formats:

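The format keys SoundCloud hands back encode both codec and bitrate, e.g.
'http_mp3_128_url'. The new parsing step in isolation (key names here are
illustrative of the assumed grammar):

    import re

    for key in ('http_mp3_128_url', 'hls_opus_64_url', 'preview_mp3_128_url'):
        ext, abr = 'mp3', None          # defaults when the key has no match
        mobj = re.search(r'_([^_]+)_(\d+)_url', key)
        if mobj:
            ext, abr = mobj.groups()
            abr = int(abr)
        print(key, ext, abr)
    # http_mp3_128_url mp3 128
    # hls_opus_64_url opus 64
    # preview_mp3_128_url mp3 128
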
From 261f47306c594614edb8a5f0b8f5f3b8a87ce9c0 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 26 May 2018 14:35:47 +0100
Subject: [PATCH 102/125] [utils] Fix style id extraction for namespaced id
 attribute (closes #16551)

---
 youtube_dl/utils.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 7b4fd882f..63f24c0b6 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2667,6 +2667,7 @@ def dfxp2srt(dfxp_data):
     ]
 
     _x = functools.partial(xpath_with_ns, ns_map={
+        'xml': 'http://www.w3.org/XML/1998/namespace',
         'ttml': 'http://www.w3.org/ns/ttml',
         'tts': 'http://www.w3.org/ns/ttml#styling',
     })
@@ -2758,7 +2759,9 @@ def dfxp2srt(dfxp_data):
     repeat = False
     while True:
         for style in dfxp.findall(_x('.//ttml:style')):
-            style_id = style.get('id')
+            style_id = style.get('id') or style.get(_x('xml:id'))
+            if not style_id:
+                continue
             parent_style_id = style.get('style')
             if parent_style_id:
                 if parent_style_id not in styles:

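The fix works because ElementTree stores namespace-qualified attributes under
Clark notation, so a TTML style carrying xml:id is invisible to a plain
.get('id'); the patched code asks for the qualified name via _x('xml:id').
Demonstrated on a minimal element:

    import xml.etree.ElementTree as etree

    XML_NS = 'http://www.w3.org/XML/1998/namespace'
    style = etree.fromstring('<style xml:id="s1"/>')

    print(style.get('id'))               # None -- the attribute name is namespaced
    print(style.get('{%s}id' % XML_NS))  # s1 -- Clark-notation lookup
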
From 2a49d01992e0b4b87d78da8f83af2f6e57fb8ba8 Mon Sep 17 00:00:00 2001
From: mars67857 <mars67857@yahoo.com>
Date: Sat, 14 Oct 2017 22:09:44 -0700
Subject: [PATCH 103/125] [cammodels] Add extractor

---
 youtube_dl/extractor/cammodels.py  | 93 ++++++++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py |  1 +
 2 files changed, 94 insertions(+)
 create mode 100644 youtube_dl/extractor/cammodels.py

diff --git a/youtube_dl/extractor/cammodels.py b/youtube_dl/extractor/cammodels.py
new file mode 100644
index 000000000..1711d7096
--- /dev/null
+++ b/youtube_dl/extractor/cammodels.py
@@ -0,0 +1,93 @@
+from __future__ import unicode_literals
+from .common import InfoExtractor
+from .common import ExtractorError
+import json
+import re
+from ..utils import int_or_none
+
+
+class CamModelsIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?cammodels\.com/cam/(?P<id>\w+)'
+    _HEADERS = {
+        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
+        # Needed because server doesn't return links to video URLs if a browser-like User-Agent is not used
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(
+            url,
+            video_id,
+            headers=self._HEADERS)
+        manifest_url_root = self._html_search_regex(
+            r'manifestUrlRoot=(?P<id>https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*))',
+            webpage,
+            'manifest',
+            None,
+            False)
+        if not manifest_url_root:
+            offline = self._html_search_regex(
+                r'(?P<id>I\'m offline, but let\'s stay connected!)',
+                webpage,
+                'offline indicator',
+                None,
+                False)
+            private = self._html_search_regex(
+                r'(?P<id>I’m in a private show right now)',
+                webpage,
+                'private show indicator',
+                None,
+                False)
+            err = 'This user is currently offline, so nothing can be downloaded.' if offline \
+                else 'This user is doing a private show, which requires payment. This extractor currently does not support private streams.' if private \
+                else 'Unable to find link to stream info on webpage. Room is not offline, so something else is wrong.'
+            raise ExtractorError(
+                err,
+                expected=True if offline or private else False,
+                video_id=video_id
+            )
+        manifest_url = manifest_url_root + video_id + '.json'
+        manifest = self._download_json(
+            manifest_url,
+            video_id,
+            'Downloading links to streams.',
+            'Link to stream URLs was found, but we couldn\'t access it.',
+            headers=self._HEADERS)
+        try:
+            formats = []
+            for fmtName in ['mp4-rtmp', 'mp4-hls']:
+                for encoding in manifest['formats'][fmtName]['encodings']:
+                    formats.append({
+                        'ext': 'mp4',
+                        'url': encoding['location'],
+                        'width': int_or_none(encoding.get('videoWidth')),
+                        'height': int_or_none(encoding.get('videoHeight')),
+                        'vbr': int_or_none(encoding.get('videoKbps')),
+                        'abr': int_or_none(encoding.get('audioKbps')),
+                        'format_id': fmtName + str(encoding.get('videoWidth'))
+                    })
+        # If they change the JSON format, then fallback to parsing out RTMP links via regex.
+        except KeyError:
+            manifest_json = json.dumps(manifest)
+            manifest_links = re.finditer(
+                r'(?P<id>rtmp?:\/\/[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#&//=]*))',
+                manifest_json)
+            if not manifest_links:
+                raise ExtractorError(
+                    'Link to stream info was found, but we couldn\'t read the response. This is probably a bug.',
+                    expected=False,
+                    video_id=video_id)
+            formats = []
+            for manifest_link in manifest_links:
+                url = manifest_link.group('id')
+                formats.append({
+                    'ext': 'mp4',
+                    'url': url,
+                    'format_id': url.split(sep='/')[-1]
+                })
+        self._sort_formats(formats)
+        return {
+            'id': video_id,
+            'title': self._live_title(video_id),
+            'formats': formats
+        }
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index c9b49a0cd..d54e8df9f 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -145,6 +145,7 @@ from .camdemy import (
     CamdemyIE,
     CamdemyFolderIE
 )
+from .cammodels import CamModelsIE
 from .camwithher import CamWithHerIE
 from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE

From 8b1da46e8f6dd0de790a54a4809d224041262537 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 21:25:01 +0700
Subject: [PATCH 104/125] [cammodels] Improve and simplify (closes #14499)

---
 youtube_dl/extractor/cammodels.py | 159 +++++++++++++++---------------
 1 file changed, 80 insertions(+), 79 deletions(-)

diff --git a/youtube_dl/extractor/cammodels.py b/youtube_dl/extractor/cammodels.py
index 1711d7096..4f1b88d14 100644
--- a/youtube_dl/extractor/cammodels.py
+++ b/youtube_dl/extractor/cammodels.py
@@ -1,93 +1,94 @@
+# coding: utf-8
 from __future__ import unicode_literals
+
 from .common import InfoExtractor
-from .common import ExtractorError
-import json
-import re
-from ..utils import int_or_none
+from ..compat import compat_str
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+)
 
 
 class CamModelsIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?cammodels\.com/cam/(?P<id>\w+)'
-    _HEADERS = {
-        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
-        # Needed because server doesn't return links to video URLs if a browser-like User-Agent is not used
-    }
+    _VALID_URL = r'https?://(?:www\.)?cammodels\.com/cam/(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://www.cammodels.com/cam/AutumnKnight/',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(
-            url,
-            video_id,
-            headers=self._HEADERS)
-        manifest_url_root = self._html_search_regex(
-            r'manifestUrlRoot=(?P<id>https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*))',
-            webpage,
-            'manifest',
-            None,
-            False)
-        if not manifest_url_root:
-            offline = self._html_search_regex(
-                r'(?P<id>I\'m offline, but let\'s stay connected!)',
-                webpage,
-                'offline indicator',
-                None,
-                False)
-            private = self._html_search_regex(
-                r'(?P<id>I’m in a private show right now)',
-                webpage,
-                'private show indicator',
-                None,
-                False)
-            err = 'This user is currently offline, so nothing can be downloaded.' if offline \
-                else 'This user is doing a private show, which requires payment. This extractor currently does not support private streams.' if private \
-                else 'Unable to find link to stream info on webpage. Room is not offline, so something else is wrong.'
-            raise ExtractorError(
-                err,
-                expected=True if offline or private else False,
-                video_id=video_id
+        user_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, user_id)
+
+        manifest_root = self._html_search_regex(
+            r'manifestUrlRoot=([^&\']+)', webpage, 'manifest', default=None)
+
+        if not manifest_root:
+            ERRORS = (
+                ("I'm offline, but let's stay connected", 'This user is currently offline'),
+                ('in a private show', 'This user is in a private show'),
             )
-        manifest_url = manifest_url_root + video_id + '.json'
+            for pattern, message in ERRORS:
+                if pattern in webpage:
+                    error = message
+                    expected = True
+                    break
+            else:
+                error = 'Unable to find manifest URL root'
+                expected = False
+            raise ExtractorError(error, expected=expected)
+
         manifest = self._download_json(
-            manifest_url,
-            video_id,
-            'Downloading links to streams.',
-            'Link to stream URLs was found, but we couldn\'t access it.',
-            headers=self._HEADERS)
-        try:
-            formats = []
-            for fmtName in ['mp4-rtmp', 'mp4-hls']:
-                for encoding in manifest['formats'][fmtName]['encodings']:
-                    formats.append({
+            '%s%s.json' % (manifest_root, user_id), user_id)
+
+        formats = []
+        for format_id, format_dict in manifest['formats'].items():
+            if not isinstance(format_dict, dict):
+                continue
+            encodings = format_dict.get('encodings')
+            if not isinstance(encodings, list):
+                continue
+            vcodec = format_dict.get('videoCodec')
+            acodec = format_dict.get('audioCodec')
+            for media in encodings:
+                if not isinstance(media, dict):
+                    continue
+                media_url = media.get('location')
+                if not media_url or not isinstance(media_url, compat_str):
+                    continue
+
+                format_id_list = [format_id]
+                height = int_or_none(media.get('videoHeight'))
+                if height is not None:
+                    format_id_list.append('%dp' % height)
+                f = {
+                    'url': media_url,
+                    'format_id': '-'.join(format_id_list),
+                    'width': int_or_none(media.get('videoWidth')),
+                    'height': height,
+                    'vbr': int_or_none(media.get('videoKbps')),
+                    'abr': int_or_none(media.get('audioKbps')),
+                    'fps': int_or_none(media.get('fps')),
+                    'vcodec': vcodec,
+                    'acodec': acodec,
+                }
+                if 'rtmp' in format_id:
+                    f['ext'] = 'flv'
+                elif 'hls' in format_id:
+                    f.update({
                         'ext': 'mp4',
-                        'url': encoding['location'],
-                        'width': int_or_none(encoding.get('videoWidth')),
-                        'height': int_or_none(encoding.get('videoHeight')),
-                        'vbr': int_or_none(encoding.get('videoKbps')),
-                        'abr': int_or_none(encoding.get('audioKbps')),
-                        'format_id': fmtName + str(encoding.get('videoWidth'))
+                        # HLS tends to skip fragments on these live streams, so RTMP is preferred
+                        'preference': -1,
                     })
-        # If they change the JSON format, then fallback to parsing out RTMP links via regex.
-        except KeyError:
-            manifest_json = json.dumps(manifest)
-            manifest_links = re.finditer(
-                r'(?P<id>rtmp?:\/\/[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#&//=]*))',
-                manifest_json)
-            if not manifest_links:
-                raise ExtractorError(
-                    'Link to stream info was found, but we couldn\'t read the response. This is probably a bug.',
-                    expected=False,
-                    video_id=video_id)
-            formats = []
-            for manifest_link in manifest_links:
-                url = manifest_link.group('id')
-                formats.append({
-                    'ext': 'mp4',
-                    'url': url,
-                    'format_id': url.split(sep='/')[-1]
-                })
+                else:
+                    continue
+                formats.append(f)
         self._sort_formats(formats)
+
         return {
-            'id': video_id,
-            'title': self._live_title(video_id),
-            'formats': formats
+            'id': user_id,
+            'title': self._live_title(user_id),
+            'is_live': True,
+            'formats': formats,
         }

From ec2f3d2800185920629a7e6946701edebbf14dd6 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 26 May 2018 15:34:36 +0100
Subject: [PATCH 105/125] [ufctv] add support for authentication (closes #16542)

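_real_initialize now POSTs the netrc credentials as form data and inspects
the status code in the JSON reply. A condensed sketch of the same round
trip using only the Python 3 standard library (endpoint, field names and
the 'loginsuccess' sentinel are taken from the diff; the rest is
illustrative):

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    def login(username, password):
        data = urlencode({
            'username': username,
            'password': password,
            'format': 'json',
        }).encode()
        code = json.load(urlopen(
            'https://www.ufc.tv/secure/authenticate', data)).get('code')
        # Any code other than 'loginsuccess' becomes the error message.
        if code and code != 'loginsuccess':
            raise RuntimeError('Unable to log in: %s' % code)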
---
 youtube_dl/extractor/ufctv.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/youtube_dl/extractor/ufctv.py b/youtube_dl/extractor/ufctv.py
index ab823814b..f3eaee6b3 100644
--- a/youtube_dl/extractor/ufctv.py
+++ b/youtube_dl/extractor/ufctv.py
@@ -3,13 +3,16 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     parse_duration,
     parse_iso8601,
+    urlencode_postdata,
 )
 
 
 class UFCTVIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?ufc\.tv/video/(?P<id>[^/]+)'
+    _NETRC_MACHINE = 'ufctv'
     _TEST = {
         'url': 'https://www.ufc.tv/video/ufc-219-countdown-full-episode',
         'info_dict': {
@@ -26,6 +29,21 @@ class UFCTVIE(InfoExtractor):
         }
     }
 
+    def _real_initialize(self):
+        username, password = self._get_login_info()
+        if username is None:
+            return
+
+        code = self._download_json(
+            'https://www.ufc.tv/secure/authenticate',
+            None, 'Logging in', data=urlencode_postdata({
+                'username': username,
+                'password': password,
+                'format': 'json',
+            })).get('code')
+        if code and code != 'loginsuccess':
+            raise ExtractorError(code, expected=True)
+
     def _real_extract(self, url):
         display_id = self._match_id(url)
         video_data = self._download_json(url, display_id, query={

From 68217024e83c8e7965f2800e9ff7a9575f049b5c Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 26 May 2018 16:12:44 +0100
Subject: [PATCH 106/125] remove unnecessary assignment parentheses

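Pure cleanup: parentheses around a tuple-unpacking target are redundant in
Python, so both forms below bind the same names.

    pair = ('user', 'secret')
    (username, password) = pair  # old style, extra parentheses
    username, password = pair    # new style, identical unpacking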
---
 youtube_dl/extractor/animeondemand.py   | 2 +-
 youtube_dl/extractor/atresplayer.py     | 2 +-
 youtube_dl/extractor/bambuser.py        | 2 +-
 youtube_dl/extractor/crunchyroll.py     | 2 +-
 youtube_dl/extractor/curiositystream.py | 2 +-
 youtube_dl/extractor/dramafever.py      | 2 +-
 youtube_dl/extractor/facebook.py        | 2 +-
 youtube_dl/extractor/fc2.py             | 2 +-
 youtube_dl/extractor/funimation.py      | 2 +-
 youtube_dl/extractor/gdcvault.py        | 2 +-
 youtube_dl/extractor/globo.py           | 5 -----
 youtube_dl/extractor/hidive.py          | 7 +------
 youtube_dl/extractor/hrti.py            | 2 +-
 youtube_dl/extractor/iqiyi.py           | 2 +-
 youtube_dl/extractor/niconico.py        | 2 +-
 youtube_dl/extractor/noco.py            | 2 +-
 youtube_dl/extractor/packtpub.py        | 2 +-
 youtube_dl/extractor/patreon.py         | 2 +-
 youtube_dl/extractor/pluralsight.py     | 2 +-
 youtube_dl/extractor/roosterteeth.py    | 2 +-
 youtube_dl/extractor/safari.py          | 2 +-
 youtube_dl/extractor/sina.py            | 2 +-
 youtube_dl/extractor/tennistv.py        | 2 +-
 youtube_dl/extractor/tubitv.py          | 2 +-
 youtube_dl/extractor/tumblr.py          | 2 +-
 youtube_dl/extractor/twitch.py          | 2 +-
 youtube_dl/extractor/udemy.py           | 2 +-
 youtube_dl/extractor/vessel.py          | 2 +-
 youtube_dl/extractor/viki.py            | 2 +-
 youtube_dl/extractor/vimeo.py           | 2 +-
 youtube_dl/extractor/vk.py              | 2 +-
 youtube_dl/extractor/youtube.py         | 2 +-
 youtube_dl/extractor/zattoo.py          | 2 +-
 33 files changed, 32 insertions(+), 42 deletions(-)

diff --git a/youtube_dl/extractor/animeondemand.py b/youtube_dl/extractor/animeondemand.py
index e4fa72f46..1fe5d5e56 100644
--- a/youtube_dl/extractor/animeondemand.py
+++ b/youtube_dl/extractor/animeondemand.py
@@ -52,7 +52,7 @@ class AnimeOnDemandIE(InfoExtractor):
     }]
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/atresplayer.py b/youtube_dl/extractor/atresplayer.py
index 1a31ebe08..ae1c09427 100644
--- a/youtube_dl/extractor/atresplayer.py
+++ b/youtube_dl/extractor/atresplayer.py
@@ -74,7 +74,7 @@ class AtresPlayerIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index 633c57553..34f1b3d83 100644
--- a/youtube_dl/extractor/bambuser.py
+++ b/youtube_dl/extractor/bambuser.py
@@ -44,7 +44,7 @@ class BambuserIE(InfoExtractor):
     }
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 3efdc8c21..311da515d 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -49,7 +49,7 @@ class CrunchyrollBaseIE(InfoExtractor):
             })
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/curiositystream.py b/youtube_dl/extractor/curiositystream.py
index 8e45923e3..35b1e7a34 100644
--- a/youtube_dl/extractor/curiositystream.py
+++ b/youtube_dl/extractor/curiositystream.py
@@ -35,7 +35,7 @@ class CuriosityStreamBaseIE(InfoExtractor):
         return result['data']
 
     def _real_initialize(self):
-        (email, password) = self._get_login_info()
+        email, password = self._get_login_info()
         if email is None:
             return
         result = self._download_json(
diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py
index ffbd2623d..ab32ba4ff 100644
--- a/youtube_dl/extractor/dramafever.py
+++ b/youtube_dl/extractor/dramafever.py
@@ -42,7 +42,7 @@ class DramaFeverBaseIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index 220ada3a6..0971ce356 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -226,7 +226,7 @@ class FacebookIE(InfoExtractor):
         return urls
 
     def _login(self):
-        (useremail, password) = self._get_login_info()
+        useremail, password = self._get_login_info()
         if useremail is None:
             return
 
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index 448647d72..435561147 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -46,7 +46,7 @@ class FC2IE(InfoExtractor):
     }]
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None or password is None:
             return False
 
diff --git a/youtube_dl/extractor/funimation.py b/youtube_dl/extractor/funimation.py
index 107f658ba..07d01caec 100644
--- a/youtube_dl/extractor/funimation.py
+++ b/youtube_dl/extractor/funimation.py
@@ -51,7 +51,7 @@ class FunimationIE(InfoExtractor):
     }]
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
         try:
diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index f71d9092e..8806dc48a 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -91,7 +91,7 @@ class GDCVaultIE(InfoExtractor):
     ]
 
     def _login(self, webpage_url, display_id):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None or password is None:
             self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
             return None
diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 81d6d36d3..c2140c362 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -23,7 +23,6 @@ from ..utils import (
 
 class GloboIE(InfoExtractor):
     _VALID_URL = r'(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})'
-    _LOGGED_IN = False
     _NETRC_MACHINE = 'globo'
     _TESTS = [{
         'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
@@ -68,9 +67,6 @@ class GloboIE(InfoExtractor):
     }]
 
     def _real_initialize(self):
-        if self._LOGGED_IN:
-            return
-
         email, password = self._get_login_info()
         if email is None:
             return
@@ -91,7 +87,6 @@ class GloboIE(InfoExtractor):
                 resp = self._parse_json(e.cause.read(), None)
                 raise ExtractorError(resp.get('userMessage') or resp['id'], expected=True)
             raise
-        self._LOGGED_IN = True
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
diff --git a/youtube_dl/extractor/hidive.py b/youtube_dl/extractor/hidive.py
index d8f2e682f..39fabe8a5 100644
--- a/youtube_dl/extractor/hidive.py
+++ b/youtube_dl/extractor/hidive.py
@@ -18,7 +18,6 @@ class HiDiveIE(InfoExtractor):
     # so disabling geo bypass completely
     _GEO_BYPASS = False
     _NETRC_MACHINE = 'hidive'
-    _LOGGED_IN = False
     _LOGIN_URL = 'https://www.hidive.com/account/login'
 
     _TESTS = [{
@@ -38,10 +37,7 @@ class HiDiveIE(InfoExtractor):
     }]
 
     def _real_initialize(self):
-        if self._LOGGED_IN:
-            return
-
-        (email, password) = self._get_login_info()
+        email, password = self._get_login_info()
         if email is None:
             return
 
@@ -56,7 +52,6 @@ class HiDiveIE(InfoExtractor):
         })
         self._download_webpage(
             self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))
-        self._LOGGED_IN = True
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
diff --git a/youtube_dl/extractor/hrti.py b/youtube_dl/extractor/hrti.py
index 6424d34ac..9ba1aa703 100644
--- a/youtube_dl/extractor/hrti.py
+++ b/youtube_dl/extractor/hrti.py
@@ -66,7 +66,7 @@ class HRTiBaseIE(InfoExtractor):
         self._logout_url = modules['user']['resources']['logout']['uri']
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         # TODO: figure out authentication with cookies
         if username is None or password is None:
             self.raise_login_required()
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index fdfa7de9e..4b081bd46 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -239,7 +239,7 @@ class IqiyiIE(InfoExtractor):
         return ohdave_rsa_encrypt(data, e, N)
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
 
         # No authentication to be performed
         if not username:
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index df7f528be..dbe871f16 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -163,7 +163,7 @@ class NiconicoIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         # No authentication to be performed
         if not username:
             return True
diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py
index a9f9b10c4..58b371ed7 100644
--- a/youtube_dl/extractor/noco.py
+++ b/youtube_dl/extractor/noco.py
@@ -65,7 +65,7 @@ class NocoIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/packtpub.py b/youtube_dl/extractor/packtpub.py
index 8ed3c6347..56a2a1083 100644
--- a/youtube_dl/extractor/packtpub.py
+++ b/youtube_dl/extractor/packtpub.py
@@ -42,7 +42,7 @@ class PacktPubIE(PacktPubBaseIE):
     _TOKEN = None
 
     def _real_initialize(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
         try:
diff --git a/youtube_dl/extractor/patreon.py b/youtube_dl/extractor/patreon.py
index d4b1d34ca..9eb027679 100644
--- a/youtube_dl/extractor/patreon.py
+++ b/youtube_dl/extractor/patreon.py
@@ -53,7 +53,7 @@ class PatreonIE(InfoExtractor):
     # needed. Keeping this commented for when this inevitably changes.
     '''
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py
index 3c508c9ca..a207ca9cb 100644
--- a/youtube_dl/extractor/pluralsight.py
+++ b/youtube_dl/extractor/pluralsight.py
@@ -94,7 +94,7 @@ class PluralsightIE(PluralsightBaseIE):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/roosterteeth.py b/youtube_dl/extractor/roosterteeth.py
index 8b703800e..857434540 100644
--- a/youtube_dl/extractor/roosterteeth.py
+++ b/youtube_dl/extractor/roosterteeth.py
@@ -50,7 +50,7 @@ class RoosterTeethIE(InfoExtractor):
     }]
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
index cc6698f88..8a5d48fc2 100644
--- a/youtube_dl/extractor/safari.py
+++ b/youtube_dl/extractor/safari.py
@@ -27,7 +27,7 @@ class SafariBaseIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py
index 8fc66732a..07b766b4a 100644
--- a/youtube_dl/extractor/sina.py
+++ b/youtube_dl/extractor/sina.py
@@ -64,7 +64,7 @@ class SinaIE(InfoExtractor):
                 # The video id is in the redirected url
                 self.to_screen('Getting video id')
                 request = HEADRequest(url)
-                (_, urlh) = self._download_webpage_handle(request, 'NA', False)
+                _, urlh = self._download_webpage_handle(request, 'NA', False)
                 return self._real_extract(urlh.geturl())
             else:
                 pseudo_id = mobj.group('pseudo_id')
diff --git a/youtube_dl/extractor/tennistv.py b/youtube_dl/extractor/tennistv.py
index 0c6f70784..a586f30ad 100644
--- a/youtube_dl/extractor/tennistv.py
+++ b/youtube_dl/extractor/tennistv.py
@@ -32,7 +32,7 @@ class TennisTVIE(InfoExtractor):
     _NETRC_MACHINE = 'tennistv'
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if not username or not password:
             raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
 
diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py
index 36f6c1673..a51fa6515 100644
--- a/youtube_dl/extractor/tubitv.py
+++ b/youtube_dl/extractor/tubitv.py
@@ -36,7 +36,7 @@ class TubiTvIE(InfoExtractor):
     }]
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
         self.report_login()
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 89e6eb5ab..edbb0aa69 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -108,7 +108,7 @@ class TumblrIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 3ee2af52e..e01f11331 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -61,7 +61,7 @@ class TwitchBaseIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index 0a74a9768..a7196997e 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -151,7 +151,7 @@ class UdemyIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/vessel.py b/youtube_dl/extractor/vessel.py
index 80a643dfe..31eee0ba7 100644
--- a/youtube_dl/extractor/vessel.py
+++ b/youtube_dl/extractor/vessel.py
@@ -75,7 +75,7 @@ class VesselIE(InfoExtractor):
                     'Access to this content is restricted. (%s said: %s)' % (self.IE_NAME, err_code), expected=True)
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
         self.report_login()
diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index ad2a2a4b7..546de95d8 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -88,7 +88,7 @@ class VikiBaseIE(InfoExtractor):
         self._login()
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 8dfd8891c..3baa2d075 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -37,7 +37,7 @@ class VimeoBaseInfoExtractor(InfoExtractor):
     _LOGIN_URL = 'https://vimeo.com/log_in'
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             if self._LOGIN_REQUIRED:
                 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index b50d4f170..29002b35f 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -32,7 +32,7 @@ class VKBaseIE(InfoExtractor):
     _NETRC_MACHINE = 'vk'
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if username is None:
             return
 
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index e4eec7c30..379559825 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -85,7 +85,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
         If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
         """
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         # No authentication to be performed
         if username is None:
             if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
diff --git a/youtube_dl/extractor/zattoo.py b/youtube_dl/extractor/zattoo.py
index 773073d85..b5a3a0716 100644
--- a/youtube_dl/extractor/zattoo.py
+++ b/youtube_dl/extractor/zattoo.py
@@ -24,7 +24,7 @@ class ZattooBaseIE(InfoExtractor):
     _power_guide_hash = None
 
     def _login(self):
-        (username, password) = self._get_login_info()
+        username, password = self._get_login_info()
         if not username or not password:
             self.raise_login_required(
                 'A valid %s account is needed to access this media.'

From ddd8486a448ee94134a62f2488e5e39bbd72880e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 22:10:08 +0700
Subject: [PATCH 107/125] [downloader/rtmp] Gracefully handle live streams
 interrupted by user

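A live stream has no natural end, so a Ctrl+C during an RTMP live download
used to surface as a failure even though the user simply chose to stop
recording. The shape of the fix, with run() standing in for run_rtmpdump
and the return code as in the downloader:

    RD_SUCCESS = 0

    def download(run, is_live):
        try:
            return run()
        except KeyboardInterrupt:
            if not is_live:
                raise  # regular VOD downloads still abort as before
            # Interrupting a live stream counts as a finished download.
            return RD_SUCCESS

The stderr-parsing loop also moves into a local dl() helper wrapped in
try/finally, so proc.wait() now runs on every exit path.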
---
 youtube_dl/downloader/rtmp.py | 119 +++++++++++++++++++---------------
 1 file changed, 66 insertions(+), 53 deletions(-)

diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index b823b5171..63e2b5c89 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -24,71 +24,78 @@ class RtmpFD(FileDownloader):
     def real_download(self, filename, info_dict):
         def run_rtmpdump(args):
             start = time.time()
-            resume_percent = None
-            resume_downloaded_data_len = None
             proc = subprocess.Popen(args, stderr=subprocess.PIPE)
             cursor_in_new_line = True
-            proc_stderr_closed = False
-            while not proc_stderr_closed:
-                # read line from stderr
-                line = ''
-                while True:
-                    char = proc.stderr.read(1)
-                    if not char:
-                        proc_stderr_closed = True
-                        break
-                    if char in [b'\r', b'\n']:
-                        break
-                    line += char.decode('ascii', 'replace')
-                if not line:
-                    # proc_stderr_closed is True
-                    continue
-                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
-                if mobj:
-                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
-                    percent = float(mobj.group(2))
-                    if not resume_percent:
-                        resume_percent = percent
-                        resume_downloaded_data_len = downloaded_data_len
-                    time_now = time.time()
-                    eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
-                    speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
-                    data_len = None
-                    if percent > 0:
-                        data_len = int(downloaded_data_len * 100 / percent)
-                    self._hook_progress({
-                        'status': 'downloading',
-                        'downloaded_bytes': downloaded_data_len,
-                        'total_bytes_estimate': data_len,
-                        'tmpfilename': tmpfilename,
-                        'filename': filename,
-                        'eta': eta,
-                        'elapsed': time_now - start,
-                        'speed': speed,
-                    })
-                    cursor_in_new_line = False
-                else:
-                    # no percent for live streams
-                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
+
+            def dl():
+                resume_percent = None
+                resume_downloaded_data_len = None
+                proc_stderr_closed = False
+                while not proc_stderr_closed:
+                    # read line from stderr
+                    line = ''
+                    while True:
+                        char = proc.stderr.read(1)
+                        if not char:
+                            proc_stderr_closed = True
+                            break
+                        if char in [b'\r', b'\n']:
+                            break
+                        line += char.decode('ascii', 'replace')
+                    if not line:
+                        # proc_stderr_closed is True
+                        continue
+                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                     if mobj:
                         downloaded_data_len = int(float(mobj.group(1)) * 1024)
+                        percent = float(mobj.group(2))
+                        if not resume_percent:
+                            resume_percent = percent
+                            resume_downloaded_data_len = downloaded_data_len
                         time_now = time.time()
-                        speed = self.calc_speed(start, time_now, downloaded_data_len)
+                        eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
+                        speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
+                        data_len = None
+                        if percent > 0:
+                            data_len = int(downloaded_data_len * 100 / percent)
                         self._hook_progress({
+                            'status': 'downloading',
                             'downloaded_bytes': downloaded_data_len,
+                            'total_bytes_estimate': data_len,
                             'tmpfilename': tmpfilename,
                             'filename': filename,
-                            'status': 'downloading',
+                            'eta': eta,
                             'elapsed': time_now - start,
                             'speed': speed,
                         })
                         cursor_in_new_line = False
-                    elif self.params.get('verbose', False):
-                        if not cursor_in_new_line:
-                            self.to_screen('')
-                        cursor_in_new_line = True
-                        self.to_screen('[rtmpdump] ' + line)
-            proc.wait()
+                    else:
+                        # no percent for live streams
+                        mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
+                        if mobj:
+                            downloaded_data_len = int(float(mobj.group(1)) * 1024)
+                            time_now = time.time()
+                            speed = self.calc_speed(start, time_now, downloaded_data_len)
+                            self._hook_progress({
+                                'downloaded_bytes': downloaded_data_len,
+                                'tmpfilename': tmpfilename,
+                                'filename': filename,
+                                'status': 'downloading',
+                                'elapsed': time_now - start,
+                                'speed': speed,
+                            })
+                            cursor_in_new_line = False
+                        elif self.params.get('verbose', False):
+                            if not cursor_in_new_line:
+                                self.to_screen('')
+                            cursor_in_new_line = True
+                            self.to_screen('[rtmpdump] ' + line)
+
+            try:
+                dl()
+            finally:
+                proc.wait()
+
             if not cursor_in_new_line:
                 self.to_screen('')
             return proc.returncode
@@ -163,7 +170,13 @@ class RtmpFD(FileDownloader):
         RD_INCOMPLETE = 2
         RD_NO_CONNECT = 3
 
-        retval = run_rtmpdump(args)
+        try:
+            retval = run_rtmpdump(args)
+        except KeyboardInterrupt:
+            if not info_dict.get('is_live'):
+                raise
+            retval = RD_SUCCESS
+            self.to_screen('\n[rtmpdump] Interrupted by user')
 
         if retval == RD_NO_CONNECT:
             self.report_error('[rtmpdump] Could not connect to RTMP server.')

From f16f48779cbad4a6d39a908e131a8d55941d1671 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 22:14:09 +0700
Subject: [PATCH 108/125] [downloader/rtmp] Generalize download messages and
 report time elapsed on finish

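Besides the friendlier 'Downloaded %s bytes' wording, the finished
progress hook now reports wall-clock duration, measured from just before
rtmpdump is first invoked. Schematically:

    import time

    started = time.time()
    # ... run_rtmpdump(args), possibly with --resume retries ...
    progress = {
        'status': 'finished',
        'elapsed': time.time() - started,  # newly reported on finish
    }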
---
 youtube_dl/downloader/rtmp.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index 63e2b5c89..9e0ddbb18 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -170,6 +170,8 @@ class RtmpFD(FileDownloader):
         RD_INCOMPLETE = 2
         RD_NO_CONNECT = 3
 
+        started = time.time()
+
         try:
             retval = run_rtmpdump(args)
         except KeyboardInterrupt:
@@ -184,7 +186,7 @@ class RtmpFD(FileDownloader):
 
         while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] %s bytes' % prevsize)
+            self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
             time.sleep(5.0)  # This seems to be needed
             args = basic_args + ['--resume']
             if retval == RD_FAILED:
@@ -201,13 +203,14 @@ class RtmpFD(FileDownloader):
                 break
         if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] %s bytes' % fsize)
+            self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
                 'total_bytes': fsize,
                 'filename': filename,
                 'status': 'finished',
+                'elapsed': time.time() - started,
             })
             return True
         else:

From 2ce35d9f43328e82108bae6661c2ac0ba2a0498c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 22:21:55 +0700
Subject: [PATCH 109/125] [cammodels] Add another error pattern

---
 youtube_dl/extractor/cammodels.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube_dl/extractor/cammodels.py b/youtube_dl/extractor/cammodels.py
index 4f1b88d14..17f7ac043 100644
--- a/youtube_dl/extractor/cammodels.py
+++ b/youtube_dl/extractor/cammodels.py
@@ -28,6 +28,7 @@ class CamModelsIE(InfoExtractor):
             ERRORS = (
                 ("I'm offline, but let's stay connected", 'This user is currently offline'),
                 ('in a private show', 'This user is in a private show'),
+                ('is currently performing LIVE', 'This model is currently performing live'),
             )
             for pattern, message in ERRORS:
                 if pattern in webpage:

From 8882840ec5d9536772d7de75b7fb6389103a3a1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 26 May 2018 22:22:28 +0700
Subject: [PATCH 110/125] [cammodels] Use geo verification headers

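geo_verification_headers() comes from the shared InfoExtractor base class.
As far as the 2018 codebase goes, it contributes nothing unless
--geo-verification-proxy is set, in which case the page request is routed
through that proxy. A hedged sketch (the header name is my reading of the
base class, not part of this diff):

    def geo_verification_headers(geo_verification_proxy=None):
        # Empty unless --geo-verification-proxy is configured.
        headers = {}
        if geo_verification_proxy:
            headers['Ytdl-request-proxy'] = geo_verification_proxy
        return headers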
---
 youtube_dl/extractor/cammodels.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/cammodels.py b/youtube_dl/extractor/cammodels.py
index 17f7ac043..ee0165dba 100644
--- a/youtube_dl/extractor/cammodels.py
+++ b/youtube_dl/extractor/cammodels.py
@@ -19,7 +19,8 @@ class CamModelsIE(InfoExtractor):
     def _real_extract(self, url):
         user_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, user_id)
+        webpage = self._download_webpage(
+            url, user_id, headers=self.geo_verification_headers())
 
         manifest_root = self._html_search_regex(
             r'manifestUrlRoot=([^&\']+)', webpage, 'manifest', default=None)

From c9e12a618c9420c2bb21c09bf47b9469785f492e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 27 May 2018 12:10:12 +0100
Subject: [PATCH 111/125] [9c9media] extract mpd formats and subtitles

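The stack indirection goes away: each content package exposes a single
'manifest.' endpoint, so the extractor can derive the HLS, HDS and (new)
DASH manifests plus both subtitle tracks from one base URL. A sketch of
that derivation, with the URL layout as in the diff:

    def build_urls(content_package_url, has_closed_captions):
        base = content_package_url + 'manifest.'
        manifests = {proto: base + proto
                     for proto in ('m3u8', 'f4m', 'mpd')}
        subtitles = {}
        if has_closed_captions:
            subtitles['en'] = [{'url': base + ext, 'ext': ext}
                               for ext in ('vtt', 'srt')]
        return manifests, subtitles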
---
 youtube_dl/extractor/ctvnews.py        |  4 +-
 youtube_dl/extractor/extractors.py     |  5 +-
 youtube_dl/extractor/ninecninemedia.py | 93 ++++++++++----------------
 youtube_dl/extractor/rds.py            |  2 +-
 4 files changed, 41 insertions(+), 63 deletions(-)

diff --git a/youtube_dl/extractor/ctvnews.py b/youtube_dl/extractor/ctvnews.py
index 55a127b76..03f8cefb7 100644
--- a/youtube_dl/extractor/ctvnews.py
+++ b/youtube_dl/extractor/ctvnews.py
@@ -11,10 +11,10 @@ class CTVNewsIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)'
     _TESTS = [{
         'url': 'http://www.ctvnews.ca/video?clipId=901995',
-        'md5': '10deb320dc0ccb8d01d34d12fc2ea672',
+        'md5': '9b8624ba66351a23e0b6e1391971f9af',
         'info_dict': {
             'id': '901995',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Extended: \'That person cannot be me\' Johnson says',
             'description': 'md5:958dd3b4f5bbbf0ed4d045c790d89285',
             'timestamp': 1467286284,
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index d54e8df9f..2f485012f 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -718,10 +718,7 @@ from .nick import (
     NickRuIE,
 )
 from .niconico import NiconicoIE, NiconicoPlaylistIE
-from .ninecninemedia import (
-    NineCNineMediaStackIE,
-    NineCNineMediaIE,
-)
+from .ninecninemedia import NineCNineMediaIE
 from .ninegag import NineGagIE
 from .ninenow import NineNowIE
 from .nintendo import NintendoIE
diff --git a/youtube_dl/extractor/ninecninemedia.py b/youtube_dl/extractor/ninecninemedia.py
index 8961309fd..875665d43 100644
--- a/youtube_dl/extractor/ninecninemedia.py
+++ b/youtube_dl/extractor/ninecninemedia.py
@@ -13,38 +13,11 @@ from ..utils import (
 )
 
 
-class NineCNineMediaBaseIE(InfoExtractor):
-    _API_BASE_TEMPLATE = 'http://capi.9c9media.com/destinations/%s/platforms/desktop/contents/%s/'
-
-
-class NineCNineMediaStackIE(NineCNineMediaBaseIE):
-    IE_NAME = '9c9media:stack'
-    _GEO_COUNTRIES = ['CA']
-    _VALID_URL = r'9c9media:stack:(?P<destination_code>[^:]+):(?P<content_id>\d+):(?P<content_package>\d+):(?P<id>\d+)'
-
-    def _real_extract(self, url):
-        destination_code, content_id, package_id, stack_id = re.match(self._VALID_URL, url).groups()
-        stack_base_url_template = self._API_BASE_TEMPLATE + 'contentpackages/%s/stacks/%s/manifest.'
-        stack_base_url = stack_base_url_template % (destination_code, content_id, package_id, stack_id)
-
-        formats = []
-        formats.extend(self._extract_m3u8_formats(
-            stack_base_url + 'm3u8', stack_id, 'mp4',
-            'm3u8_native', m3u8_id='hls', fatal=False))
-        formats.extend(self._extract_f4m_formats(
-            stack_base_url + 'f4m', stack_id,
-            f4m_id='hds', fatal=False))
-        self._sort_formats(formats)
-
-        return {
-            'id': stack_id,
-            'formats': formats,
-        }
-
-
-class NineCNineMediaIE(NineCNineMediaBaseIE):
+class NineCNineMediaIE(InfoExtractor):
     IE_NAME = '9c9media'
+    _GEO_COUNTRIES = ['CA']
     _VALID_URL = r'9c9media:(?P<destination_code>[^:]+):(?P<id>\d+)'
+    _API_BASE_TEMPLATE = 'http://capi.9c9media.com/destinations/%s/platforms/desktop/contents/%s/'
 
     def _real_extract(self, url):
         destination_code, content_id = re.match(self._VALID_URL, url).groups()
@@ -58,13 +31,26 @@ class NineCNineMediaIE(NineCNineMediaBaseIE):
         content_package = content['ContentPackages'][0]
         package_id = content_package['Id']
         content_package_url = api_base_url + 'contentpackages/%s/' % package_id
-        content_package = self._download_json(content_package_url, content_id)
+        content_package = self._download_json(
+            content_package_url, content_id, query={
+                '$include': '[HasClosedCaptions]',
+            })
 
-        if content_package.get('Constraints', {}).get('Security', {}).get('Type') == 'adobe-drm':
+        if content_package.get('Constraints', {}).get('Security', {}).get('Type'):
             raise ExtractorError('This video is DRM protected.', expected=True)
 
-        stacks = self._download_json(content_package_url + 'stacks/', package_id)['Items']
-        multistacks = len(stacks) > 1
+        manifest_base_url = content_package_url + 'manifest.'
+        formats = []
+        formats.extend(self._extract_m3u8_formats(
+            manifest_base_url + 'm3u8', content_id, 'mp4',
+            'm3u8_native', m3u8_id='hls', fatal=False))
+        formats.extend(self._extract_f4m_formats(
+            manifest_base_url + 'f4m', content_id,
+            f4m_id='hds', fatal=False))
+        formats.extend(self._extract_mpd_formats(
+            manifest_base_url + 'mpd', content_id,
+            mpd_id='dash', fatal=False))
+        self._sort_formats(formats)
 
         thumbnails = []
         for image in content.get('Images', []):
@@ -85,10 +71,12 @@ class NineCNineMediaIE(NineCNineMediaBaseIE):
                     continue
                 container.append(e_name)
 
-        description = content.get('Desc') or content.get('ShortDesc')
         season = content.get('Season', {})
-        base_info = {
-            'description': description,
+
+        info = {
+            'id': content_id,
+            'title': title,
+            'description': content.get('Desc') or content.get('ShortDesc'),
             'timestamp': parse_iso8601(content.get('BroadcastDateTime')),
             'episode_number': int_or_none(content.get('Episode')),
             'season': season.get('Name'),
@@ -97,26 +85,19 @@ class NineCNineMediaIE(NineCNineMediaBaseIE):
             'series': content.get('Media', {}).get('Name'),
             'tags': tags,
             'categories': categories,
+            'duration': float_or_none(content_package.get('Duration')),
+            'formats': formats,
         }
 
-        entries = []
-        for stack in stacks:
-            stack_id = compat_str(stack['Id'])
-            entry = {
-                '_type': 'url_transparent',
-                'url': '9c9media:stack:%s:%s:%s:%s' % (destination_code, content_id, package_id, stack_id),
-                'id': stack_id,
-                'title': '%s_part%s' % (title, stack['Name']) if multistacks else title,
-                'duration': float_or_none(stack.get('Duration')),
-                'ie_key': 'NineCNineMediaStack',
+        if content_package.get('HasClosedCaptions'):
+            info['subtitles'] = {
+                'en': [{
+                    'url': manifest_base_url + 'vtt',
+                    'ext': 'vtt',
+                }, {
+                    'url': manifest_base_url + 'srt',
+                    'ext': 'srt',
+                }]
             }
-            entry.update(base_info)
-            entries.append(entry)
 
-        return {
-            '_type': 'multi_video',
-            'id': content_id,
-            'title': title,
-            'description': description,
-            'entries': entries,
-        }
+        return info
diff --git a/youtube_dl/extractor/rds.py b/youtube_dl/extractor/rds.py
index bf200ea4d..8c016a77d 100644
--- a/youtube_dl/extractor/rds.py
+++ b/youtube_dl/extractor/rds.py
@@ -19,7 +19,7 @@ class RDSIE(InfoExtractor):
         'info_dict': {
             'id': '604333',
             'display_id': 'fowler-jr-prend-la-direction-de-jacksonville',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Fowler Jr. prend la direction de Jacksonville',
             'description': 'Dante Fowler Jr. est le troisième choix du repêchage 2015 de la NFL. ',
             'timestamp': 1430397346,

From 9c65c4a6cd981e081f4a99d11206e984999f51ff Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 27 May 2018 12:11:53 +0100
Subject: [PATCH 112/125] [bellmedia] add support for bnnbloomberg.ca (#16560)

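The new domain reuses Bell Media's existing 'bnn' backend, so the patch
only widens the host alternation and maps the new brand back to the old
API id. Illustrated outside the extractor (hosts are from the test URLs):

    import re

    host_re = re.compile(r'\b(?P<domain>bnn(?:bloomberg)?)\.ca')
    api_id = {'bnnbloomberg': 'bnn'}

    for host in ('bnn.ca', 'bnnbloomberg.ca'):
        domain = host_re.search(host).group('domain')
        print(host, '->', api_id.get(domain, domain))  # both yield bnn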
---
 youtube_dl/extractor/bellmedia.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/youtube_dl/extractor/bellmedia.py b/youtube_dl/extractor/bellmedia.py
index 8820a3914..f36a2452d 100644
--- a/youtube_dl/extractor/bellmedia.py
+++ b/youtube_dl/extractor/bellmedia.py
@@ -12,7 +12,7 @@ class BellMediaIE(InfoExtractor):
             (?:
                 ctv|
                 tsn|
-                bnn|
+                bnn(?:bloomberg)?|
                 thecomedynetwork|
                 discovery|
                 discoveryvelocity|
@@ -27,17 +27,16 @@ class BellMediaIE(InfoExtractor):
             much\.com
         )/.*?(?:\bvid(?:eoid)?=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6,})'''
     _TESTS = [{
-        'url': 'http://www.ctv.ca/video/player?vid=706966',
-        'md5': 'ff2ebbeae0aa2dcc32a830c3fd69b7b0',
+        'url': 'https://www.bnnbloomberg.ca/video/david-cockfield-s-top-picks~1403070',
+        'md5': '36d3ef559cfe8af8efe15922cd3ce950',
         'info_dict': {
-            'id': '706966',
-            'ext': 'mp4',
-            'title': 'Larry Day and Richard Jutras on the TIFF red carpet of \'Stonewall\'',
-            'description': 'etalk catches up with Larry Day and Richard Jutras on the TIFF red carpet of "Stonewall”.',
-            'upload_date': '20150919',
-            'timestamp': 1442624700,
+            'id': '1403070',
+            'ext': 'flv',
+            'title': 'David Cockfield\'s Top Picks',
+            'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3',
+            'upload_date': '20180525',
+            'timestamp': 1527288600,
         },
-        'expected_warnings': ['HTTP Error 404'],
     }, {
         'url': 'http://www.thecomedynetwork.ca/video/player?vid=923582',
         'only_matching': True,
@@ -70,6 +69,7 @@ class BellMediaIE(InfoExtractor):
         'investigationdiscovery': 'invdisc',
         'animalplanet': 'aniplan',
         'etalk': 'ctv',
+        'bnnbloomberg': 'bnn',
     }
 
     def _real_extract(self, url):

From cfd7f2a6365e4d4ed9036b7fd873747be5e91d44 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 27 May 2018 18:24:37 +0700
Subject: [PATCH 113/125] [apa] Add extractor (closes #15041, closes #15672)

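Besides the extractor itself, the patch teaches the generic extractor to
spot APA iframes. The discovery regex (copied from _extract_urls below)
keys on the UUID in the embed path; the sample HTML here is illustrative:

    import re

    APA_EMBED_RE = (
        r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at'
        r'/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-'
        r'[\da-f]{12}.*?)\1')

    html = ('<iframe src="http://uvp.apa.at/embed/'
            '293f6d17-692a-44e3-9fd5-7b178f3a1029"></iframe>')
    print([m.group('url') for m in re.finditer(APA_EMBED_RE, html)])
    # ['http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029']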
---
 youtube_dl/extractor/apa.py        | 94 ++++++++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/generic.py    | 23 ++++++++
 3 files changed, 118 insertions(+)
 create mode 100644 youtube_dl/extractor/apa.py

diff --git a/youtube_dl/extractor/apa.py b/youtube_dl/extractor/apa.py
new file mode 100644
index 000000000..a30a935aa
--- /dev/null
+++ b/youtube_dl/extractor/apa.py
@@ -0,0 +1,94 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    js_to_json,
+)
+
+
+class APAIE(InfoExtractor):
+    _VALID_URL = r'https?://[^/]+\.apa\.at/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _TESTS = [{
+        'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029',
+        'md5': '2b12292faeb0a7d930c778c7a5b4759b',
+        'info_dict': {
+            'id': 'jjv85FdZ',
+            'ext': 'mp4',
+            'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
+            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'duration': 254,
+            'timestamp': 1519211149,
+            'upload_date': '20180221',
+        },
+    }, {
+        'url': 'https://uvp-apapublisher.sf.apa.at/embed/2f94e9e6-d945-4db2-9548-f9a41ebf7b78',
+        'only_matching': True,
+    }, {
+        'url': 'http://uvp-rma.sf.apa.at/embed/70404cca-2f47-4855-bbb8-20b1fae58f76',
+        'only_matching': True,
+    }, {
+        'url': 'http://uvp-kleinezeitung.sf.apa.at/embed/f1c44979-dba2-4ebf-b021-e4cf2cac3c81',
+        'only_matching': True,
+    }]
+
+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1',
+                webpage)]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        jwplatform_id = self._search_regex(
+            r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,
+            'jwplatform id', default=None)
+
+        if jwplatform_id:
+            return self.url_result(
+                'jwplatform:' + jwplatform_id, ie='JWPlatform',
+                video_id=video_id)
+
+        sources = self._parse_json(
+            self._search_regex(
+                r'sources\s*=\s*(\[.+?\])\s*;', webpage, 'sources'),
+            video_id, transform_source=js_to_json)
+
+        formats = []
+        for source in sources:
+            if not isinstance(source, dict):
+                continue
+            source_url = source.get('file')
+            if not source_url or not isinstance(source_url, compat_str):
+                continue
+            ext = determine_ext(source_url)
+            if ext == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
+            else:
+                formats.append({
+                    'url': source_url,
+                })
+        self._sort_formats(formats)
+
+        thumbnail = self._search_regex(
+            r'image\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
+            'thumbnail', fatal=False, group='url')
+
+        return {
+            'id': video_id,
+            'title': video_id,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 2f485012f..5f829c72c 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -44,6 +44,7 @@ from .anysex import AnySexIE
 from .aol import AolIE
 from .allocine import AllocineIE
 from .aliexpress import AliExpressLiveIE
+from .apa import APAIE
 from .aparat import AparatIE
 from .appleconnect import AppleConnectIE
 from .appletrailers import (
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 0292e0458..dad951b75 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -110,6 +110,7 @@ from .xfileshare import XFileShareIE
 from .cloudflarestream import CloudflareStreamIE
 from .peertube import PeerTubeIE
 from .indavideo import IndavideoEmbedIE
+from .apa import APAIE
 
 
 class GenericIE(InfoExtractor):
@@ -2041,6 +2042,23 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            # APA embed via JWPlatform embed
+            'url': 'http://www.vol.at/blue-man-group/5593454',
+            'info_dict': {
+                'id': 'jjv85FdZ',
+                'ext': 'mp4',
+                'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
+                'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+                'thumbnail': r're:^https?://.*\.jpg$',
+                'duration': 254,
+                'timestamp': 1519211149,
+                'upload_date': '20180221',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        },
         {
             'url': 'http://share-videos.se/auto/video/83645793?uid=13',
             'md5': 'b68d276de422ab07ee1d49388103f457',
@@ -3068,6 +3086,11 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
 
+        apa_urls = APAIE._extract_urls(webpage)
+        if apa_urls:
+            return self.playlist_from_matches(
+                apa_urls, video_id, video_title, ie=APAIE.ie_key())
+
         sharevideos_urls = [mobj.group('url') for mobj in re.finditer(
             r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
             webpage)]

From a07879d6b2edc474b0595a29932726fa7aa14b3a Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 28 May 2018 00:10:01 +0100
Subject: [PATCH 114/125] [spiegel] fix info extraction (#16538)

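Instead of scraping the article page, the extractor now queries a JSON
metadata endpoint keyed on the numeric id from the URL; the 302 redirect
to spiegel.tv is still detected from the final URL of that request.
Endpoint construction, with the id taken from the first test:

    video_id = '1259285'
    metadata_url = (
        'http://www.spiegel.de/video/metadata/video-%s.json' % video_id)

The test info dicts change accordingly: the ids now come from the
underlying media platform rather than from the article URL.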
---
 youtube_dl/extractor/spiegel.py | 78 +++++++++++----------------------
 1 file changed, 25 insertions(+), 53 deletions(-)

diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index fc995e8c1..4df7f4ddc 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -11,9 +11,9 @@ from .nexx import (
 from .spiegeltv import SpiegeltvIE
 from ..compat import compat_urlparse
 from ..utils import (
-    extract_attributes,
-    unified_strdate,
-    get_element_by_attribute,
+    parse_duration,
+    strip_or_none,
+    unified_timestamp,
 )
 
 
@@ -21,35 +21,38 @@ class SpiegelIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
     _TESTS = [{
         'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
-        'md5': '2c2754212136f35fb4b19767d242f66e',
+        'md5': 'b57399839d055fccfeb9a0455c439868',
         'info_dict': {
-            'id': '1259285',
+            'id': '563747',
             'ext': 'mp4',
             'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv',
             'description': 'md5:8029d8310232196eb235d27575a8b9f4',
             'duration': 49,
             'upload_date': '20130311',
+            'timestamp': 1362994320,
         },
     }, {
         'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
-        'md5': 'f2cdf638d7aa47654e251e1aee360af1',
+        'md5': '5b6c2f4add9d62912ed5fc78a1faed80',
         'info_dict': {
-            'id': '1309159',
+            'id': '580988',
             'ext': 'mp4',
             'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers',
             'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
             'duration': 983,
             'upload_date': '20131115',
+            'timestamp': 1384546642,
         },
     }, {
         'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
-        'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
+        'md5': '97b91083a672d72976faa8433430afb9',
         'info_dict': {
-            'id': '1519126',
+            'id': '601883',
             'ext': 'mp4',
             'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
             'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
             'upload_date': '20140904',
+            'timestamp': 1409834160,
         }
     }, {
         'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
@@ -62,59 +65,28 @@ class SpiegelIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage, handle = self._download_webpage_handle(url, video_id)
+        metadata_url = 'http://www.spiegel.de/video/metadata/video-%s.json' % video_id
+        handle = self._request_webpage(metadata_url, video_id)
 
         # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
         if SpiegeltvIE.suitable(handle.geturl()):
             return self.url_result(handle.geturl(), 'Spiegeltv')
 
-        nexx_id = self._search_regex(
-            r'nexxOmniaId\s*:\s*(\d+)', webpage, 'nexx id', default=None)
-        if nexx_id:
-            domain_id = NexxIE._extract_domain_id(webpage) or '748'
-            return self.url_result(
-                'nexx:%s:%s' % (domain_id, nexx_id), ie=NexxIE.ie_key(),
-                video_id=nexx_id)
-
-        video_data = extract_attributes(self._search_regex(r'(<div[^>]+id="spVideoElements"[^>]+>)', webpage, 'video element', default=''))
-
-        title = video_data.get('data-video-title') or get_element_by_attribute('class', 'module-title', webpage)
-        description = video_data.get('data-video-teaser') or self._html_search_meta('description', webpage, 'description')
-
-        base_url = self._search_regex(
-            [r'server\s*:\s*(["\'])(?P<url>.+?)\1', r'var\s+server\s*=\s*"(?P<url>[^"]+)\"'],
-            webpage, 'server URL', group='url')
-
-        xml_url = base_url + video_id + '.xml'
-        idoc = self._download_xml(xml_url, video_id)
-
-        formats = []
-        for n in list(idoc):
-            if n.tag.startswith('type') and n.tag != 'type6':
-                format_id = n.tag.rpartition('type')[2]
-                video_url = base_url + n.find('./filename').text
-                formats.append({
-                    'format_id': format_id,
-                    'url': video_url,
-                    'width': int(n.find('./width').text),
-                    'height': int(n.find('./height').text),
-                    'abr': int(n.find('./audiobitrate').text),
-                    'vbr': int(n.find('./videobitrate').text),
-                    'vcodec': n.find('./codec').text,
-                    'acodec': 'MP4A',
-                })
-        duration = float(idoc[0].findall('./duration')[0].text)
-
-        self._check_formats(formats, video_id)
-        self._sort_formats(formats)
+        video_data = self._parse_json(self._webpage_read_content(
+            handle, metadata_url, video_id), video_id)
+        title = video_data['title']
+        nexx_id = video_data['nexxOmniaId']
+        domain_id = video_data.get('nexxOmniaDomain') or '748'
 
         return {
+            '_type': 'url_transparent',
             'id': video_id,
+            'url': 'nexx:%s:%s' % (domain_id, nexx_id),
             'title': title,
-            'description': description.strip() if description else None,
-            'duration': duration,
-            'upload_date': unified_strdate(video_data.get('data-video-date')),
-            'formats': formats,
+            'description': strip_or_none(video_data.get('teaser')),
+            'duration': parse_duration(video_data.get('duration')),
+            'timestamp': unified_timestamp(video_data.get('datum')),
+            'ie_key': NexxIE.ie_key(),
         }
 
 

From e0d42dd4b270d06a953822c091afefd946bd93f2 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 30 May 2018 13:21:07 +0100
Subject: [PATCH 115/125] [teamcoco] Fix extraction for full episodes (closes
 #16573)

---
 youtube_dl/extractor/tbs.py      |  61 ++++++------------
 youtube_dl/extractor/teamcoco.py | 102 ++++++++++++++++++-------------
 youtube_dl/extractor/turner.py   |  47 +++++++++++++-
 3 files changed, 122 insertions(+), 88 deletions(-)

diff --git a/youtube_dl/extractor/tbs.py b/youtube_dl/extractor/tbs.py
index edc31729d..784f8ed66 100644
--- a/youtube_dl/extractor/tbs.py
+++ b/youtube_dl/extractor/tbs.py
@@ -4,6 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .turner import TurnerBaseIE
+from ..compat import (
+    compat_urllib_parse_urlparse,
+    compat_parse_qs,
+)
 from ..utils import (
     float_or_none,
     int_or_none,
@@ -38,48 +42,22 @@ class TBSIE(TurnerBaseIE):
     def _real_extract(self, url):
         site, display_id = re.match(self._VALID_URL, url).groups()
         webpage = self._download_webpage(url, display_id)
-        video_data = self._parse_json(self._search_regex(
+        drupal_settings = self._parse_json(self._search_regex(
             r'<script[^>]+?data-drupal-selector="drupal-settings-json"[^>]*?>({.+?})</script>',
-            webpage, 'drupal setting'), display_id)['turner_playlist'][0]
+            webpage, 'drupal setting'), display_id)
+        video_data = drupal_settings['turner_playlist'][0]
 
         media_id = video_data['mediaID']
         title = video_data['title']
+        tokenizer_query = compat_parse_qs(compat_urllib_parse_urlparse(
+            drupal_settings['ngtv_token_url']).query)
 
-        streams_data = self._download_json(
-            'http://medium.ngtv.io/media/%s/tv' % media_id,
-            media_id)['media']['tv']
-        duration = None
-        chapters = []
-        formats = []
-        for supported_type in ('unprotected', 'bulkaes'):
-            stream_data = streams_data.get(supported_type, {})
-            m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
-            if not m3u8_url:
-                continue
-            if stream_data.get('playlistProtection') == 'spe':
-                m3u8_url = self._add_akamai_spe_token(
-                    'http://token.vgtf.net/token/token_spe',
-                    m3u8_url, media_id, {
-                        'url': url,
-                        'site_name': site[:3].upper(),
-                        'auth_required': video_data.get('authRequired') == '1',
-                    })
-            formats.extend(self._extract_m3u8_formats(
-                m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
-
-            duration = float_or_none(stream_data.get('totalRuntime') or video_data.get('duration'))
-
-            if not chapters:
-                for chapter in stream_data.get('contentSegments', []):
-                    start_time = float_or_none(chapter.get('start'))
-                    duration = float_or_none(chapter.get('duration'))
-                    if start_time is None or duration is None:
-                        continue
-                    chapters.append({
-                        'start_time': start_time,
-                        'end_time': start_time + duration,
-                    })
-        self._sort_formats(formats)
+        info = self._extract_ngtv_info(
+            media_id, tokenizer_query, {
+                'url': url,
+                'site_name': site[:3].upper(),
+                'auth_required': video_data.get('authRequired') == '1',
+            })
 
         thumbnails = []
         for image_id, image in video_data.get('images', {}).items():
@@ -98,15 +76,14 @@ class TBSIE(TurnerBaseIE):
                 })
             thumbnails.append(i)
 
-        return {
+        info.update({
             'id': media_id,
             'title': title,
             'description': strip_or_none(video_data.get('descriptionNoTags') or video_data.get('shortDescriptionNoTags')),
-            'duration': duration,
+            'duration': float_or_none(video_data.get('duration')) or info.get('duration'),
             'timestamp': int_or_none(video_data.get('created')),
             'season_number': int_or_none(video_data.get('season')),
             'episode_number': int_or_none(video_data.get('episode')),
-            'cahpters': chapters,
             'thumbnails': thumbnails,
-            'formats': formats,
-        }
+        })
+        return info
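
The tokenizer endpoint's static query now comes straight out of
drupal_settings['ngtv_token_url']; compat_parse_qs maps every parameter name to a
list of values. A Python 3 stdlib equivalent (the sample URL and parameters are
made up):

    from urllib.parse import parse_qs, urlparse

    query = parse_qs(urlparse('https://token.vgtf.net/token/token_spe?appId=x&format=json').query)
    # -> {'appId': ['x'], 'format': ['json']}
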
diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py
index 63fd4fe1c..73469cc5d 100644
--- a/youtube_dl/extractor/teamcoco.py
+++ b/youtube_dl/extractor/teamcoco.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 
 import json
 
-from .common import InfoExtractor
+from .turner import TurnerBaseIE
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -15,7 +15,7 @@ from ..utils import (
 )
 
 
-class TeamcocoIE(InfoExtractor):
+class TeamcocoIE(TurnerBaseIE):
     _VALID_URL = r'https?://teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)'
     _TESTS = [
         {
@@ -110,6 +110,8 @@ class TeamcocoIE(InfoExtractor):
           name
         }
         duration
+        turnerMediaId
+        turnerMediaAuthToken
       }
     }
     ... on NotFoundSlug {
@@ -123,53 +125,65 @@ class TeamcocoIE(InfoExtractor):
         record = response['record']
         video_id = record['id']
 
-        video_sources = self._graphql_call('''{
-  %s(id: "%s") {
-    src
-  }
-}''', 'RecordVideoSource', video_id) or {}
-
-        formats = []
-        get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
-        for format_id, src in video_sources.get('src', {}).items():
-            if not isinstance(src, dict):
-                continue
-            src_url = src.get('src')
-            if not src_url:
-                continue
-            ext = determine_ext(src_url, mimetype2ext(src.get('type')))
-            if format_id == 'hls' or ext == 'm3u8':
-                # compat_urllib_parse.urljoin does not work here
-                if src_url.startswith('/'):
-                    src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
-                formats.extend(self._extract_m3u8_formats(
-                    src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
-            else:
-                if src_url.startswith('/mp4:protected/'):
-                    # TODO Correct extraction for these files
-                    continue
-                tbr = int_or_none(self._search_regex(
-                    r'(\d+)k\.mp4', src_url, 'tbr', default=None))
-
-                formats.append({
-                    'url': src_url,
-                    'ext': ext,
-                    'tbr': tbr,
-                    'format_id': format_id,
-                    'quality': get_quality(format_id),
-                })
-        if not formats:
-            formats = self._extract_m3u8_formats(
-                record['file']['url'], video_id, 'mp4', fatal=False)
-        self._sort_formats(formats)
-
-        return {
+        info = {
             'id': video_id,
             'display_id': display_id,
-            'formats': formats,
             'title': record['title'],
             'thumbnail': record.get('thumb', {}).get('preview'),
             'description': record.get('teaser'),
             'duration': parse_duration(record.get('duration')),
             'timestamp': parse_iso8601(record.get('publishOn')),
         }
+
+        media_id = record.get('turnerMediaId')
+        if media_id:
+            self._initialize_geo_bypass({
+                'countries': ['US'],
+            })
+            info.update(self._extract_ngtv_info(media_id, {
+                'accessToken': record['turnerMediaAuthToken'],
+                'accessTokenType': 'jws',
+            }))
+        else:
+            video_sources = self._graphql_call('''{
+  %s(id: "%s") {
+    src
+  }
+}''', 'RecordVideoSource', video_id) or {}
+
+            formats = []
+            get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
+            for format_id, src in video_sources.get('src', {}).items():
+                if not isinstance(src, dict):
+                    continue
+                src_url = src.get('src')
+                if not src_url:
+                    continue
+                ext = determine_ext(src_url, mimetype2ext(src.get('type')))
+                if format_id == 'hls' or ext == 'm3u8':
+                    # compat_urllib_parse.urljoin does not work here
+                    if src_url.startswith('/'):
+                        src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
+                    formats.extend(self._extract_m3u8_formats(
+                        src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
+                else:
+                    if src_url.startswith('/mp4:protected/'):
+                        # TODO Correct extraction for these files
+                        continue
+                    tbr = int_or_none(self._search_regex(
+                        r'(\d+)k\.mp4', src_url, 'tbr', default=None))
+
+                    formats.append({
+                        'url': src_url,
+                        'ext': ext,
+                        'tbr': tbr,
+                        'format_id': format_id,
+                        'quality': get_quality(format_id),
+                    })
+            if not formats:
+                formats = self._extract_m3u8_formats(
+                    record['file']['url'], video_id, 'mp4', fatal=False)
+            self._sort_formats(formats)
+            info['formats'] = formats
+
+        return info
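
In the GraphQL fallback path, qualities turns the ordered list into a ranking
function used as the format preference; later entries rank higher, and ids not in
the list should sort below all others:

    from youtube_dl.utils import qualities

    get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
    assert get_quality('uhd') > get_quality('hd') > get_quality('low')
    assert get_quality('bogus') == -1  # unlisted ids rank lowest
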
diff --git a/youtube_dl/extractor/turner.py b/youtube_dl/extractor/turner.py
index e73b64aeb..2b7b0d6e1 100644
--- a/youtube_dl/extractor/turner.py
+++ b/youtube_dl/extractor/turner.py
@@ -9,6 +9,7 @@ from ..utils import (
     xpath_text,
     int_or_none,
     determine_ext,
+    float_or_none,
     parse_duration,
     xpath_attr,
     update_url_query,
@@ -23,14 +24,17 @@ class TurnerBaseIE(AdobePassIE):
     def _extract_timestamp(self, video_data):
         return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
 
-    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data):
+    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
         secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
         token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
         if not token:
             query = {
                 'path': secure_path,
-                'videoId': content_id,
             }
+            if custom_tokenizer_query:
+                query.update(custom_tokenizer_query)
+            else:
+                query['videoId'] = content_id
             if ap_data.get('auth_required'):
                 query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
             auth = self._download_xml(
@@ -188,3 +192,42 @@ class TurnerBaseIE(AdobePassIE):
             'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
             'is_live': is_live,
         }
+
+    def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
+        streams_data = self._download_json(
+            'http://medium.ngtv.io/media/%s/tv' % media_id,
+            media_id)['media']['tv']
+        duration = None
+        chapters = []
+        formats = []
+        for supported_type in ('unprotected', 'bulkaes'):
+            stream_data = streams_data.get(supported_type, {})
+            m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
+            if not m3u8_url:
+                continue
+            if stream_data.get('playlistProtection') == 'spe':
+                m3u8_url = self._add_akamai_spe_token(
+                    'http://token.ngtv.io/token/token_spe',
+                    m3u8_url, media_id, ap_data or {}, tokenizer_query)
+            formats.extend(self._extract_m3u8_formats(
+                m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
+
+            duration = float_or_none(stream_data.get('totalRuntime'))
+
+            if not chapters:
+                for chapter in stream_data.get('contentSegments', []):
+                    start_time = float_or_none(chapter.get('start'))
+                    chapter_duration = float_or_none(chapter.get('duration'))
+                    if start_time is None or chapter_duration is None:
+                        continue
+                    chapters.append({
+                        'start_time': start_time,
+                        'end_time': start_time + chapter_duration,
+                    })
+        self._sort_formats(formats)
+
+        return {
+            'formats': formats,
+            'chapters': chapters,
+            'duration': duration,
+        }
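
The chapter loop above pairs each segment's start with its duration to synthesize
end times. A worked run on assumed sample data:

    from youtube_dl.utils import float_or_none

    segments = [{'start': '0', 'duration': '421.5'},
                {'start': '421.5', 'duration': '561.2'}]
    chapters = []
    for chapter in segments:
        start_time = float_or_none(chapter.get('start'))
        chapter_duration = float_or_none(chapter.get('duration'))
        if start_time is None or chapter_duration is None:
            continue  # skip segments with unusable timing data
        chapters.append({
            'start_time': start_time,
            'end_time': start_time + chapter_duration,
        })
    # -> [{'start_time': 0.0, 'end_time': 421.5},
    #     {'start_time': 421.5, 'end_time': 982.7}]
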

From bc3143ac5e18731502df014e30c5fe89554e9d6f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 30 May 2018 21:52:03 +0700
Subject: [PATCH 116/125] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 280390ea0..95a5c556f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,27 @@
+version <unreleased>
+
+Core
+* [downloader/rtmp] Generalize download messages and report time elapsed
+  on finish
+* [downloader/rtmp] Gracefully handle live streams interrupted by user
+
+Extractors
+* [teamcoco] Fix extraction for full episodes (#16573)
+* [spiegel] Fix info extraction (#16538)
++ [apa] Add support for apa.at (#15041, #15672)
++ [bellmedia] Add support for bnnbloomberg.ca (#16560)
++ [9c9media] Extract MPD formats and subtitles
+* [cammodels] Use geo verification headers
++ [ufctv] Add support for authentication (#16542)
++ [cammodels] Add support for cammodels.com (#14499)
+* [utils] Fix style id extraction for namespaced id attribute in dfxp2srt
+  (#16551)
+* [soundcloud] Detect format extension (#16549)
+* [cbc] Fix playlist title extraction (#16502)
++ [tumblr] Detect and report sensitive media (#13829)
++ [tumblr] Add support for authentication (#15133)
+
+
 version 2018.05.26
 
 Core

From e425710554f1ed96504389fb526b898a942012dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 30 May 2018 21:54:30 +0700
Subject: [PATCH 117/125] release 2018.05.30

---
 .github/ISSUE_TEMPLATE.md | 6 +++---
 ChangeLog                 | 2 +-
 docs/supportedsites.md    | 3 ++-
 youtube_dl/version.py     | 2 +-
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index c4d4e534e..b47a450a4 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.26*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.26**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.05.30*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2018.05.30**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -36,7 +36,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2018.05.26
+[debug] youtube-dl version 2018.05.30
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/ChangeLog b/ChangeLog
index 95a5c556f..4e989caf7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2018.05.30
 
 Core
 * [downloader/rtmp] Generalize download messages and report time elapsed
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index b60f2ff23..c2d5401d6 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -15,7 +15,6 @@
  - **8tracks**
  - **91porn**
  - **9c9media**
- - **9c9media:stack**
  - **9gag**
  - **9now.com.au**
  - **abc.net.au**
@@ -48,6 +47,7 @@
  - **anitube.se**
  - **Anvato**
  - **AnySex**
+ - **APA**
  - **Aparat**
  - **AppleConnect**
  - **AppleDaily**: 臺灣蘋果日報
@@ -128,6 +128,7 @@
  - **BYUtv**
  - **Camdemy**
  - **CamdemyFolder**
+ - **CamModels**
  - **CamWithHer**
  - **canalc2.tv**
  - **Canalplus**: mycanal.fr and piwiplus.fr
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 2253da927..0f15738b2 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.05.26'
+__version__ = '2018.05.30'

From 4fd1437d9d617069494a471ba40341c2ad6623b6 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 30 May 2018 17:08:32 +0100
Subject: [PATCH 118/125] [rbmaradio] check formats availability (closes #16585)

---
 youtube_dl/extractor/rbmaradio.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube_dl/extractor/rbmaradio.py b/youtube_dl/extractor/rbmaradio.py
index afa7b9161..9c4d72bbd 100644
--- a/youtube_dl/extractor/rbmaradio.py
+++ b/youtube_dl/extractor/rbmaradio.py
@@ -54,6 +54,7 @@ class RBMARadioIE(InfoExtractor):
             'abr': abr,
             'vcodec': 'none',
         } for abr in (96, 128, 256)]
+        self._check_formats(formats, episode_id)
 
         description = clean_html(episode.get('longTeaser'))
         thumbnail = self._proto_relative_url(episode.get('imageURL', {}).get('landscape'))
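
_check_formats is the availability gate: formats whose URLs cannot actually be
fetched are dropped before format selection. A rough stand-alone analogue of the
idea, assuming HEAD requests suffice (youtube-dl performs the check through its own
HTTP stack and error handling):

    import urllib.request

    def filter_available(formats):
        available = []
        for f in formats:
            try:
                req = urllib.request.Request(f['url'], method='HEAD')
                urllib.request.urlopen(req, timeout=10)
                available.append(f)
            except Exception:
                continue  # drop formats whose URLs do not respond
        return available
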

From 128b58ad139f2e62274ab6a649b965f5fa01a533 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 31 May 2018 02:49:35 +0100
Subject: [PATCH 119/125] [nhl] remove old extractors

---
 youtube_dl/extractor/extractors.py |   7 +-
 youtube_dl/extractor/nhl.py        | 345 +++++------------------------
 2 files changed, 62 insertions(+), 290 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 5f829c72c..93b22a8c3 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -705,12 +705,7 @@ from .nexx import (
 from .nfb import NFBIE
 from .nfl import NFLIE
 from .nhk import NhkVodIE
-from .nhl import (
-    NHLVideocenterIE,
-    NHLNewsIE,
-    NHLVideocenterCategoryIE,
-    NHLIE,
-)
+from .nhl import NHLIE
 from .nick import (
     NickIE,
     NickBrIE,
diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py
index 62ce800c0..cf440f713 100644
--- a/youtube_dl/extractor/nhl.py
+++ b/youtube_dl/extractor/nhl.py
@@ -1,18 +1,10 @@
 from __future__ import unicode_literals
 
 import re
-import json
-import os
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urlparse,
-    compat_urllib_parse_urlencode,
-    compat_urllib_parse_urlparse,
-    compat_str,
-)
+from ..compat import compat_str
 from ..utils import (
-    unified_strdate,
     determine_ext,
     int_or_none,
     parse_iso8601,
@@ -20,236 +12,77 @@ from ..utils import (
 )
 
 
-class NHLBaseInfoExtractor(InfoExtractor):
-    @staticmethod
-    def _fix_json(json_string):
-        return json_string.replace('\\\'', '\'')
+class NHLBaseIE(InfoExtractor):
+    def _real_extract(self, url):
+        site, tmp_id = re.match(self._VALID_URL, url).groups()
+        video_data = self._download_json(
+            'https://%s/%s/%sid/v1/%s/details/web-v1.json'
+            % (self._CONTENT_DOMAIN, site[:3], 'item/' if site == 'mlb' else '', tmp_id), tmp_id)
+        if video_data.get('type') != 'video':
+            video_data = video_data['media']
+            video = video_data.get('video')
+            if video:
+                video_data = video
+            else:
+                videos = video_data.get('videos')
+                if videos:
+                    video_data = videos[0]
 
-    def _real_extract_video(self, video_id):
-        vid_parts = video_id.split(',')
-        if len(vid_parts) == 3:
-            video_id = '%s0%s%s-X-h' % (vid_parts[0][:4], vid_parts[1], vid_parts[2].rjust(4, '0'))
-        json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
-        data = self._download_json(
-            json_url, video_id, transform_source=self._fix_json)
-        return self._extract_video(data[0])
+        video_id = compat_str(video_data['id'])
+        title = video_data['title']
 
-    def _extract_video(self, info):
-        video_id = info['id']
-        self.report_extraction(video_id)
+        formats = []
+        for playback in video_data.get('playbacks', []):
+            playback_url = playback.get('url')
+            if not playback_url:
+                continue
+            ext = determine_ext(playback_url)
+            if ext == 'm3u8':
+                m3u8_formats = self._extract_m3u8_formats(
+                    playback_url, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id=playback.get('name', 'hls'), fatal=False)
+                self._check_formats(m3u8_formats, video_id)
+                formats.extend(m3u8_formats)
+            else:
+                height = int_or_none(playback.get('height'))
+                formats.append({
+                    'format_id': playback.get('name', 'http' + ('-%dp' % height if height else '')),
+                    'url': playback_url,
+                    'width': int_or_none(playback.get('width')),
+                    'height': height,
+                    'tbr': int_or_none(self._search_regex(r'_(\d+)[kK]', playback_url, 'bitrate', default=None)),
+                })
+        self._sort_formats(formats)
 
-        initial_video_url = info['publishPoint']
-        if info['formats'] == '1':
-            parsed_url = compat_urllib_parse_urlparse(initial_video_url)
-            filename, ext = os.path.splitext(parsed_url.path)
-            path = '%s_sd%s' % (filename, ext)
-            data = compat_urllib_parse_urlencode({
-                'type': 'fvod',
-                'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
+        thumbnails = []
+        cuts = video_data.get('image', {}).get('cuts') or []
+        if isinstance(cuts, dict):
+            cuts = cuts.values()
+        for thumbnail_data in cuts:
+            thumbnail_url = thumbnail_data.get('src')
+            if not thumbnail_url:
+                continue
+            thumbnails.append({
+                'url': thumbnail_url,
+                'width': int_or_none(thumbnail_data.get('width')),
+                'height': int_or_none(thumbnail_data.get('height')),
             })
-            path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
-            path_doc = self._download_xml(
-                path_url, video_id, 'Downloading final video url')
-            video_url = path_doc.find('path').text
-        else:
-            video_url = initial_video_url
-
-        join = compat_urlparse.urljoin
-        ret = {
-            'id': video_id,
-            'title': info['name'],
-            'url': video_url,
-            'description': info['description'],
-            'duration': int(info['duration']),
-            'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
-            'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),
-        }
-        if video_url.startswith('rtmp:'):
-            mobj = re.match(r'(?P<tc_url>rtmp://[^/]+/(?P<app>[a-z0-9/]+))/(?P<play_path>mp4:.*)', video_url)
-            ret.update({
-                'tc_url': mobj.group('tc_url'),
-                'play_path': mobj.group('play_path'),
-                'app': mobj.group('app'),
-                'no_resume': True,
-            })
-        return ret
-
-
-class NHLVideocenterIE(NHLBaseInfoExtractor):
-    IE_NAME = 'nhl.com:videocenter'
-    _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console|embed)?(?:\?(?:.*?[?&])?)(?:id|hlg|playlist)=(?P<id>[-0-9a-zA-Z,]+)'
-
-    _TESTS = [{
-        'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
-        'md5': 'db704a4ea09e8d3988c85e36cc892d09',
-        'info_dict': {
-            'id': '453614',
-            'ext': 'mp4',
-            'title': 'Quick clip: Weise 4-3 goal vs Flames',
-            'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
-            'duration': 18,
-            'upload_date': '20131006',
-        },
-    }, {
-        'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
-        'md5': 'd22e82bc592f52d37d24b03531ee9696',
-        'info_dict': {
-            'id': '2014020024-628-h',
-            'ext': 'mp4',
-            'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
-            'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
-            'duration': 0,
-            'upload_date': '20141011',
-        },
-    }, {
-        'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
-        'md5': 'c78fc64ea01777e426cfc202b746c825',
-        'info_dict': {
-            'id': '58665',
-            'ext': 'flv',
-            'title': 'Classic Game In Six - April 22, 1979',
-            'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
-            'duration': 400,
-            'upload_date': '20100129'
-        },
-    }, {
-        'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
-        'only_matching': True,
-    }, {
-        'url': 'http://video.nhl.com/videocenter/?id=736722',
-        'only_matching': True,
-    }, {
-        'url': 'http://video.nhl.com/videocenter/console?hlg=20142015,2,299&lang=en',
-        'md5': '076fcb88c255154aacbf0a7accc3f340',
-        'info_dict': {
-            'id': '2014020299-X-h',
-            'ext': 'mp4',
-            'title': 'Penguins at Islanders / Game Highlights',
-            'description': 'Home broadcast - Pittsburgh Penguins at New York Islanders - November 22, 2014',
-            'duration': 268,
-            'upload_date': '20141122',
-        }
-    }, {
-        'url': 'http://video.oilers.nhl.com/videocenter/console?id=691469&catid=4',
-        'info_dict': {
-            'id': '691469',
-            'ext': 'mp4',
-            'title': 'RAW | Craig MacTavish Full Press Conference',
-            'description': 'Oilers GM Craig MacTavish addresses the media at Rexall Place on Friday.',
-            'upload_date': '20141205',
-        },
-        'params': {
-            'skip_download': True,  # Requires rtmpdump
-        }
-    }, {
-        'url': 'http://video.nhl.com/videocenter/embed?playlist=836127',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        return self._real_extract_video(video_id)
-
-
-class NHLNewsIE(NHLBaseInfoExtractor):
-    IE_NAME = 'nhl.com:news'
-    IE_DESC = 'NHL news'
-    _VALID_URL = r'https?://(?:.+?\.)?nhl\.com/(?:ice|club)/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
-
-    _TESTS = [{
-        'url': 'http://www.nhl.com/ice/news.htm?id=750727',
-        'md5': '4b3d1262e177687a3009937bd9ec0be8',
-        'info_dict': {
-            'id': '736722',
-            'ext': 'mp4',
-            'title': 'Cal Clutterbuck has been fined $2,000',
-            'description': 'md5:45fe547d30edab88b23e0dd0ab1ed9e6',
-            'duration': 37,
-            'upload_date': '20150128',
-        },
-    }, {
-        # iframe embed
-        'url': 'http://sabres.nhl.com/club/news.htm?id=780189',
-        'md5': '9f663d1c006c90ac9fb82777d4294e12',
-        'info_dict': {
-            'id': '836127',
-            'ext': 'mp4',
-            'title': 'Morning Skate: OTT vs. BUF (9/23/15)',
-            'description': "Brian Duff chats with Tyler Ennis prior to Buffalo's first preseason home game.",
-            'duration': 93,
-            'upload_date': '20150923',
-        },
-    }]
-
-    def _real_extract(self, url):
-        news_id = self._match_id(url)
-        webpage = self._download_webpage(url, news_id)
-        video_id = self._search_regex(
-            [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'",
-             r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)'],
-            webpage, 'video id')
-        return self._real_extract_video(video_id)
-
-
-class NHLVideocenterCategoryIE(NHLBaseInfoExtractor):
-    IE_NAME = 'nhl.com:videocenter:category'
-    IE_DESC = 'NHL videocenter category'
-    _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?[^(id=)]*catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
-    _TEST = {
-        'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
-        'info_dict': {
-            'id': '999',
-            'title': 'Highlights',
-        },
-        'playlist_count': 12,
-    }
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        team = mobj.group('team')
-        webpage = self._download_webpage(url, team)
-        cat_id = self._search_regex(
-            [r'var defaultCatId = "(.+?)";',
-             r'{statusIndex:0,index:0,.*?id:(.*?),'],
-            webpage, 'category id')
-        playlist_title = self._html_search_regex(
-            r'tab0"[^>]*?>(.*?)</td>',
-            webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
-
-        data = compat_urllib_parse_urlencode({
-            'cid': cat_id,
-            # This is the default value
-            'count': 12,
-            'ptrs': 3,
-            'format': 'json',
-        })
-        path = '/videocenter/servlets/browse?' + data
-        request_url = compat_urlparse.urljoin(url, path)
-        response = self._download_webpage(request_url, playlist_title)
-        response = self._fix_json(response)
-        if not response.strip():
-            self._downloader.report_warning('Got an empty response, trying '
-                                            'adding the "newvideos" parameter')
-            response = self._download_webpage(request_url + '&newvideos=true',
-                                              playlist_title)
-            response = self._fix_json(response)
-        videos = json.loads(response)
 
         return {
-            '_type': 'playlist',
-            'title': playlist_title,
-            'id': cat_id,
-            'entries': [self._extract_video(v) for v in videos],
+            'id': video_id,
+            'title': title,
+            'description': video_data.get('description'),
+            'timestamp': parse_iso8601(video_data.get('date')),
+            'duration': parse_duration(video_data.get('duration')),
+            'thumbnails': thumbnails,
+            'formats': formats,
         }
 
 
-class NHLIE(InfoExtractor):
+class NHLIE(NHLBaseIE):
     IE_NAME = 'nhl.com'
     _VALID_URL = r'https?://(?:www\.)?(?P<site>nhl|wch2016)\.com/(?:[^/]+/)*c-(?P<id>\d+)'
-    _SITES_MAP = {
-        'nhl': 'nhl',
-        'wch2016': 'wch',
-    }
+    _CONTENT_DOMAIN = 'nhl.bamcontent.com'
     _TESTS = [{
         # type=video
         'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503',
@@ -293,59 +126,3 @@ class NHLIE(InfoExtractor):
         'url': 'https://www.wch2016.com/news/3-stars-team-europe-vs-team-canada/c-282195068',
         'only_matching': True,
     }]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        tmp_id, site = mobj.group('id'), mobj.group('site')
-        video_data = self._download_json(
-            'https://nhl.bamcontent.com/%s/id/v1/%s/details/web-v1.json'
-            % (self._SITES_MAP[site], tmp_id), tmp_id)
-        if video_data.get('type') == 'article':
-            video_data = video_data['media']
-
-        video_id = compat_str(video_data['id'])
-        title = video_data['title']
-
-        formats = []
-        for playback in video_data.get('playbacks', []):
-            playback_url = playback.get('url')
-            if not playback_url:
-                continue
-            ext = determine_ext(playback_url)
-            if ext == 'm3u8':
-                m3u8_formats = self._extract_m3u8_formats(
-                    playback_url, video_id, 'mp4', 'm3u8_native',
-                    m3u8_id=playback.get('name', 'hls'), fatal=False)
-                self._check_formats(m3u8_formats, video_id)
-                formats.extend(m3u8_formats)
-            else:
-                height = int_or_none(playback.get('height'))
-                formats.append({
-                    'format_id': playback.get('name', 'http' + ('-%dp' % height if height else '')),
-                    'url': playback_url,
-                    'width': int_or_none(playback.get('width')),
-                    'height': height,
-                })
-        self._sort_formats(formats, ('preference', 'width', 'height', 'tbr', 'format_id'))
-
-        thumbnails = []
-        for thumbnail_id, thumbnail_data in video_data.get('image', {}).get('cuts', {}).items():
-            thumbnail_url = thumbnail_data.get('src')
-            if not thumbnail_url:
-                continue
-            thumbnails.append({
-                'id': thumbnail_id,
-                'url': thumbnail_url,
-                'width': int_or_none(thumbnail_data.get('width')),
-                'height': int_or_none(thumbnail_data.get('height')),
-            })
-
-        return {
-            'id': video_id,
-            'title': title,
-            'description': video_data.get('description'),
-            'timestamp': parse_iso8601(video_data.get('date')),
-            'duration': parse_duration(video_data.get('duration')),
-            'thumbnails': thumbnails,
-            'formats': formats,
-        }
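
All four legacy classes collapse into one NHLBaseIE._real_extract driven by
_CONTENT_DOMAIN and the site group from _VALID_URL. For the NHL case (id taken from
the first test above), the details URL works out to:

    content_domain = 'nhl.bamcontent.com'
    site, tmp_id = 'nhl', '43663503'
    url = 'https://%s/%s/%sid/v1/%s/details/web-v1.json' % (
        content_domain, site[:3], 'item/' if site == 'mlb' else '', tmp_id)
    assert url == 'https://nhl.bamcontent.com/nhl/id/v1/43663503/details/web-v1.json'
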

From acca2ac7f3f4c78bce775d47736caa63e6872e26 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 31 May 2018 02:50:14 +0100
Subject: [PATCH 120/125] [mlb] improve extraction (closes #16587)

---
 youtube_dl/extractor/mlb.py | 105 +++++++++---------------------------
 1 file changed, 24 insertions(+), 81 deletions(-)

diff --git a/youtube_dl/extractor/mlb.py b/youtube_dl/extractor/mlb.py
index 675ff6873..b907f6b49 100644
--- a/youtube_dl/extractor/mlb.py
+++ b/youtube_dl/extractor/mlb.py
@@ -1,96 +1,90 @@
 from __future__ import unicode_literals
 
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    parse_duration,
-    parse_iso8601,
-)
+from .nhl import NHLBaseIE
 
 
-class MLBIE(InfoExtractor):
+class MLBIE(NHLBaseIE):
     _VALID_URL = r'''(?x)
                     https?://
-                        (?:[\da-z_-]+\.)*mlb\.com/
+                        (?:[\da-z_-]+\.)*(?P<site>mlb)\.com/
                         (?:
                             (?:
-                                (?:.*?/)?video/(?:topic/[\da-z_-]+/)?(?:v|.*?/c-)|
+                                (?:[^/]+/)*c-|
                                 (?:
                                     shared/video/embed/(?:embed|m-internal-embed)\.html|
                                     (?:[^/]+/)+(?:play|index)\.jsp|
                                 )\?.*?\bcontent_id=
                             )
-                            (?P<id>n?\d+)|
-                            (?:[^/]+/)*(?P<path>[^/]+)
+                            (?P<id>\d+)
                         )
                     '''
+    _CONTENT_DOMAIN = 'content.mlb.com'
     _TESTS = [
         {
-            'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
-            'md5': 'ff56a598c2cf411a9a38a69709e97079',
+            'url': 'https://www.mlb.com/mariners/video/ackleys-spectacular-catch/c-34698933',
+            'md5': '632358dacfceec06bad823b83d21df2d',
             'info_dict': {
                 'id': '34698933',
                 'ext': 'mp4',
                 'title': "Ackley's spectacular catch",
                 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',
                 'duration': 66,
-                'timestamp': 1405980600,
-                'upload_date': '20140721',
+                'timestamp': 1405995000,
+                'upload_date': '20140722',
                 'thumbnail': r're:^https?://.*\.jpg$',
             },
         },
         {
-            'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',
-            'md5': 'd9c022c10d21f849f49c05ae12a8a7e9',
+            'url': 'https://www.mlb.com/video/stanton-prepares-for-derby/c-34496663',
+            'md5': 'bf2619bf9cacc0a564fc35e6aeb9219f',
             'info_dict': {
                 'id': '34496663',
                 'ext': 'mp4',
                 'title': 'Stanton prepares for Derby',
                 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',
                 'duration': 46,
-                'timestamp': 1405105800,
+                'timestamp': 1405120200,
                 'upload_date': '20140711',
                 'thumbnail': r're:^https?://.*\.jpg$',
             },
         },
         {
-            'url': 'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby',
-            'md5': '0e6e73d509321e142409b695eadd541f',
+            'url': 'https://www.mlb.com/video/cespedes-repeats-as-derby-champ/c-34578115',
+            'md5': '99bb9176531adc600b90880fb8be9328',
             'info_dict': {
                 'id': '34578115',
                 'ext': 'mp4',
                 'title': 'Cespedes repeats as Derby champ',
                 'description': 'md5:08df253ce265d4cf6fb09f581fafad07',
                 'duration': 488,
-                'timestamp': 1405399936,
+                'timestamp': 1405414336,
                 'upload_date': '20140715',
                 'thumbnail': r're:^https?://.*\.jpg$',
             },
         },
         {
-            'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance',
-            'md5': 'b8fd237347b844365d74ea61d4245967',
+            'url': 'https://www.mlb.com/video/bautista-on-home-run-derby/c-34577915',
+            'md5': 'da8b57a12b060e7663ee1eebd6f330ec',
             'info_dict': {
                 'id': '34577915',
                 'ext': 'mp4',
                 'title': 'Bautista on Home Run Derby',
                 'description': 'md5:b80b34031143d0986dddc64a8839f0fb',
                 'duration': 52,
-                'timestamp': 1405390722,
+                'timestamp': 1405405122,
                 'upload_date': '20140715',
                 'thumbnail': r're:^https?://.*\.jpg$',
             },
         },
         {
-            'url': 'http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer',
-            'md5': 'aafaf5b0186fee8f32f20508092f8111',
+            'url': 'https://www.mlb.com/news/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer/c-118550098',
+            'md5': 'e09e37b552351fddbf4d9e699c924d68',
             'info_dict': {
                 'id': '75609783',
                 'ext': 'mp4',
                 'title': 'Must C: Pillar climbs for catch',
                 'description': '4/15/15: Blue Jays outfielder Kevin Pillar continues his defensive dominance by climbing the wall in left to rob Tim Beckham of a home run',
-                'timestamp': 1429124820,
+                'timestamp': 1429139220,
                 'upload_date': '20150415',
             }
         },
@@ -111,7 +105,7 @@ class MLBIE(InfoExtractor):
             'only_matching': True,
         },
         {
-            'url': 'http://m.cardinals.mlb.com/stl/video/v51175783/atlstl-piscotty-makes-great-sliding-catch-on-line/?partnerId=as_mlb_20150321_42500876&adbid=579409712979910656&adbpl=tw&adbpr=52847728',
+            'url': 'https://www.mlb.com/cardinals/video/piscottys-great-sliding-catch/c-51175783',
             'only_matching': True,
         },
         {
@@ -120,58 +114,7 @@ class MLBIE(InfoExtractor):
             'only_matching': True,
         },
         {
-            'url': 'http://washington.nationals.mlb.com/mlb/gameday/index.jsp?c_id=was&gid=2015_05_09_atlmlb_wasmlb_1&lang=en&content_id=108309983&mode=video#',
+            'url': 'https://www.mlb.com/cut4/carlos-gomez-borrowed-sunglasses-from-an-as-fan/c-278912842',
             'only_matching': True,
         }
     ]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        if not video_id:
-            video_path = mobj.group('path')
-            webpage = self._download_webpage(url, video_path)
-            video_id = self._search_regex(
-                [r'data-video-?id="(\d+)"', r'content_id=(\d+)'], webpage, 'video id')
-
-        detail = self._download_xml(
-            'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml'
-            % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id)
-
-        title = detail.find('./headline').text
-        description = detail.find('./big-blurb').text
-        duration = parse_duration(detail.find('./duration').text)
-        timestamp = parse_iso8601(detail.attrib['date'][:-5])
-
-        thumbnails = [{
-            'url': thumbnail.text,
-        } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]
-
-        formats = []
-        for media_url in detail.findall('./url'):
-            playback_scenario = media_url.attrib['playback_scenario']
-            fmt = {
-                'url': media_url.text,
-                'format_id': playback_scenario,
-            }
-            m = re.search(r'(?P<vbr>\d+)K_(?P<width>\d+)X(?P<height>\d+)', playback_scenario)
-            if m:
-                fmt.update({
-                    'vbr': int(m.group('vbr')) * 1000,
-                    'width': int(m.group('width')),
-                    'height': int(m.group('height')),
-                })
-            formats.append(fmt)
-
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'duration': duration,
-            'timestamp': timestamp,
-            'formats': formats,
-            'thumbnails': thumbnails,
-        }
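
MLBIE now inherits NHLBaseIE._real_extract wholesale; the subclass only supplies a
_VALID_URL with a site group and the content domain. With site == 'mlb', the shared
format string inserts the extra 'item/' path segment, which is what the conditional
in the base class is for (id taken from the first test above):

    content_domain = 'content.mlb.com'
    site, tmp_id = 'mlb', '34698933'
    url = 'https://%s/%s/%sid/v1/%s/details/web-v1.json' % (
        content_domain, site[:3], 'item/' if site == 'mlb' else '', tmp_id)
    assert url == 'https://content.mlb.com/mlb/item/id/v1/34698933/details/web-v1.json'
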

From 3a8e3730c198dd7cb8be76f04d101c66361da6b9 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 31 May 2018 11:40:37 +0100
Subject: [PATCH 121/125] [francetv] add support for
 sport.francetvinfo.fr (closes #15645)

---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/francetv.py   | 25 +++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 93b22a8c3..b05afd101 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -381,6 +381,7 @@ from .francetv import (
     FranceTVSiteIE,
     FranceTVEmbedIE,
     FranceTVInfoIE,
+    FranceTVInfoSportIE,
     FranceTVJeunesseIE,
     GenerationWhatIE,
     CultureboxIE,
diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index c02cd03de..6fc6b0da0 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -379,6 +379,31 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
         return self._make_url_result(video_id, catalogue)
 
 
+class FranceTVInfoSportIE(FranceTVBaseInfoExtractor):
+    IE_NAME = 'sport.francetvinfo.fr'
+    _VALID_URL = r'https?://sport\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://sport.francetvinfo.fr/les-jeux-olympiques/retour-sur-les-meilleurs-moments-de-pyeongchang-2018',
+        'info_dict': {
+            'id': '6e49080e-3f45-11e8-b459-000d3a2439ea',
+            'ext': 'mp4',
+            'title': 'Retour sur les meilleurs moments de Pyeongchang 2018',
+            'timestamp': 1523639962,
+            'upload_date': '20180413',
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'add_ie': [FranceTVIE.ie_key()],
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        video_id = self._search_regex(r'data-video="([^"]+)"', webpage, 'video_id')
+        return self._make_url_result(video_id, 'Sport-web')
+
+
 class GenerationWhatIE(InfoExtractor):
     IE_NAME = 'france2.fr:generation-what'
     _VALID_URL = r'https?://generation-what\.francetv\.fr/[^/]+/video/(?P<id>[^/?#&]+)'
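
The new extractor is a thin scrape-and-delegate: it pulls the video UUID out of a
data-video attribute and hands it to _make_url_result with the 'Sport-web'
catalogue. The regex in isolation (sample markup assumed, id taken from the test
above):

    import re

    webpage = '<div data-video="6e49080e-3f45-11e8-b459-000d3a2439ea"></div>'
    video_id = re.search(r'data-video="([^"]+)"', webpage).group(1)
    # -> '6e49080e-3f45-11e8-b459-000d3a2439ea'
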

From c3f75e2454051021c33f88c982913cba8c651188 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 31 May 2018 12:39:45 +0100
Subject: [PATCH 122/125] [audimedia] fix extraction (closes #15309)

---
 youtube_dl/extractor/audimedia.py | 48 +++++++++++++++++--------------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/youtube_dl/extractor/audimedia.py b/youtube_dl/extractor/audimedia.py
index aa6925623..6bd48ef15 100644
--- a/youtube_dl/extractor/audimedia.py
+++ b/youtube_dl/extractor/audimedia.py
@@ -5,13 +5,12 @@ from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     parse_iso8601,
-    sanitized_Request,
 )
 
 
 class AudiMediaIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?P<id>[^/?#]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?:video/)?(?P<id>[^/?#]+)'
+    _TESTS = [{
         'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
         'md5': '79a8b71c46d49042609795ab59779b66',
         'info_dict': {
@@ -24,41 +23,46 @@ class AudiMediaIE(InfoExtractor):
             'duration': 74022,
             'view_count': int,
         }
-    }
-    # extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken)
-    _AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2'
+    }, {
+        'url': 'https://www.audi-mediacenter.com/en/audimediatv/video/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-2991',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
 
         raw_payload = self._search_regex([
-            r'class="amtv-embed"[^>]+id="([^"]+)"',
-            r'class=\\"amtv-embed\\"[^>]+id=\\"([^"]+)\\"',
+            r'class="amtv-embed"[^>]+id="([0-9a-z-]+)"',
+            r'id="([0-9a-z-]+)"[^>]+class="amtv-embed"',
+            r'class=\\"amtv-embed\\"[^>]+id=\\"([0-9a-z-]+)\\"',
+            r'id=\\"([0-9a-z-]+)\\"[^>]+class=\\"amtv-embed\\"',
+            r'id=(?:\\)?"(amtve-[a-z]-\d+-[a-z]{2})',
         ], webpage, 'raw payload')
-        _, stage_mode, video_id, lang = raw_payload.split('-')
+        _, stage_mode, video_id, _ = raw_payload.split('-')
 
         # TODO: handle s and e stage_mode (live streams and ended live streams)
         if stage_mode not in ('s', 'e'):
-            request = sanitized_Request(
-                'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang),
-                headers={'X-Auth-Token': self._AUTH_TOKEN})
-            json_data = self._download_json(request, video_id)['results']
+            video_data = self._download_json(
+                'https://www.audimedia.tv/api/video/v1/videos/' + video_id,
+                video_id, query={
+                    'embed[]': ['video_versions', 'thumbnail_image'],
+                })['results']
             formats = []
 
-            stream_url_hls = json_data.get('stream_url_hls')
+            stream_url_hls = video_data.get('stream_url_hls')
             if stream_url_hls:
                 formats.extend(self._extract_m3u8_formats(
                     stream_url_hls, video_id, 'mp4',
                     entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
 
-            stream_url_hds = json_data.get('stream_url_hds')
+            stream_url_hds = video_data.get('stream_url_hds')
             if stream_url_hds:
                 formats.extend(self._extract_f4m_formats(
                     stream_url_hds + '?hdcore=3.4.0',
                     video_id, f4m_id='hds', fatal=False))
 
-            for video_version in json_data.get('video_versions'):
+            for video_version in video_data.get('video_versions', []):
                 video_version_url = video_version.get('download_url') or video_version.get('stream_url')
                 if not video_version_url:
                     continue
@@ -79,11 +83,11 @@ class AudiMediaIE(InfoExtractor):
 
             return {
                 'id': video_id,
-                'title': json_data['title'],
-                'description': json_data.get('subtitle'),
-                'thumbnail': json_data.get('thumbnail_image', {}).get('file'),
-                'timestamp': parse_iso8601(json_data.get('publication_date')),
-                'duration': int_or_none(json_data.get('duration')),
-                'view_count': int_or_none(json_data.get('view_count')),
+                'title': video_data['title'],
+                'description': video_data.get('subtitle'),
+                'thumbnail': video_data.get('thumbnail_image', {}).get('file'),
+                'timestamp': parse_iso8601(video_data.get('publication_date')),
+                'duration': int_or_none(video_data.get('duration')),
+                'view_count': int_or_none(video_data.get('view_count')),
                 'formats': formats,
             }
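
For reference, the query dict handed to _download_json above replaces the
hand-built URL string from the old code; youtube-dl routes it through its
update_url_query() helper, which (this sketch assumes) serializes list
values by repeating the key, the same way the stdlib urlencode does with
doseq=True. A minimal standalone illustration -- the id '2991' is an
example value only, the real one comes out of the amtv-embed payload:

    # Mirrors how a list-valued query parameter expands into the request
    # URL; this is a stdlib approximation, not youtube-dl's own helper.
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    url = 'https://www.audimedia.tv/api/video/v1/videos/2991'  # example id
    # The key repeats once per list element; '[]' gets percent-encoded,
    # i.e. embed%5B%5D=video_versions&embed%5B%5D=thumbnail_image
    print(url + '?' + urlencode(
        {'embed[]': ['video_versions', 'thumbnail_image']}, doseq=True))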

From 0bfdcc14956557294d8b5ab7309a5f31b3710888 Mon Sep 17 00:00:00 2001
From: DroidFreak32 <rushabshah32@gmail.com>
Date: Thu, 31 May 2018 20:31:44 +0530
Subject: [PATCH 123/125] [openload] Add support for oload.win and
 oload.download

---
 youtube_dl/extractor/openload.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py
index d0bdd60b8..702f86b44 100644
--- a/youtube_dl/extractor/openload.py
+++ b/youtube_dl/extractor/openload.py
@@ -243,7 +243,7 @@ class PhantomJSwrapper(object):
 
 
 class OpenloadIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?:openload\.(?:co|io|link)|oload\.(?:tv|stream|site|xyz))/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:openload\.(?:co|io|link)|oload\.(?:tv|stream|site|xyz|win|download))/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
 
     _TESTS = [{
         'url': 'https://openload.co/f/kUEfGclsU9o',
@@ -301,6 +301,12 @@ class OpenloadIE(InfoExtractor):
     }, {
         'url': 'https://oload.xyz/f/WwRBpzW8Wtk',
         'only_matching': True,
+    }, {
+        'url': 'https://oload.win/f/kUEfGclsU9o',
+        'only_matching': True,
+    }, {
+        'url': 'https://oload.download/f/kUEfGclsU9o',
+        'only_matching': True,
     }]
 
     _USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
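
The two only_matching tests exercise the widened pattern. A quick
standalone check, with the regex copied verbatim from the patch, confirms
that both new mirrors match and that the id group still captures:

    # Sanity-check the extended _VALID_URL against the new test URLs.
    import re

    VALID_URL = r'https?://(?:www\.)?(?:openload\.(?:co|io|link)|oload\.(?:tv|stream|site|xyz|win|download))/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'

    for url in (
        'https://oload.win/f/kUEfGclsU9o',
        'https://oload.download/f/kUEfGclsU9o',
    ):
        m = re.match(VALID_URL, url)
        assert m and m.group('id') == 'kUEfGclsU9o'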

From 2593725a9bd1347ab54435dc0b48dd7b878f38c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 1 Jun 2018 05:16:00 +0700
Subject: [PATCH 124/125] [twitter:card] Add support for another endpoint
 (closes #16586)

---
 youtube_dl/extractor/twitter.py | 49 +++++++++++++++++++++++++++++----
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/twitter.py b/youtube_dl/extractor/twitter.py
index d7e425041..4a77e792e 100644
--- a/youtube_dl/extractor/twitter.py
+++ b/youtube_dl/extractor/twitter.py
@@ -63,7 +63,7 @@ class TwitterCardIE(TwitterBaseIE):
                 'id': '623160978427936768',
                 'ext': 'mp4',
                 'title': 'Twitter web player',
-                'thumbnail': r're:^https?://.*(?:\bformat=|\.)jpg',
+                'thumbnail': r're:^https?://.*$',
             },
         },
         {
@@ -223,15 +223,38 @@ class TwitterCardIE(TwitterBaseIE):
                 formats.extend(self._extract_mobile_formats(username, video_id))
 
             if formats:
+                title = self._search_regex(r'<title>([^<]+)</title>', webpage, 'title')
+                thumbnail = config.get('posterImageUrl') or config.get('image_src')
+                duration = float_or_none(config.get('duration'), scale=1000) or duration
                 break
 
+        if not formats:
+            config = self._download_json(
+                'https://api.twitter.com/1.1/videos/tweet/config/%s.json' % video_id,
+                video_id, headers={
+                    'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAIK1zgAAAAAA2tUWuhGZ2JceoId5GwYWU5GspY4%3DUq7gzFoCZs1QfwGoVdvSac3IniczZEYXIcDyumCauIXpcAPorE',
+                })
+            track = config['track']
+            vmap_url = track.get('vmapUrl')
+            if vmap_url:
+                formats = self._extract_formats_from_vmap_url(vmap_url, video_id)
+            else:
+                playback_url = track['playbackUrl']
+                if determine_ext(playback_url) == 'm3u8':
+                    formats = self._extract_m3u8_formats(
+                        playback_url, video_id, 'mp4',
+                        entry_protocol='m3u8_native', m3u8_id='hls')
+                else:
+                    formats = [{
+                        'url': playback_url,
+                    }]
+            title = 'Twitter web player'
+            thumbnail = config.get('posterImage')
+            duration = float_or_none(track.get('durationMs'), scale=1000)
+
         self._remove_duplicate_formats(formats)
         self._sort_formats(formats)
 
-        title = self._search_regex(r'<title>([^<]+)</title>', webpage, 'title')
-        thumbnail = config.get('posterImageUrl') or config.get('image_src')
-        duration = float_or_none(config.get('duration'), scale=1000) or duration
-
         return {
             'id': video_id,
             'title': title,
@@ -375,6 +398,22 @@ class TwitterIE(InfoExtractor):
         'params': {
             'skip_download': True,  # requires ffmpeg
         },
+    }, {
+        # card via api.twitter.com/1.1/videos/tweet/config
+        'url': 'https://twitter.com/LisPower1/status/1001551623938805763',
+        'info_dict': {
+            'id': '1001551623938805763',
+            'ext': 'mp4',
+            'title': 're:.*?Shep is on a roll today.*?',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'description': 'md5:63b036c228772523ae1924d5f8e5ed6b',
+            'uploader': 'Lis Power',
+            'uploader_id': 'LisPower1',
+            'duration': 111.278,
+        },
+        'params': {
+            'skip_download': True,  # requires ffmpeg
+        },
     }]
 
     def _real_extract(self, url):
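
In outline, the fallback added above prefers a VMAP playlist, then an HLS
manifest, then a direct media URL from the tweet config endpoint. A
condensed sketch of just that branch order -- pick_source and its inputs
are illustrative, not extractor code, and youtube-dl's determine_ext() is
approximated here with a plain suffix check:

    # Branch order of the new api.twitter.com/1.1/videos/tweet/config
    # fallback: vmapUrl first, then m3u8 playbackUrl, then direct URL.
    def pick_source(track):
        vmap_url = track.get('vmapUrl')
        if vmap_url:
            return 'vmap', vmap_url
        # Like the patch, a missing playbackUrl is a hard error here.
        playback_url = track['playbackUrl']
        if playback_url.split('?')[0].endswith('.m3u8'):
            return 'hls', playback_url
        return 'direct', playback_url

    assert pick_source({'vmapUrl': 'https://example.com/v.vmap'})[0] == 'vmap'
    assert pick_source({'playbackUrl': 'https://example.com/v.m3u8'})[0] == 'hls'
    assert pick_source({'playbackUrl': 'https://example.com/v.mp4'})[0] == 'direct'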

From 926d97fc6b018a25ea777dfcfb9a84a10920c2b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 1 Jun 2018 05:17:49 +0700
Subject: [PATCH 125/125] [9c9media] PEP 8

---
 youtube_dl/extractor/ninecninemedia.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/youtube_dl/extractor/ninecninemedia.py b/youtube_dl/extractor/ninecninemedia.py
index 875665d43..65754c5e7 100644
--- a/youtube_dl/extractor/ninecninemedia.py
+++ b/youtube_dl/extractor/ninecninemedia.py
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     parse_iso8601,
     float_or_none,