Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2025-12-14 10:02:42 +01:00)

Compare commits: 67 commits, 2017.10.29 ... 2017.11.26
Commits in this range (abbreviated SHA1):

5ddeb7702a, 6c07f0b288, e94d1adc36, d08dcd2dbd, 7512aa986f, 93f3f10cdc,
87dac57cf6, b485d5d6bf, a238a868ba, c0f647a179, 6ff27b8d5a, 9ef909f2b2,
8cfbcfab9a, b7785cf156, 9105523818, dbb25af657, fe4bfe36e1, 6f5c598a28,
cd9ff4ec5b, c6c6a64aa5, e0a8686f48, 6049176471, 805f5bf759, 32ad4f3faf,
6899b1d9e8, 939be9adfe, 2688664762, 8f63941104, a9efdf3d4a, f610dbb05f,
38db52adf3, 3192d4bc7a, 9cbd4dda10, 08e45b39e7, fae0eb42ec, ea2295842f,
a2b6aba8de, ff31f2d5c3, 0987f2ddb2, 5871ebac47, 05dee6c520, 27adc9ec65,
388beb86e0, d4e31b72b9, 5fc12b9549, af85ce29c6, e4d9586562, 79d1f8ed68,
a5203935d6, 59d2e6d04f, a9543e37c8, 61fb07e156, 4222346fb2, cc6a960e13,
f34b841b51, e0998333fa, 909191de91, 477c97f86b, 6e71bbf4ab, 181e381fda,
187ee66c94, 48107c198b, cd670befc4, 44cca168cc, b0f4331002, 044eeb1455,
8fe767e072
.github/ISSUE_TEMPLATE.md (vendored): 6 changed lines

@@ -6,8 +6,8 @@
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.10.29*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.10.29**
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.11.26*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.11.26**

### Before submitting an *issue* make sure you have:
- [ ] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections

@@ -35,7 +35,7 @@ Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2017.10.29
[debug] youtube-dl version 2017.11.26
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}
ChangeLog: 69 changed lines

@@ -1,3 +1,72 @@
version 2017.11.26

Core
* [extractor/common] Use final URL when dumping request (#14769)

Extractors
* [fczenit] Fix extraction
- [firstpost] Remove extractor
* [freespeech] Fix extraction
* [nexx] Extract more formats
+ [openload] Add support for openload.link (#14763)
* [empflix] Relax URL regular expression
* [empflix] Fix extraction
* [tnaflix] Don't modify download URLs (#14811)
- [gamersyde] Remove extractor
* [francetv:generationwhat] Fix extraction
+ [massengeschmacktv] Add support for Massengeschmack TV
* [fox9] Fix extraction
* [faz] Fix extraction and add support for Perform Group embeds (#14714)
+ [performgroup] Add support for performgroup.com
+ [jwplatform] Add support for iframes (#14828)
* [culturebox] Fix extraction (#14827)
* [youku] Fix extraction; update ccode (#14815)
* [livestream] Make SMIL extraction non-fatal (#14792)
+ [drtuber] Add support for mobile URLs (#14772)
+ [spankbang] Add support for mobile URLs (#14771)
* [instagram] Fix description, timestamp and counters extraction (#14755)


version 2017.11.15

Core
* [common] Skip Apple FairPlay m3u8 manifests (#14741)
* [YoutubeDL] Fix playlist range optimization for --playlist-items (#14740)

Extractors
* [vshare] Capture and output error message
* [vshare] Fix extraction (#14473)
* [crunchyroll] Extract old RTMP formats
* [tva] Fix extraction (#14736)
* [gamespot] Lower preference of HTTP formats (#14652)
* [instagram:user] Fix extraction (#14699)
* [ccma] Fix typo (#14730)
- Remove sensitive data from logging in messages
* [instagram:user] Fix extraction (#14699)
+ [gamespot] Add support for article URLs (#14652)
* [gamespot] Skip Brightcove Once HTTP formats (#14652)
* [cartoonnetwork] Update tokenizer_src (#14666)
+ [wsj] Recognize another URL pattern (#14704)
* [pandatv] Update API URL and sign format URLs (#14693)
* [crunchyroll] Use old login method (#11572)


version 2017.11.06

Core
+ [extractor/common] Add protocol for f4m formats
* [f4m] Prefer baseURL for relative URLs (#14660)
* [extractor/common] Respect URL query in _extract_wowza_formats (#14645)

Extractors
+ [hotstar:playlist] Add support for playlists (#12465)
* [hotstar] Bypass geo restriction (#14672)
- [22tracks] Remove extractor (#11024, #14628)
+ [skysport] Add support for ooyala videos protected with embed_token (#14641)
* [gamespot] Extract formats referenced with new data fields (#14652)
* [spankbang] Detect unavailable videos (#14644)


version 2017.10.29

Core
Makefile: 1 changed line

@@ -110,7 +110,6 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
--exclude '*~' \
--exclude '__pycache__' \
--exclude '.git' \
--exclude 'testdata' \
--exclude 'docs/_build' \
-- \
bin devscripts test youtube_dl docs \
@@ -3,8 +3,6 @@
|
||||
- **1up.com**
|
||||
- **20min**
|
||||
- **220.ro**
|
||||
- **22tracks:genre**
|
||||
- **22tracks:track**
|
||||
- **24video**
|
||||
- **3qsdn**: 3Q SDN
|
||||
- **3sat**
|
||||
@@ -268,10 +266,8 @@
|
||||
- **fc2**
|
||||
- **fc2:embed**
|
||||
- **Fczenit**
|
||||
- **fernsehkritik.tv**
|
||||
- **filmon**
|
||||
- **filmon:channel**
|
||||
- **Firstpost**
|
||||
- **FiveTV**
|
||||
- **Flickr**
|
||||
- **Flipagram**
|
||||
@@ -285,7 +281,7 @@
|
||||
- **foxnews:article**
|
||||
- **foxnews:insider**
|
||||
- **FoxSports**
|
||||
- **france2.fr:generation-quoi**
|
||||
- **france2.fr:generation-what**
|
||||
- **FranceCulture**
|
||||
- **FranceInter**
|
||||
- **FranceTV**
|
||||
@@ -303,7 +299,6 @@
|
||||
- **GameInformer**
|
||||
- **GameOne**
|
||||
- **gameone:playlist**
|
||||
- **Gamersyde**
|
||||
- **GameSpot**
|
||||
- **GameStar**
|
||||
- **Gaskrank**
|
||||
@@ -342,6 +337,7 @@
|
||||
- **HornBunny**
|
||||
- **HotNewHipHop**
|
||||
- **HotStar**
|
||||
- **hotstar:playlist**
|
||||
- **Howcast**
|
||||
- **HowStuffWorks**
|
||||
- **HRTi**
|
||||
@@ -442,6 +438,7 @@
|
||||
- **mangomolo:live**
|
||||
- **mangomolo:video**
|
||||
- **ManyVids**
|
||||
- **massengeschmack.tv**
|
||||
- **MatchTV**
|
||||
- **MDR**: MDR.DE and KiKA
|
||||
- **media.ccc.de**
|
||||
@@ -609,6 +606,7 @@
|
||||
- **pcmag**
|
||||
- **PearVideo**
|
||||
- **People**
|
||||
- **PerformGroup**
|
||||
- **periscope**: Periscope
|
||||
- **periscope:user**: Periscope user videos
|
||||
- **PhilharmonieDeParis**: Philharmonie de Paris
|
||||
|
||||
@@ -574,6 +574,32 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
self.ie._sort_formats(formats)
|
||||
expect_value(self, formats, expected_formats, None)
|
||||
|
||||
def test_parse_f4m_formats(self):
|
||||
_TEST_CASES = [
|
||||
(
|
||||
# https://github.com/rg3/youtube-dl/issues/14660
|
||||
'custom_base_url',
|
||||
'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
|
||||
[{
|
||||
'manifest_url': 'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
|
||||
'ext': 'flv',
|
||||
'format_id': '2148',
|
||||
'protocol': 'f4m',
|
||||
'tbr': 2148,
|
||||
'width': 1280,
|
||||
'height': 720,
|
||||
}]
|
||||
),
|
||||
]
|
||||
|
||||
for f4m_file, f4m_url, expected_formats in _TEST_CASES:
|
||||
with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
|
||||
mode='r', encoding='utf-8') as f:
|
||||
formats = self.ie._parse_f4m_formats(
|
||||
compat_etree_fromstring(f.read().encode('utf-8')),
|
||||
f4m_url, None)
|
||||
self.ie._sort_formats(formats)
|
||||
expect_value(self, formats, expected_formats, None)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -466,11 +466,11 @@ class TestFormatSelection(unittest.TestCase):
|
||||
ydl = YDL({'simulate': True})
|
||||
self.assertEqual(ydl._default_format_spec({}), 'bestvideo+bestaudio/best')
|
||||
|
||||
ydl = YDL({'is_live': True})
|
||||
self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')
|
||||
ydl = YDL({})
|
||||
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')
|
||||
|
||||
ydl = YDL({'simulate': True, 'is_live': True})
|
||||
self.assertEqual(ydl._default_format_spec({}), 'bestvideo+bestaudio/best')
|
||||
ydl = YDL({'simulate': True})
|
||||
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'bestvideo+bestaudio/best')
|
||||
|
||||
ydl = YDL({'outtmpl': '-'})
|
||||
self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')
|
||||
|
||||
test/testdata/f4m/custom_base_url.f4m (vendored, new file): 10 lines

@@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<manifest xmlns="http://ns.adobe.com/f4m/1.0">
<streamType>recorded</streamType>
<baseURL>http://vod.livestream.com/events/0000000000673980/</baseURL>
<duration>269.293</duration>
<bootstrapInfo profile="named" id="bootstrap_1">AAAAm2Fic3QAAAAAAAAAAQAAAAPoAAAAAAAEG+0AAAAAAAAAAAAAAAAAAQAAABlhc3J0AAAAAAAAAAABAAAAAQAAAC4BAAAAVmFmcnQAAAAAAAAD6AAAAAAEAAAAAQAAAAAAAAAAAAAXcAAAAC0AAAAAAAQHQAAAE5UAAAAuAAAAAAAEGtUAAAEYAAAAAAAAAAAAAAAAAAAAAAA=</bootstrapInfo>
<media url="b90f532f-b0f6-4f4e-8289-706d490b2fd8_2292" bootstrapInfoId="bootstrap_1" bitrate="2148" width="1280" height="720" videoCodec="avc1.4d401f" audioCodec="mp4a.40.2">
<metadata>AgAKb25NZXRhRGF0YQgAAAAIAAhkdXJhdGlvbgBAcNSwIMSbpgAFd2lkdGgAQJQAAAAAAAAABmhlaWdodABAhoAAAAAAAAAJZnJhbWVyYXRlAEA4/7DoLwW3AA12aWRlb2RhdGFyYXRlAECe1DLgjcobAAx2aWRlb2NvZGVjaWQAQBwAAAAAAAAADWF1ZGlvZGF0YXJhdGUAQGSimlvaPKQADGF1ZGlvY29kZWNpZABAJAAAAAAAAAAACQ==</metadata>
</media>
</manifest>
@@ -948,7 +948,8 @@ class YoutubeDL(object):
report_download(n_entries)
else:  # iterable
if playlistitems:
entries = make_playlistitems_entries(list(ie_entries))
entries = make_playlistitems_entries(list(itertools.islice(
ie_entries, 0, max(playlistitems))))
else:
entries = list(itertools.islice(
ie_entries, playliststart, playlistend))
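The hunk above restores the --playlist-items optimization for lazily generated playlists: instead of materializing the whole entry generator, only the prefix up to the highest requested index is realized with itertools.islice. A minimal standalone sketch of that idea (the function name, the fake generator and the index list are made up for illustration, not youtube-dl code):

```python
import itertools

def take_playlist_items(entries, playlist_items):
    """Realize only the entries needed to satisfy 1-based playlist indices."""
    # Slice the (possibly endless) generator up to the largest requested index,
    # then pick the requested positions from the materialized prefix.
    prefix = list(itertools.islice(entries, 0, max(playlist_items)))
    return [prefix[i - 1] for i in playlist_items if i <= len(prefix)]

def fake_entries():
    n = 1
    while True:          # endless generator standing in for a lazy playlist
        yield 'video-%d' % n
        n += 1

print(take_playlist_items(fake_entries(), [2, 5]))  # ['video-2', 'video-5']
```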
@@ -243,8 +243,17 @@ def remove_encrypted_media(media):
media))


def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
def _add_ns(prop, ver=1):
return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop)


def get_base_url(manifest):
base_url = xpath_text(
manifest, [_add_ns('baseURL'), _add_ns('baseURL', 2)],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
return base_url


class F4mFD(FragmentFD):

@@ -330,13 +339,13 @@ class F4mFD(FragmentFD):
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]

base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
man_base_url = get_base_url(doc) or man_url

base_url = compat_urlparse.urljoin(man_base_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
# From Adobe F4M 3.0 spec:
# The <baseURL> element SHALL be the base URL for all relative
# (HTTP-based) URLs in the manifest. If <baseURL> is not present, said
# URLs should be relative to the location of the containing document.
boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, man_url)
boot_info, bootstrap_url = self._parse_bootstrap_node(
bootstrap_node, man_base_url)
live = boot_info['live']
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
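The downloader/f4m.py change above generalizes `_add_ns` to both F4M namespace versions and adds `get_base_url`, so relative fragment URLs are resolved against the manifest's `<baseURL>` (per the F4M 3.0 spec) rather than the manifest location. A rough standalone sketch of the same lookup, using the ElementTree API directly instead of youtube-dl's xpath helpers, on a made-up manifest:

```python
import xml.etree.ElementTree as etree

F4M_SAMPLE = '''<?xml version="1.0" encoding="UTF-8"?>
<manifest xmlns="http://ns.adobe.com/f4m/1.0">
  <baseURL> http://vod.example.com/events/123/ </baseURL>
</manifest>'''

def _add_ns(prop, ver=1):
    # F4M elements live in a versioned namespace (1.0 or 2.0)
    return '{http://ns.adobe.com/f4m/%d.0}%s' % (ver, prop)

def get_base_url(manifest):
    # Try both namespace versions and strip surrounding whitespace
    for ver in (1, 2):
        node = manifest.find(_add_ns('baseURL', ver))
        if node is not None and node.text:
            return node.text.strip()
    return None

doc = etree.fromstring(F4M_SAMPLE)
print(get_base_url(doc))  # http://vod.example.com/events/123/
```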
@@ -78,7 +78,7 @@ class AnimeOnDemandIE(InfoExtractor):
|
||||
post_url = urljoin(self._LOGIN_URL, post_url)
|
||||
|
||||
response = self._download_webpage(
|
||||
post_url, None, 'Logging in as %s' % username,
|
||||
post_url, None, 'Logging in',
|
||||
data=urlencode_postdata(login_form), headers={
|
||||
'Referer': self._LOGIN_URL,
|
||||
})
|
||||
|
||||
@@ -87,7 +87,7 @@ class AtresPlayerIE(InfoExtractor):
|
||||
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
response = self._download_webpage(
|
||||
request, None, 'Logging in as %s' % username)
|
||||
request, None, 'Logging in')
|
||||
|
||||
error = self._html_search_regex(
|
||||
r'(?s)<ul[^>]+class="[^"]*\blist_error\b[^"]*">(.+?)</ul>',
|
||||
|
||||
@@ -59,7 +59,7 @@ class BambuserIE(InfoExtractor):
|
||||
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||
request.add_header('Referer', self._LOGIN_URL)
|
||||
response = self._download_webpage(
|
||||
request, None, 'Logging in as %s' % username)
|
||||
request, None, 'Logging in')
|
||||
|
||||
login_error = self._html_search_regex(
|
||||
r'(?s)<div class="messages error">(.+?)</div>',
|
||||
|
||||
@@ -31,7 +31,7 @@ class CartoonNetworkIE(TurnerBaseIE):
|
||||
'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {
|
||||
'secure': {
|
||||
'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',
|
||||
'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',
|
||||
'tokenizer_src': 'https://token.vgtf.net/token/token_mobile',
|
||||
},
|
||||
}, {
|
||||
'url': url,
|
||||
|
||||
@@ -93,7 +93,7 @@ class CCMAIE(InfoExtractor):
|
||||
'description': clean_html(informacio.get('descripcio')),
|
||||
'duration': duration,
|
||||
'timestamp': timestamp,
|
||||
'thumnails': thumbnails,
|
||||
'thumbnails': thumbnails,
|
||||
'subtitles': subtitles,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
@@ -29,7 +29,10 @@ from ..compat import (
|
||||
compat_urlparse,
|
||||
compat_xml_parse_error,
|
||||
)
|
||||
from ..downloader.f4m import remove_encrypted_media
|
||||
from ..downloader.f4m import (
|
||||
get_base_url,
|
||||
remove_encrypted_media,
|
||||
)
|
||||
from ..utils import (
|
||||
NO_DEFAULT,
|
||||
age_restricted,
|
||||
@@ -589,19 +592,11 @@ class InfoExtractor(object):
|
||||
if not encoding:
|
||||
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
|
||||
if self._downloader.params.get('dump_intermediate_pages', False):
|
||||
try:
|
||||
url = url_or_request.get_full_url()
|
||||
except AttributeError:
|
||||
url = url_or_request
|
||||
self.to_screen('Dumping request to ' + url)
|
||||
self.to_screen('Dumping request to ' + urlh.geturl())
|
||||
dump = base64.b64encode(webpage_bytes).decode('ascii')
|
||||
self._downloader.to_screen(dump)
|
||||
if self._downloader.params.get('write_pages', False):
|
||||
try:
|
||||
url = url_or_request.get_full_url()
|
||||
except AttributeError:
|
||||
url = url_or_request
|
||||
basen = '%s_%s' % (video_id, url)
|
||||
basen = '%s_%s' % (video_id, urlh.geturl())
|
||||
if len(basen) > 240:
|
||||
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
|
||||
basen = basen[:240 - len(h)] + h
|
||||
@@ -1239,11 +1234,8 @@ class InfoExtractor(object):
|
||||
media_nodes = remove_encrypted_media(media_nodes)
|
||||
if not media_nodes:
|
||||
return formats
|
||||
base_url = xpath_text(
|
||||
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
|
||||
'base URL', default=None)
|
||||
if base_url:
|
||||
base_url = base_url.strip()
|
||||
|
||||
manifest_base_url = get_base_url(manifest)
|
||||
|
||||
bootstrap_info = xpath_element(
|
||||
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
|
||||
@@ -1275,7 +1267,7 @@ class InfoExtractor(object):
|
||||
continue
|
||||
manifest_url = (
|
||||
media_url if media_url.startswith('http://') or media_url.startswith('https://')
|
||||
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
|
||||
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
|
||||
# If media_url is itself a f4m manifest do the recursive extraction
|
||||
# since bitrates in parent manifest (this one) and media_url manifest
|
||||
# may differ leading to inability to resolve the format by requested
|
||||
@@ -1310,6 +1302,7 @@ class InfoExtractor(object):
|
||||
'url': manifest_url,
|
||||
'manifest_url': manifest_url,
|
||||
'ext': 'flv' if bootstrap_info is not None else None,
|
||||
'protocol': 'f4m',
|
||||
'tbr': tbr,
|
||||
'width': width,
|
||||
'height': height,
|
||||
@@ -1355,6 +1348,9 @@ class InfoExtractor(object):
|
||||
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
|
||||
return []
|
||||
|
||||
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
|
||||
return []
|
||||
|
||||
formats = []
|
||||
|
||||
format_url = lambda u: (
|
||||
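The hunk above makes m3u8 extraction bail out early when the master playlist references Apple FairPlay DRM (an `EXT-X-SESSION-KEY` with an `skd://` key URI), mirroring the existing Adobe Flash Access check. A small hedged sketch of that detection on a made-up playlist snippet (helper name is illustrative, not youtube-dl's):

```python
import re

def is_drm_protected(m3u8_doc):
    # Adobe Flash Access (FAXS) or Apple FairPlay (skd:// key URI) cannot be downloaded
    if '#EXT-X-FAXS-CM:' in m3u8_doc:
        return True
    return bool(re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc))

sample = ('#EXTM3U\n'
          '#EXT-X-SESSION-KEY:METHOD=SAMPLE-AES,URI="skd://example",'
          'KEYFORMAT="com.apple.streamingkeydelivery"\n')
print(is_drm_protected(sample))  # True
```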
@@ -2233,27 +2229,35 @@ class InfoExtractor(object):
|
||||
return formats
|
||||
|
||||
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
|
||||
query = compat_urlparse.urlparse(url).query
|
||||
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
|
||||
url_base = self._search_regex(
|
||||
r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url')
|
||||
http_base_url = '%s:%s' % ('http', url_base)
|
||||
formats = []
|
||||
|
||||
def manifest_url(manifest):
|
||||
m_url = '%s/%s' % (http_base_url, manifest)
|
||||
if query:
|
||||
m_url += '?%s' % query
|
||||
return m_url
|
||||
|
||||
if 'm3u8' not in skip_protocols:
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
http_base_url + '/playlist.m3u8', video_id, 'mp4',
|
||||
manifest_url('playlist.m3u8'), video_id, 'mp4',
|
||||
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
|
||||
if 'f4m' not in skip_protocols:
|
||||
formats.extend(self._extract_f4m_formats(
|
||||
http_base_url + '/manifest.f4m',
|
||||
manifest_url('manifest.f4m'),
|
||||
video_id, f4m_id='hds', fatal=False))
|
||||
if 'dash' not in skip_protocols:
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
http_base_url + '/manifest.mpd',
|
||||
manifest_url('manifest.mpd'),
|
||||
video_id, mpd_id='dash', fatal=False))
|
||||
if re.search(r'(?:/smil:|\.smil)', url_base):
|
||||
if 'smil' not in skip_protocols:
|
||||
rtmp_formats = self._extract_smil_formats(
|
||||
http_base_url + '/jwplayer.smil',
|
||||
manifest_url('jwplayer.smil'),
|
||||
video_id, fatal=False)
|
||||
for rtmp_format in rtmp_formats:
|
||||
rtsp_format = rtmp_format.copy()
|
||||
|
||||
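In `_extract_wowza_formats` above, each protocol-specific manifest URL is now built through a small `manifest_url()` helper so that the query string of the original URL (for example an auth token) is re-appended instead of being dropped (#14645). A standalone sketch of that helper under the same assumptions, with a made-up Wowza-style URL:

```python
import re
from urllib.parse import urlparse

def build_manifest_urls(url):
    """Derive per-protocol manifest URLs while preserving the original query string."""
    query = urlparse(url).query
    # Strip a trailing manifest/playlist filename to get the stream base URL
    base = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '',
                  url.split('?')[0])

    def manifest_url(manifest):
        m_url = '%s/%s' % (base, manifest)
        if query:
            m_url += '?%s' % query  # keep e.g. auth tokens from the original URL
        return m_url

    return [manifest_url(m) for m in ('playlist.m3u8', 'manifest.f4m', 'manifest.mpd')]

print(build_manifest_urls('http://example.com/vod/mp4:video/playlist.m3u8?token=abc'))
```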
@@ -38,11 +38,32 @@ class CrunchyrollBaseIE(InfoExtractor):
|
||||
_LOGIN_FORM = 'login_form'
|
||||
_NETRC_MACHINE = 'crunchyroll'
|
||||
|
||||
def _call_rpc_api(self, method, video_id, note=None, data=None):
|
||||
data = data or {}
|
||||
data['req'] = 'RpcApi' + method
|
||||
data = compat_urllib_parse_urlencode(data).encode('utf-8')
|
||||
return self._download_xml(
|
||||
'http://www.crunchyroll.com/xml/',
|
||||
video_id, note, fatal=False, data=data, headers={
|
||||
'Content-Type': 'application/x-www-form-urlencoded',
|
||||
})
|
||||
|
||||
def _login(self):
|
||||
(username, password) = self._get_login_info()
|
||||
if username is None:
|
||||
return
|
||||
|
||||
self._download_webpage(
|
||||
'https://www.crunchyroll.com/?a=formhandler',
|
||||
None, 'Logging in', 'Wrong login info',
|
||||
data=urlencode_postdata({
|
||||
'formname': 'RpcApiUser_Login',
|
||||
'next_url': 'https://www.crunchyroll.com/acct/membership',
|
||||
'name': username,
|
||||
'password': password,
|
||||
}))
|
||||
|
||||
'''
|
||||
login_page = self._download_webpage(
|
||||
self._LOGIN_URL, None, 'Downloading login page')
|
||||
|
||||
@@ -86,6 +107,7 @@ class CrunchyrollBaseIE(InfoExtractor):
|
||||
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
||||
|
||||
raise ExtractorError('Unable to log in')
|
||||
'''
|
||||
|
||||
def _real_initialize(self):
|
||||
self._login()
|
||||
@@ -365,15 +387,19 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
def _get_subtitles(self, video_id, webpage):
|
||||
subtitles = {}
|
||||
for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
|
||||
sub_page = self._download_webpage(
|
||||
'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
|
||||
video_id, note='Downloading subtitles for ' + sub_name)
|
||||
id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
|
||||
iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
|
||||
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
|
||||
if not id or not iv or not data:
|
||||
sub_doc = self._call_rpc_api(
|
||||
'Subtitle_GetXml', video_id,
|
||||
'Downloading subtitles for ' + sub_name, data={
|
||||
'subtitle_script_id': sub_id,
|
||||
})
|
||||
if not sub_doc:
|
||||
continue
|
||||
subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
|
||||
sid = sub_doc.get('id')
|
||||
iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
|
||||
data = xpath_text(sub_doc, 'data', 'subtitle data')
|
||||
if not sid or not iv or not data:
|
||||
continue
|
||||
subtitle = self._decrypt_subtitles(data, iv, sid).decode('utf-8')
|
||||
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
|
||||
if not lang_code:
|
||||
continue
|
||||
@@ -444,65 +470,79 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
for fmt in available_fmts:
|
||||
stream_quality, stream_format = self._FORMAT_IDS[fmt]
|
||||
video_format = fmt + 'p'
|
||||
streamdata_req = sanitized_Request(
|
||||
'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
|
||||
% (video_id, stream_format, stream_quality),
|
||||
compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
|
||||
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
streamdata = self._download_xml(
|
||||
streamdata_req, video_id,
|
||||
note='Downloading media info for %s' % video_format)
|
||||
stream_info = streamdata.find('./{default}preload/stream_info')
|
||||
video_encode_id = xpath_text(stream_info, './video_encode_id')
|
||||
if video_encode_id in video_encode_ids:
|
||||
continue
|
||||
video_encode_ids.append(video_encode_id)
|
||||
stream_infos = []
|
||||
streamdata = self._call_rpc_api(
|
||||
'VideoPlayer_GetStandardConfig', video_id,
|
||||
'Downloading media info for %s' % video_format, data={
|
||||
'media_id': video_id,
|
||||
'video_format': stream_format,
|
||||
'video_quality': stream_quality,
|
||||
'current_page': url,
|
||||
})
|
||||
if streamdata:
|
||||
stream_info = streamdata.find('./{default}preload/stream_info')
|
||||
if stream_info:
|
||||
stream_infos.append(stream_info)
|
||||
stream_info = self._call_rpc_api(
|
||||
'VideoEncode_GetStreamInfo', video_id,
|
||||
'Downloading stream info for %s' % video_format, data={
|
||||
'media_id': video_id,
|
||||
'video_format': stream_format,
|
||||
'video_encode_quality': stream_quality,
|
||||
})
|
||||
if stream_info:
|
||||
stream_infos.append(stream_info)
|
||||
for stream_info in stream_infos:
|
||||
video_encode_id = xpath_text(stream_info, './video_encode_id')
|
||||
if video_encode_id in video_encode_ids:
|
||||
continue
|
||||
video_encode_ids.append(video_encode_id)
|
||||
|
||||
video_file = xpath_text(stream_info, './file')
|
||||
if not video_file:
|
||||
continue
|
||||
if video_file.startswith('http'):
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
video_file, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id='hls', fatal=False))
|
||||
continue
|
||||
|
||||
video_url = xpath_text(stream_info, './host')
|
||||
if not video_url:
|
||||
continue
|
||||
metadata = stream_info.find('./metadata')
|
||||
format_info = {
|
||||
'format': video_format,
|
||||
'format_id': video_format,
|
||||
'height': int_or_none(xpath_text(metadata, './height')),
|
||||
'width': int_or_none(xpath_text(metadata, './width')),
|
||||
}
|
||||
|
||||
if '.fplive.net/' in video_url:
|
||||
video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
|
||||
parsed_video_url = compat_urlparse.urlparse(video_url)
|
||||
direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
|
||||
netloc='v.lvlt.crcdn.net',
|
||||
path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
|
||||
if self._is_valid_url(direct_video_url, video_id, video_format):
|
||||
format_info.update({
|
||||
'url': direct_video_url,
|
||||
})
|
||||
formats.append(format_info)
|
||||
video_file = xpath_text(stream_info, './file')
|
||||
if not video_file:
|
||||
continue
|
||||
if video_file.startswith('http'):
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
video_file, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id='hls', fatal=False))
|
||||
continue
|
||||
|
||||
format_info.update({
|
||||
'url': video_url,
|
||||
'play_path': video_file,
|
||||
'ext': 'flv',
|
||||
})
|
||||
formats.append(format_info)
|
||||
self._sort_formats(formats)
|
||||
video_url = xpath_text(stream_info, './host')
|
||||
if not video_url:
|
||||
continue
|
||||
metadata = stream_info.find('./metadata')
|
||||
format_info = {
|
||||
'format': video_format,
|
||||
'height': int_or_none(xpath_text(metadata, './height')),
|
||||
'width': int_or_none(xpath_text(metadata, './width')),
|
||||
}
|
||||
|
||||
metadata = self._download_xml(
|
||||
'http://www.crunchyroll.com/xml', video_id,
|
||||
note='Downloading media info', query={
|
||||
'req': 'RpcApiVideoPlayer_GetMediaMetadata',
|
||||
if '.fplive.net/' in video_url:
|
||||
video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
|
||||
parsed_video_url = compat_urlparse.urlparse(video_url)
|
||||
direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
|
||||
netloc='v.lvlt.crcdn.net',
|
||||
path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
|
||||
if self._is_valid_url(direct_video_url, video_id, video_format):
|
||||
format_info.update({
|
||||
'format_id': 'http-' + video_format,
|
||||
'url': direct_video_url,
|
||||
})
|
||||
formats.append(format_info)
|
||||
continue
|
||||
|
||||
format_info.update({
|
||||
'format_id': 'rtmp-' + video_format,
|
||||
'url': video_url,
|
||||
'play_path': video_file,
|
||||
'ext': 'flv',
|
||||
})
|
||||
formats.append(format_info)
|
||||
self._sort_formats(formats, ('height', 'width', 'tbr', 'fps'))
|
||||
|
||||
metadata = self._call_rpc_api(
|
||||
'VideoPlayer_GetMediaMetadata', video_id,
|
||||
note='Downloading media info', data={
|
||||
'media_id': video_id,
|
||||
})
|
||||
|
||||
|
||||
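The Crunchyroll rewrite above funnels every XML RPC call (subtitles, stream config, media metadata) through a single `_call_rpc_api` helper that adds the `req=RpcApi...` field and posts a form-encoded body. A rough standalone sketch of building such a request; the helper name and the sample `subtitle_script_id` are placeholders for illustration, and no network call is made:

```python
from urllib.parse import urlencode

RPC_ENDPOINT = 'http://www.crunchyroll.com/xml/'

def build_rpc_request(method, **params):
    """Build (url, body, headers) for a Crunchyroll-style RPC call.

    Every call is a form-encoded POST whose 'req' field selects the method,
    e.g. RpcApiSubtitle_GetXml or RpcApiVideoPlayer_GetStandardConfig.
    """
    data = dict(params)
    data['req'] = 'RpcApi' + method
    body = urlencode(data).encode('utf-8')
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    return RPC_ENDPOINT, body, headers

url, body, headers = build_rpc_request('Subtitle_GetXml', subtitle_script_id='12345')
print(url, body)
# The XML reply would then be parsed and its <iv>/<data> elements decrypted,
# as the extractor does via xpath_text in the diff above.
```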
@@ -54,7 +54,7 @@ class DramaFeverBaseIE(AMPIE):
|
||||
request = sanitized_Request(
|
||||
self._LOGIN_URL, urlencode_postdata(login_form))
|
||||
response = self._download_webpage(
|
||||
request, None, 'Logging in as %s' % username)
|
||||
request, None, 'Logging in')
|
||||
|
||||
if all(logout_pattern not in response
|
||||
for logout_pattern in ['href="/accounts/logout/"', '>Log out<']):
|
||||
|
||||
@@ -10,7 +10,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class DrTuberIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?'
|
||||
_VALID_URL = r'https?://(?:(?:www|m)\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
|
||||
'md5': '93e680cf2536ad0dfb7e74d94a89facd',
|
||||
@@ -28,6 +28,9 @@ class DrTuberIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'http://www.drtuber.com/embed/489939',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://m.drtuber.com/video/3893529/lingerie-blowjob-from-beautiful-teen',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -344,11 +344,9 @@ from .filmon import (
|
||||
FilmOnIE,
|
||||
FilmOnChannelIE,
|
||||
)
|
||||
from .firstpost import FirstpostIE
|
||||
from .firsttv import FirstTVIE
|
||||
from .fivemin import FiveMinIE
|
||||
from .fivetv import FiveTVIE
|
||||
from .fktv import FKTVIE
|
||||
from .flickr import FlickrIE
|
||||
from .flipagram import FlipagramIE
|
||||
from .folketinget import FolketingetIE
|
||||
@@ -375,7 +373,7 @@ from .francetv import (
|
||||
FranceTVIE,
|
||||
FranceTVEmbedIE,
|
||||
FranceTVInfoIE,
|
||||
GenerationQuoiIE,
|
||||
GenerationWhatIE,
|
||||
CultureboxIE,
|
||||
)
|
||||
from .freesound import FreesoundIE
|
||||
@@ -391,7 +389,6 @@ from .gameone import (
|
||||
GameOneIE,
|
||||
GameOnePlaylistIE,
|
||||
)
|
||||
from .gamersyde import GamersydeIE
|
||||
from .gamespot import GameSpotIE
|
||||
from .gamestar import GameStarIE
|
||||
from .gaskrank import GaskrankIE
|
||||
@@ -432,7 +429,10 @@ from .hitbox import HitboxIE, HitboxLiveIE
|
||||
from .hitrecord import HitRecordIE
|
||||
from .hornbunny import HornBunnyIE
|
||||
from .hotnewhiphop import HotNewHipHopIE
|
||||
from .hotstar import HotStarIE
|
||||
from .hotstar import (
|
||||
HotStarIE,
|
||||
HotStarPlaylistIE,
|
||||
)
|
||||
from .howcast import HowcastIE
|
||||
from .howstuffworks import HowStuffWorksIE
|
||||
from .hrti import (
|
||||
@@ -569,6 +569,7 @@ from .mangomolo import (
|
||||
MangomoloLiveIE,
|
||||
)
|
||||
from .manyvids import ManyVidsIE
|
||||
from .massengeschmacktv import MassengeschmackTVIE
|
||||
from .matchtv import MatchTVIE
|
||||
from .mdr import MDRIE
|
||||
from .mediaset import MediasetIE
|
||||
@@ -786,6 +787,7 @@ from .patreon import PatreonIE
|
||||
from .pbs import PBSIE
|
||||
from .pearvideo import PearVideoIE
|
||||
from .people import PeopleIE
|
||||
from .performgroup import PerformGroupIE
|
||||
from .periscope import (
|
||||
PeriscopeIE,
|
||||
PeriscopeUserIE,
|
||||
@@ -1110,10 +1112,6 @@ from .tvplayer import TVPlayerIE
|
||||
from .tweakers import TweakersIE
|
||||
from .twentyfourvideo import TwentyFourVideoIE
|
||||
from .twentymin import TwentyMinutenIE
|
||||
from .twentytwotracks import (
|
||||
TwentyTwoTracksIE,
|
||||
TwentyTwoTracksGenreIE
|
||||
)
|
||||
from .twitch import (
|
||||
TwitchVideoIE,
|
||||
TwitchChapterIE,
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_etree_fromstring
|
||||
from ..utils import (
|
||||
xpath_element,
|
||||
xpath_text,
|
||||
@@ -43,10 +46,15 @@ class FazIE(InfoExtractor):
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
description = self._og_search_description(webpage)
|
||||
config_xml_url = self._search_regex(
|
||||
r'videoXMLURL\s*=\s*"([^"]+)', webpage, 'config xml url')
|
||||
config = self._download_xml(
|
||||
config_xml_url, video_id, 'Downloading config xml')
|
||||
media = self._html_search_regex(
|
||||
r"data-videojs-media='([^']+)",
|
||||
webpage, 'media')
|
||||
if media == 'extern':
|
||||
perform_url = self._search_regex(
|
||||
r"<iframe[^>]+?src='((?:http:)?//player\.performgroup\.com/eplayer/eplayer\.html#/?[0-9a-f]{26}\.[0-9a-z]{26})",
|
||||
webpage, 'perform url')
|
||||
return self.url_result(perform_url)
|
||||
config = compat_etree_fromstring(media)
|
||||
|
||||
encodings = xpath_element(config, 'ENCODINGS', 'encodings', True)
|
||||
formats = []
|
||||
@@ -55,12 +63,24 @@ class FazIE(InfoExtractor):
|
||||
if encoding is not None:
|
||||
encoding_url = xpath_text(encoding, 'FILENAME')
|
||||
if encoding_url:
|
||||
formats.append({
|
||||
tbr = xpath_text(encoding, 'AVERAGEBITRATE', 1000)
|
||||
if tbr:
|
||||
tbr = int_or_none(tbr.replace(',', '.'))
|
||||
f = {
|
||||
'url': encoding_url,
|
||||
'format_id': code.lower(),
|
||||
'quality': pref,
|
||||
'tbr': int_or_none(xpath_text(encoding, 'AVERAGEBITRATE')),
|
||||
})
|
||||
'tbr': tbr,
|
||||
'vcodec': xpath_text(encoding, 'CODEC'),
|
||||
}
|
||||
mobj = re.search(r'(\d+)x(\d+)_(\d+)\.mp4', encoding_url)
|
||||
if mobj:
|
||||
f.update({
|
||||
'width': int(mobj.group(1)),
|
||||
'height': int(mobj.group(2)),
|
||||
'tbr': tbr or int(mobj.group(3)),
|
||||
})
|
||||
formats.append(f)
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
|
||||
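The faz.net change above derives width, height and, as a fallback, bitrate from the encoding filename itself with a `(\d+)x(\d+)_(\d+)\.mp4` pattern when the XML fields are missing. A small sketch of that parsing on a made-up filename (function name is illustrative):

```python
import re

def dims_from_filename(encoding_url, tbr=None):
    """Pull width/height (and a bitrate fallback) out of a ..._WIDTHxHEIGHT_TBR.mp4 URL."""
    info = {}
    mobj = re.search(r'(\d+)x(\d+)_(\d+)\.mp4', encoding_url)
    if mobj:
        info.update({
            'width': int(mobj.group(1)),
            'height': int(mobj.group(2)),
            'tbr': tbr or int(mobj.group(3)),  # prefer the bitrate from the XML when present
        })
    return info

print(dims_from_filename('http://example.com/video_1280x720_1800.mp4'))
# {'width': 1280, 'height': 720, 'tbr': 1800}
```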
@@ -2,7 +2,10 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urlparse
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
float_or_none,
|
||||
)
|
||||
|
||||
|
||||
class FczenitIE(InfoExtractor):
|
||||
@@ -14,6 +17,8 @@ class FczenitIE(InfoExtractor):
|
||||
'id': '41044',
|
||||
'ext': 'mp4',
|
||||
'title': 'Так пишется история: казанский разгром ЦСКА на «Зенит-ТВ»',
|
||||
'timestamp': 1462283735,
|
||||
'upload_date': '20160503',
|
||||
},
|
||||
}
|
||||
|
||||
@@ -21,28 +26,31 @@ class FczenitIE(InfoExtractor):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
video_title = self._html_search_regex(
|
||||
r'<[^>]+class=\"photoalbum__title\">([^<]+)', webpage, 'title')
|
||||
msi_id = self._search_regex(
|
||||
r"(?s)config\s*=\s*{.+?video_id\s*:\s*'([^']+)'", webpage, 'msi id')
|
||||
|
||||
video_items = self._parse_json(self._search_regex(
|
||||
r'arrPath\s*=\s*JSON\.parse\(\'(.+)\'\)', webpage, 'video items'),
|
||||
video_id)
|
||||
|
||||
def merge_dicts(*dicts):
|
||||
ret = {}
|
||||
for a_dict in dicts:
|
||||
ret.update(a_dict)
|
||||
return ret
|
||||
msi_data = self._download_json(
|
||||
'http://player.fc-zenit.ru/msi/video', msi_id, query={
|
||||
'video': msi_id,
|
||||
})['data']
|
||||
title = msi_data['name']
|
||||
|
||||
formats = [{
|
||||
'url': compat_urlparse.urljoin(url, video_url),
|
||||
'tbr': int(tbr),
|
||||
} for tbr, video_url in merge_dicts(*video_items).items()]
|
||||
'format_id': q.get('label'),
|
||||
'url': q['url'],
|
||||
'height': int_or_none(q.get('label')),
|
||||
} for q in msi_data['qualities'] if q.get('url')]
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
tags = [tag['label'] for tag in msi_data.get('tags', []) if tag.get('label')]
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_title,
|
||||
'title': title,
|
||||
'thumbnail': msi_data.get('preview'),
|
||||
'formats': formats,
|
||||
'duration': float_or_none(msi_data.get('duration')),
|
||||
'timestamp': int_or_none(msi_data.get('date')),
|
||||
'tags': tags,
|
||||
}
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class FirstpostIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?firstpost\.com/[^/]+/.*-(?P<id>[0-9]+)\.html'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.firstpost.com/india/india-to-launch-indigenous-aircraft-carrier-monday-1025403.html',
|
||||
'md5': 'ee9114957692f01fb1263ed87039112a',
|
||||
'info_dict': {
|
||||
'id': '1025403',
|
||||
'ext': 'mp4',
|
||||
'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
|
||||
'description': 'md5:feef3041cb09724e0bdc02843348f5f4',
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
page = self._download_webpage(url, video_id)
|
||||
|
||||
title = self._html_search_meta('twitter:title', page, 'title', fatal=True)
|
||||
description = self._html_search_meta('twitter:description', page, 'title')
|
||||
|
||||
data = self._download_xml(
|
||||
'http://www.firstpost.com/getvideoxml-%s.xml' % video_id, video_id,
|
||||
'Downloading video XML')
|
||||
|
||||
item = data.find('./playlist/item')
|
||||
thumbnail = item.find('./image').text
|
||||
|
||||
formats = [
|
||||
{
|
||||
'url': details.find('./file').text,
|
||||
'format_id': details.find('./label').text.strip(),
|
||||
'width': int(details.find('./width').text.strip()),
|
||||
'height': int(details.find('./height').text.strip()),
|
||||
} for details in item.findall('./source/file_details') if details.find('./file').text
|
||||
]
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': description,
|
||||
'thumbnail': thumbnail,
|
||||
'formats': formats,
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
determine_ext,
|
||||
js_to_json,
|
||||
)
|
||||
|
||||
|
||||
class FKTVIE(InfoExtractor):
|
||||
IE_NAME = 'fernsehkritik.tv'
|
||||
_VALID_URL = r'https?://(?:www\.)?fernsehkritik\.tv/folge-(?P<id>[0-9]+)(?:/.*)?'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://fernsehkritik.tv/folge-1',
|
||||
'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79',
|
||||
'info_dict': {
|
||||
'id': '1',
|
||||
'ext': 'mp4',
|
||||
'title': 'Folge 1 vom 10. April 2007',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
episode = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(
|
||||
'http://fernsehkritik.tv/folge-%s/play' % episode, episode)
|
||||
title = clean_html(self._html_search_regex(
|
||||
'<h3>([^<]+)</h3>', webpage, 'title'))
|
||||
thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False)
|
||||
sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json)
|
||||
|
||||
formats = []
|
||||
for source in sources:
|
||||
furl = source.get('src')
|
||||
if furl:
|
||||
formats.append({
|
||||
'url': furl,
|
||||
'format_id': determine_ext(furl),
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': episode,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnail': thumbnail,
|
||||
}
|
||||
@@ -2,7 +2,6 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .anvato import AnvatoIE
|
||||
from ..utils import js_to_json
|
||||
|
||||
|
||||
class FOX9IE(AnvatoIE):
|
||||
@@ -34,9 +33,9 @@ class FOX9IE(AnvatoIE):
|
||||
|
||||
video_id = self._parse_json(
|
||||
self._search_regex(
|
||||
r'AnvatoPlaylist\s*\(\s*(\[.+?\])\s*\)\s*;',
|
||||
r"this\.videosJson\s*=\s*'(\[.+?\])';",
|
||||
webpage, 'anvato playlist'),
|
||||
video_id, transform_source=js_to_json)[0]['video']
|
||||
video_id)[0]['video']
|
||||
|
||||
return self._get_anvato_videos(
|
||||
'anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b',
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urlparse
|
||||
@@ -308,31 +307,32 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||
return self._extract_video(video_id, catalogue)
|
||||
|
||||
|
||||
class GenerationQuoiIE(InfoExtractor):
|
||||
IE_NAME = 'france2.fr:generation-quoi'
|
||||
_VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<id>[^/?#]+)'
|
||||
class GenerationWhatIE(InfoExtractor):
|
||||
IE_NAME = 'france2.fr:generation-what'
|
||||
_VALID_URL = r'https?://generation-what\.francetv\.fr/[^/]+/video/(?P<id>[^/?#]+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://generation-quoi.france2.fr/portrait/garde-a-vous',
|
||||
_TESTS = [{
|
||||
'url': 'http://generation-what.francetv.fr/portrait/video/present-arms',
|
||||
'info_dict': {
|
||||
'id': 'k7FJX8VBcvvLmX4wA5Q',
|
||||
'id': 'wtvKYUG45iw',
|
||||
'ext': 'mp4',
|
||||
'title': 'Génération Quoi - Garde à Vous',
|
||||
'uploader': 'Génération Quoi',
|
||||
'title': 'Generation What - Garde à vous - FRA',
|
||||
'uploader': 'Generation What',
|
||||
'uploader_id': 'UCHH9p1eetWCgt4kXBYCb3_w',
|
||||
'upload_date': '20160411',
|
||||
},
|
||||
'params': {
|
||||
# It uses Dailymotion
|
||||
'skip_download': True,
|
||||
},
|
||||
}
|
||||
}, {
|
||||
'url': 'http://generation-what.francetv.fr/europe/video/present-arms',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % display_id)
|
||||
info_json = self._download_webpage(info_url, display_id)
|
||||
info = json.loads(info_json)
|
||||
return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
|
||||
ie='Dailymotion')
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
youtube_id = self._search_regex(
|
||||
r"window\.videoURL\s*=\s*'([0-9A-Za-z_-]{11})';",
|
||||
webpage, 'youtube id')
|
||||
return self.url_result(youtube_id, 'Youtube', youtube_id)
|
||||
|
||||
|
||||
class CultureboxIE(FranceTVBaseInfoExtractor):
|
||||
@@ -363,6 +363,6 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
|
||||
raise ExtractorError('Video %s is not available' % name, expected=True)
|
||||
|
||||
video_id, catalogue = self._search_regex(
|
||||
r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
|
||||
r'"https?://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
|
||||
|
||||
return self._extract_video(video_id, catalogue)
|
||||
|
||||
@@ -1,37 +1,34 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class FreespeechIE(InfoExtractor):
|
||||
IE_NAME = 'freespeech.org'
|
||||
_VALID_URL = r'https?://(?:www\.)?freespeech\.org/video/(?P<title>.+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?freespeech\.org/stories/(?P<id>.+)'
|
||||
_TEST = {
|
||||
'add_ie': ['Youtube'],
|
||||
'url': 'https://www.freespeech.org/video/obama-romney-campaign-colorado-ahead-debate-0',
|
||||
'url': 'http://www.freespeech.org/stories/fcc-announces-net-neutrality-rollback-whats-stake/',
|
||||
'info_dict': {
|
||||
'id': 'poKsVCZ64uU',
|
||||
'ext': 'webm',
|
||||
'title': 'Obama, Romney Campaign in Colorado Ahead of Debate',
|
||||
'description': 'Obama, Romney Campaign in Colorado Ahead of Debate',
|
||||
'uploader': 'freespeechtv',
|
||||
'id': 'waRk6IPqyWM',
|
||||
'ext': 'mp4',
|
||||
'title': 'What\'s At Stake - Net Neutrality Special',
|
||||
'description': 'Presented by MNN and FSTV',
|
||||
'upload_date': '20170728',
|
||||
'uploader_id': 'freespeechtv',
|
||||
'upload_date': '20121002',
|
||||
'uploader': 'freespeechtv',
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
title = mobj.group('title')
|
||||
webpage = self._download_webpage(url, title)
|
||||
info_json = self._search_regex(r'jQuery\.extend\(Drupal\.settings, ({.*?})\);', webpage, 'info')
|
||||
info = json.loads(info_json)
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
youtube_url = self._search_regex(
|
||||
r'data-video-url="([^"]+)"',
|
||||
webpage, 'youtube url')
|
||||
|
||||
return {
|
||||
'_type': 'url',
|
||||
'url': info['jw_player']['basic_video_node_player']['file'],
|
||||
'url': youtube_url,
|
||||
'ie_key': 'Youtube',
|
||||
}
|
||||
|
||||
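The freespeech.org fix above stops parsing Drupal settings JSON and instead pulls the embedded YouTube URL out of a `data-video-url` attribute, delegating extraction to the Youtube extractor via a url result. A minimal sketch of that "find the embed and hand it off" step on a made-up page snippet:

```python
import re

def find_youtube_embed(webpage):
    """Return the embedded YouTube URL referenced by a data-video-url attribute, if any."""
    mobj = re.search(r'data-video-url="([^"]+)"', webpage)
    return mobj.group(1) if mobj else None

sample_html = ('<div class="player" '
               'data-video-url="https://www.youtube.com/watch?v=waRk6IPqyWM"></div>')
print(find_youtube_embed(sample_html))
# In the extractor this URL is returned as
# {'_type': 'url', 'url': youtube_url, 'ie_key': 'Youtube'}
```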
@@ -57,7 +57,7 @@ class FunimationIE(InfoExtractor):
|
||||
try:
|
||||
data = self._download_json(
|
||||
'https://prod-api-funimationnow.dadcdigital.com/api/auth/login/',
|
||||
None, 'Logging in as %s' % username, data=urlencode_postdata({
|
||||
None, 'Logging in', data=urlencode_postdata({
|
||||
'username': username,
|
||||
'password': password,
|
||||
}))
|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
js_to_json,
|
||||
parse_duration,
|
||||
remove_start,
|
||||
)
|
||||
|
||||
|
||||
class GamersydeIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?gamersyde\.com/hqstream_(?P<display_id>[\da-z_]+)-(?P<id>\d+)_[a-z]{2}\.html'
|
||||
_TEST = {
|
||||
'url': 'http://www.gamersyde.com/hqstream_bloodborne_birth_of_a_hero-34371_en.html',
|
||||
'md5': 'f38d400d32f19724570040d5ce3a505f',
|
||||
'info_dict': {
|
||||
'id': '34371',
|
||||
'ext': 'mp4',
|
||||
'duration': 372,
|
||||
'title': 'Bloodborne - Birth of a hero',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
display_id = mobj.group('display_id')
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
playlist = self._parse_json(
|
||||
self._search_regex(
|
||||
r'(?s)playlist: \[({.+?})\]\s*}\);', webpage, 'files'),
|
||||
display_id, transform_source=js_to_json)
|
||||
|
||||
formats = []
|
||||
for source in playlist['sources']:
|
||||
video_url = source.get('file')
|
||||
if not video_url:
|
||||
continue
|
||||
format_id = source.get('label')
|
||||
f = {
|
||||
'url': video_url,
|
||||
'format_id': format_id,
|
||||
}
|
||||
m = re.search(r'^(?P<height>\d+)[pP](?P<fps>\d+)fps', format_id)
|
||||
if m:
|
||||
f.update({
|
||||
'height': int(m.group('height')),
|
||||
'fps': int(m.group('fps')),
|
||||
})
|
||||
formats.append(f)
|
||||
self._sort_formats(formats)
|
||||
|
||||
title = remove_start(playlist['title'], '%s - ' % video_id)
|
||||
thumbnail = playlist.get('image')
|
||||
duration = parse_duration(self._search_regex(
|
||||
r'Length:</label>([^<]+)<', webpage, 'duration', fatal=False))
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'thumbnail': thumbnail,
|
||||
'duration': duration,
|
||||
'formats': formats,
|
||||
}
|
||||
@@ -14,7 +14,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class GameSpotIE(OnceIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
|
||||
_VALID_URL = r'https?://(?:www\.)?gamespot\.com/(?:video|article)s/(?:[^/]+/\d+-|embed/)(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
|
||||
'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
|
||||
@@ -35,6 +35,12 @@ class GameSpotIE(OnceIE):
|
||||
'params': {
|
||||
'skip_download': True, # m3u8 downloads
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.gamespot.com/videos/embed/6439218/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.gamespot.com/articles/the-last-of-us-2-receives-new-ps4-trailer/1100-6454469/',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@@ -52,7 +58,7 @@ class GameSpotIE(OnceIE):
|
||||
manifest_url = f4m_url
|
||||
formats.extend(self._extract_f4m_formats(
|
||||
f4m_url + '?hdcore=3.7.0', page_id, f4m_id='hds', fatal=False))
|
||||
m3u8_url = streams.get('m3u8_stream')
|
||||
m3u8_url = dict_get(streams, ('m3u8_stream', 'adaptive_stream'))
|
||||
if m3u8_url:
|
||||
manifest_url = m3u8_url
|
||||
m3u8_formats = self._extract_m3u8_formats(
|
||||
@@ -60,7 +66,7 @@ class GameSpotIE(OnceIE):
|
||||
m3u8_id='hls', fatal=False)
|
||||
formats.extend(m3u8_formats)
|
||||
progressive_url = dict_get(
|
||||
streams, ('progressive_hd', 'progressive_high', 'progressive_low'))
|
||||
streams, ('progressive_hd', 'progressive_high', 'progressive_low', 'other_lr'))
|
||||
if progressive_url and manifest_url:
|
||||
qualities_basename = self._search_regex(
|
||||
r'/([^/]+)\.csmil/',
|
||||
@@ -105,7 +111,8 @@ class GameSpotIE(OnceIE):
|
||||
onceux_url = self._parse_json(unescapeHTML(onceux_json), page_id).get('metadataUri')
|
||||
if onceux_url:
|
||||
formats.extend(self._extract_once_formats(re.sub(
|
||||
r'https?://[^/]+', 'http://once.unicornmedia.com', onceux_url)))
|
||||
r'https?://[^/]+', 'http://once.unicornmedia.com', onceux_url),
|
||||
http_formats_preference=-1))
|
||||
|
||||
if not formats:
|
||||
for quality in ['sd', 'hd']:
|
||||
|
||||
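The GameSpot change above uses `dict_get` to try several stream keys in order (`m3u8_stream` then `adaptive_stream`, plus a new `other_lr` progressive fallback). youtube-dl's `dict_get` returns the first present, non-empty value among the given keys; a standalone re-implementation of that idea (not imported from the project) with made-up stream data:

```python
def dict_get(d, keys, default=None, skip_false_values=True):
    # Return the first present (and, by default, truthy) value among candidate keys
    for key in keys:
        if key in d:
            value = d[key]
            if value or not skip_false_values:
                return value
    return default

streams = {'adaptive_stream': 'https://example.com/master.m3u8', 'progressive_low': ''}
print(dict_get(streams, ('m3u8_stream', 'adaptive_stream')))
# https://example.com/master.m3u8
print(dict_get(streams, ('progressive_hd', 'progressive_low')))
# None (the empty string is skipped)
```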
@@ -102,6 +102,7 @@ from .joj import JojIE
|
||||
from .megaphone import MegaphoneIE
|
||||
from .vzaar import VzaarIE
|
||||
from .channel9 import Channel9IE
|
||||
from .vshare import VShareIE
|
||||
|
||||
|
||||
class GenericIE(InfoExtractor):
|
||||
@@ -1098,9 +1099,9 @@ class GenericIE(InfoExtractor):
|
||||
},
|
||||
# jwplayer rtmp
|
||||
{
|
||||
'url': 'http://www.suffolk.edu/sjc/',
|
||||
'url': 'http://www.suffolk.edu/sjc/live.php',
|
||||
'info_dict': {
|
||||
'id': 'sjclive',
|
||||
'id': 'live',
|
||||
'ext': 'flv',
|
||||
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
|
||||
'uploader': 'www.suffolk.edu',
|
||||
@@ -1108,7 +1109,7 @@ class GenericIE(InfoExtractor):
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'does not contain a video anymore',
|
||||
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
|
||||
},
|
||||
# Complex jwplayer
|
||||
{
|
||||
@@ -1135,6 +1136,19 @@ class GenericIE(InfoExtractor):
|
||||
'skip_download': True,
|
||||
}
|
||||
},
|
||||
{
|
||||
# JWPlatform iframe
|
||||
'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
|
||||
'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
|
||||
'info_dict': {
|
||||
'id': 'O0c5JcKT',
|
||||
'ext': 'mp4',
|
||||
'upload_date': '20171122',
|
||||
'timestamp': 1511366290,
|
||||
'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
|
||||
},
|
||||
'add_ie': [JWPlatformIE.ie_key()],
|
||||
},
|
||||
{
|
||||
# Video.js embed, multiple formats
|
||||
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
|
||||
@@ -1921,6 +1935,16 @@ class GenericIE(InfoExtractor):
|
||||
'title': 'Rescue Kit 14 Free Edition - Getting started',
|
||||
},
|
||||
'playlist_count': 4,
|
||||
},
|
||||
{
|
||||
# vshare embed
|
||||
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
|
||||
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
|
||||
'info_dict': {
|
||||
'id': '0f64ce6',
|
||||
'title': 'vl14062007715967',
|
||||
'ext': 'mp4',
|
||||
}
|
||||
}
|
||||
# {
|
||||
# # TODO: find another test
|
||||
@@ -2879,6 +2903,11 @@ class GenericIE(InfoExtractor):
|
||||
return self.playlist_from_matches(
|
||||
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
|
||||
|
||||
vshare_urls = VShareIE._extract_urls(webpage)
|
||||
if vshare_urls:
|
||||
return self.playlist_from_matches(
|
||||
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
|
||||
|
||||
def merge_dicts(dict1, dict2):
|
||||
merged = {}
|
||||
for k, v in dict1.items():
|
||||
|
||||
@@ -1,22 +1,47 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
determine_ext,
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
)
|
||||
|
||||
|
||||
class HotStarIE(InfoExtractor):
|
||||
class HotStarBaseIE(InfoExtractor):
|
||||
_GEO_COUNTRIES = ['IN']
|
||||
|
||||
def _download_json(self, *args, **kwargs):
|
||||
response = super(HotStarBaseIE, self)._download_json(*args, **kwargs)
|
||||
if response['resultCode'] != 'OK':
|
||||
if kwargs.get('fatal'):
|
||||
raise ExtractorError(
|
||||
response['errorDescription'], expected=True)
|
||||
return None
|
||||
return response['resultObj']
|
||||
|
||||
def _download_content_info(self, content_id):
|
||||
return self._download_json(
|
||||
'https://account.hotstar.com/AVS/besc', content_id, query={
|
||||
'action': 'GetAggregatedContentDetails',
|
||||
'appVersion': '5.0.40',
|
||||
'channel': 'PCTV',
|
||||
'contentId': content_id,
|
||||
})['contentInfo'][0]
|
||||
|
||||
|
||||
class HotStarIE(HotStarBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?hotstar\.com/(?:.+?[/-])?(?P<id>\d{10})'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.hotstar.com/on-air-with-aib--english-1000076273',
|
||||
'info_dict': {
|
||||
'id': '1000076273',
|
||||
'ext': 'mp4',
|
||||
'title': 'On Air With AIB - English',
|
||||
'title': 'On Air With AIB',
|
||||
'description': 'md5:c957d8868e9bc793ccb813691cc4c434',
|
||||
'timestamp': 1447227000,
|
||||
'upload_date': '20151111',
|
||||
@@ -34,23 +59,11 @@ class HotStarIE(InfoExtractor):
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True, query=None):
|
||||
json_data = super(HotStarIE, self)._download_json(
|
||||
url_or_request, video_id, note, fatal=fatal, query=query)
|
||||
if json_data['resultCode'] != 'OK':
|
||||
if fatal:
|
||||
raise ExtractorError(json_data['errorDescription'])
|
||||
return None
|
||||
return json_data['resultObj']
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
video_data = self._download_json(
|
||||
'http://account.hotstar.com/AVS/besc', video_id, query={
|
||||
'action': 'GetAggregatedContentDetails',
|
||||
'channel': 'PCTV',
|
||||
'contentId': video_id,
|
||||
})['contentInfo'][0]
|
||||
|
||||
video_data = self._download_content_info(video_id)
|
||||
|
||||
title = video_data['episodeTitle']
|
||||
|
||||
if video_data.get('encrypted') == 'Y':
|
||||
@@ -99,3 +112,51 @@ class HotStarIE(InfoExtractor):
|
||||
'episode_number': int_or_none(video_data.get('episodeNumber')),
|
||||
'series': video_data.get('contentTitle'),
|
||||
}
|
||||
|
||||
|
||||
class HotStarPlaylistIE(HotStarBaseIE):
|
||||
IE_NAME = 'hotstar:playlist'
|
||||
_VALID_URL = r'(?P<url>https?://(?:www\.)?hotstar\.com/tv/[^/]+/(?P<content_id>\d+))/(?P<type>[^/]+)/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.hotstar.com/tv/pratidaan/14982/episodes/14812/9993',
|
||||
'info_dict': {
|
||||
'id': '14812',
|
||||
},
|
||||
'playlist_mincount': 75,
|
||||
}, {
|
||||
'url': 'http://www.hotstar.com/tv/pratidaan/14982/popular-clips/9998/9998',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_ITEM_TYPES = {
|
||||
'episodes': 'EPISODE',
|
||||
'popular-clips': 'CLIPS',
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
base_url = mobj.group('url')
|
||||
content_id = mobj.group('content_id')
|
||||
playlist_type = mobj.group('type')
|
||||
|
||||
content_info = self._download_content_info(content_id)
|
||||
playlist_id = compat_str(content_info['categoryId'])
|
||||
|
||||
collection = self._download_json(
|
||||
'https://search.hotstar.com/AVS/besc', playlist_id, query={
|
||||
'action': 'SearchContents',
|
||||
'appVersion': '5.0.40',
|
||||
'channel': 'PCTV',
|
||||
'moreFilters': 'series:%s;' % playlist_id,
|
||||
'query': '*',
|
||||
'searchOrder': 'last_broadcast_date desc,year desc,title asc',
|
||||
'type': self._ITEM_TYPES.get(playlist_type, 'EPISODE'),
|
||||
})
|
||||
|
||||
entries = [
|
||||
self.url_result(
|
||||
'%s/_/%s' % (base_url, video['contentId']),
|
||||
ie=HotStarIE.ie_key(), video_id=video['contentId'])
|
||||
for video in collection['response']['docs']
|
||||
if video.get('contentId')]
|
||||
|
||||
return self.playlist_result(entries, playlist_id)
|
||||
|
||||
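Side note (not part of the diff): a minimal, self-contained sketch of how the new playlist code maps the URL's type segment onto the search API's 'type' parameter, reusing the test URL, regex and _ITEM_TYPES table from the hunk above; unknown segments fall back to 'EPISODE'.

    # sketch only -- mirrors HotStarPlaylistIE's URL-to-item-type mapping
    import re

    _ITEM_TYPES = {
        'episodes': 'EPISODE',
        'popular-clips': 'CLIPS',
    }

    url = 'http://www.hotstar.com/tv/pratidaan/14982/popular-clips/9998/9998'
    mobj = re.match(
        r'(?P<url>https?://(?:www\.)?hotstar\.com/tv/[^/]+/(?P<content_id>\d+))/(?P<type>[^/]+)/(?P<id>\d+)',
        url)
    print(_ITEM_TYPES.get(mobj.group('type'), 'EPISODE'))  # -> CLIPS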
@@ -1,5 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import itertools
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
@@ -7,7 +8,6 @@ from ..compat import compat_str
|
||||
from ..utils import (
|
||||
get_element_by_attribute,
|
||||
int_or_none,
|
||||
limit_length,
|
||||
lowercase_escape,
|
||||
try_get,
|
||||
)
|
||||
@@ -130,13 +130,21 @@ class InstagramIE(InfoExtractor):
|
||||
video_url = media.get('video_url')
|
||||
height = int_or_none(media.get('dimensions', {}).get('height'))
|
||||
width = int_or_none(media.get('dimensions', {}).get('width'))
|
||||
description = media.get('caption')
|
||||
description = try_get(
|
||||
media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
|
||||
compat_str) or media.get('caption')
|
||||
thumbnail = media.get('display_src')
|
||||
timestamp = int_or_none(media.get('date'))
|
||||
timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
|
||||
uploader = media.get('owner', {}).get('full_name')
|
||||
uploader_id = media.get('owner', {}).get('username')
|
||||
like_count = int_or_none(media.get('likes', {}).get('count'))
|
||||
comment_count = int_or_none(media.get('comments', {}).get('count'))
|
||||
|
||||
def get_count(key, kind):
|
||||
return int_or_none(try_get(
|
||||
media, (lambda x: x['edge_media_%s' % key]['count'],
|
||||
lambda x: x['%ss' % kind]['count'])))
|
||||
like_count = get_count('preview_like', 'like')
|
||||
comment_count = get_count('to_comment', 'comment')
|
||||
|
||||
comments = [{
|
||||
'author': comment.get('user', {}).get('username'),
|
||||
'author_id': comment.get('user', {}).get('id'),
|
||||
@@ -212,7 +220,7 @@ class InstagramIE(InfoExtractor):
|
||||
|
||||
|
||||
class InstagramUserIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
|
||||
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
|
||||
IE_DESC = 'Instagram user profile'
|
||||
IE_NAME = 'instagram:user'
|
||||
_TEST = {
|
||||
@@ -221,82 +229,79 @@ class InstagramUserIE(InfoExtractor):
|
||||
'id': 'porsche',
|
||||
'title': 'porsche',
|
||||
},
|
||||
'playlist_mincount': 2,
|
||||
'playlist': [{
|
||||
'info_dict': {
|
||||
'id': '614605558512799803_462752227',
|
||||
'ext': 'mp4',
|
||||
'title': '#Porsche Intelligent Performance.',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'uploader': 'Porsche',
|
||||
'uploader_id': 'porsche',
|
||||
'timestamp': 1387486713,
|
||||
'upload_date': '20131219',
|
||||
},
|
||||
}],
|
||||
'playlist_count': 5,
|
||||
'params': {
|
||||
'extract_flat': True,
|
||||
'skip_download': True,
|
||||
'playlistend': 5,
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
uploader_id = mobj.group('username')
|
||||
def _entries(self, uploader_id):
|
||||
query = {
|
||||
'__a': 1,
|
||||
}
|
||||
|
||||
entries = []
|
||||
page_count = 0
|
||||
media_url = 'http://instagram.com/%s/media' % uploader_id
|
||||
while True:
|
||||
def get_count(kind):
|
||||
return int_or_none(try_get(
|
||||
node, lambda x: x['%ss' % kind]['count']))
|
||||
|
||||
for page_num in itertools.count(1):
|
||||
page = self._download_json(
|
||||
media_url, uploader_id,
|
||||
note='Downloading page %d ' % (page_count + 1),
|
||||
)
|
||||
page_count += 1
|
||||
'https://instagram.com/%s/' % uploader_id, uploader_id,
|
||||
note='Downloading page %d' % page_num,
|
||||
fatal=False, query=query)
|
||||
if not page:
|
||||
break
|
||||
|
||||
for it in page['items']:
|
||||
if it.get('type') != 'video':
|
||||
nodes = try_get(page, lambda x: x['user']['media']['nodes'], list)
|
||||
if not nodes:
|
||||
break
|
||||
|
||||
max_id = None
|
||||
|
||||
for node in nodes:
|
||||
node_id = node.get('id')
|
||||
if node_id:
|
||||
max_id = node_id
|
||||
|
||||
if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
|
||||
continue
|
||||
video_id = node.get('code')
|
||||
if not video_id:
|
||||
continue
|
||||
like_count = int_or_none(it.get('likes', {}).get('count'))
|
||||
user = it.get('user', {})
|
||||
|
||||
formats = [{
|
||||
'format_id': k,
|
||||
'height': v.get('height'),
|
||||
'width': v.get('width'),
|
||||
'url': v['url'],
|
||||
} for k, v in it['videos'].items()]
|
||||
self._sort_formats(formats)
|
||||
info = self.url_result(
|
||||
'https://instagram.com/p/%s/' % video_id,
|
||||
ie=InstagramIE.ie_key(), video_id=video_id)
|
||||
|
||||
thumbnails_el = it.get('images', {})
|
||||
thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
|
||||
description = try_get(
|
||||
node, [lambda x: x['caption'], lambda x: x['text']['id']],
|
||||
compat_str)
|
||||
thumbnail = node.get('thumbnail_src') or node.get('display_src')
|
||||
timestamp = int_or_none(node.get('date'))
|
||||
|
||||
# In some cases caption is null, which corresponds to None
|
||||
# in python. As a result, it.get('caption', {}) gives None
|
||||
title = (it.get('caption') or {}).get('text', it['id'])
|
||||
comment_count = get_count('comment')
|
||||
like_count = get_count('like')
|
||||
view_count = int_or_none(node.get('video_views'))
|
||||
|
||||
entries.append({
|
||||
'id': it['id'],
|
||||
'title': limit_length(title, 80),
|
||||
'formats': formats,
|
||||
info.update({
|
||||
'description': description,
|
||||
'thumbnail': thumbnail,
|
||||
'webpage_url': it.get('link'),
|
||||
'uploader': user.get('full_name'),
|
||||
'uploader_id': user.get('username'),
|
||||
'timestamp': timestamp,
|
||||
'comment_count': comment_count,
|
||||
'like_count': like_count,
|
||||
'timestamp': int_or_none(it.get('created_time')),
|
||||
'view_count': view_count,
|
||||
})
|
||||
|
||||
if not page['items']:
|
||||
break
|
||||
max_id = page['items'][-1]['id'].split('_')[0]
|
||||
media_url = (
|
||||
'http://instagram.com/%s/media?max_id=%s' % (
|
||||
uploader_id, max_id))
|
||||
yield info
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'entries': entries,
|
||||
'id': uploader_id,
|
||||
'title': uploader_id,
|
||||
}
|
||||
if not max_id:
|
||||
break
|
||||
|
||||
query['max_id'] = max_id
|
||||
|
||||
def _real_extract(self, url):
|
||||
uploader_id = self._match_id(url)
|
||||
return self.playlist_result(
|
||||
self._entries(uploader_id), uploader_id, uploader_id)
|
||||
|
||||
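Side note (not part of the diff): the rewritten InstagramUserIE._entries pages through a profile with itertools.count and a 'max_id' cursor taken from the last node of the previous page. A toy sketch of that loop follows; fake_api and its page data are made up and stand in for self._download_json.

    # sketch only -- cursor-style paging as used by the new _entries generator
    import itertools

    PAGES = {
        None: {'user': {'media': {'nodes': [{'id': '10'}, {'id': '20'}]}}},
        '20': {'user': {'media': {'nodes': [{'id': '30'}]}}},
        '30': {'user': {'media': {'nodes': []}}},
    }

    def fake_api(max_id):
        # stands in for self._download_json('https://instagram.com/<user>/', ...)
        return PAGES.get(max_id)

    query = {}
    for page_num in itertools.count(1):
        page = fake_api(query.get('max_id'))
        nodes = page['user']['media']['nodes'] if page else None
        if not nodes:
            break
        max_id = None
        for node in nodes:
            node_id = node.get('id')
            if node_id:
                max_id = node_id
            print('page %d: node %s' % (page_num, node_id))
        if not max_id:
            break
        query['max_id'] = max_id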
@@ -24,7 +24,7 @@ class JWPlatformIE(InfoExtractor):
|
||||
@staticmethod
|
||||
def _extract_url(webpage):
|
||||
mobj = re.search(
|
||||
r'<script[^>]+?src=["\'](?P<url>(?:https?:)?//content.jwplatform.com/players/[a-zA-Z0-9]{8})',
|
||||
r'<(?:script|iframe)[^>]+?src=["\'](?P<url>(?:https?:)?//content.jwplatform.com/players/[a-zA-Z0-9]{8})',
|
||||
webpage)
|
||||
if mobj:
|
||||
return mobj.group('url')
|
||||
|
||||
@@ -114,7 +114,7 @@ class LivestreamIE(InfoExtractor):
|
||||
|
||||
smil_url = video_data.get('smil_url')
|
||||
if smil_url:
|
||||
formats.extend(self._extract_smil_formats(smil_url, video_id))
|
||||
formats.extend(self._extract_smil_formats(smil_url, video_id, fatal=False))
|
||||
|
||||
m3u8_url = video_data.get('m3u8_url')
|
||||
if m3u8_url:
|
||||
|
||||
youtube_dl/extractor/massengeschmacktv.py (new file, 77 lines)
@@ -0,0 +1,77 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    determine_ext,
    int_or_none,
    js_to_json,
    mimetype2ext,
    parse_filesize,
)


class MassengeschmackTVIE(InfoExtractor):
    IE_NAME = 'massengeschmack.tv'
    _VALID_URL = r'https?://(?:www\.)?massengeschmack\.tv/play/(?P<id>[^?&#]+)'

    _TEST = {
        'url': 'https://massengeschmack.tv/play/fktv202',
        'md5': 'a9e054db9c2b5a08f0a0527cc201e8d3',
        'info_dict': {
            'id': 'fktv202',
            'ext': 'mp4',
            'title': 'Fernsehkritik-TV - Folge 202',
        },
    }

    def _real_extract(self, url):
        episode = self._match_id(url)

        webpage = self._download_webpage(url, episode)
        title = clean_html(self._html_search_regex(
            '<h3>([^<]+)</h3>', webpage, 'title'))
        thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False)
        sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json)

        formats = []
        for source in sources:
            furl = source.get('src')
            if not furl:
                continue
            furl = self._proto_relative_url(furl)
            ext = determine_ext(furl) or mimetype2ext(source.get('type'))
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    furl, episode, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': furl,
                    'format_id': determine_ext(furl),
                })

        for (durl, format_id, width, height, filesize) in re.findall(r'''(?x)
                <a[^>]+?href="(?P<url>(?:https:)?//[^"]+)".*?
                <strong>(?P<format_id>.+?)</strong>.*?
                <small>(?:(?P<width>\d+)x(?P<height>\d+))?\s+?\((?P<filesize>[\d,]+\s*[GM]iB)\)</small>
                ''', webpage):
            formats.append({
                'url': durl,
                'format_id': format_id,
                'width': int_or_none(width),
                'height': int_or_none(height),
                'filesize': parse_filesize(filesize),
                'vcodec': 'none' if format_id.startswith('Audio') else None,
            })

        self._sort_formats(formats, ('width', 'height', 'filesize', 'tbr'))

        return {
            'id': episode,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
        }
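Side note (not part of the diff): a self-contained sketch of the download-table regex used in the new extractor above. The HTML fragment is invented to match the shape the verbose pattern expects; it is not taken from massengeschmack.tv.

    # sketch only -- exercising the verbose download-table regex
    import re

    webpage = ('<a class="dl" href="//dl.example.invalid/fktv202.mp4">'
               '<strong>HD 720p</strong> <small>1280x720 (1,3 GiB)</small></a>')

    pattern = r'''(?x)
        <a[^>]+?href="(?P<url>(?:https:)?//[^"]+)".*?
        <strong>(?P<format_id>.+?)</strong>.*?
        <small>(?:(?P<width>\d+)x(?P<height>\d+))?\s+?\((?P<filesize>[\d,]+\s*[GM]iB)\)</small>
    '''

    for durl, format_id, width, height, filesize in re.findall(pattern, webpage):
        print(durl, format_id, width, height, filesize)
    # -> //dl.example.invalid/fktv202.mp4 HD 720p 1280 720 1,3 GiB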
@@ -28,7 +28,7 @@ class NexxIE(InfoExtractor):
|
||||
_TESTS = [{
|
||||
# movie
|
||||
'url': 'https://api.nexx.cloud/v3/748/videos/byid/128907',
|
||||
'md5': '16746bfc28c42049492385c989b26c4a',
|
||||
'md5': '828cea195be04e66057b846288295ba1',
|
||||
'info_dict': {
|
||||
'id': '128907',
|
||||
'ext': 'mp4',
|
||||
@@ -42,9 +42,6 @@ class NexxIE(InfoExtractor):
|
||||
'timestamp': 1384264416,
|
||||
'upload_date': '20131112',
|
||||
},
|
||||
'params': {
|
||||
'format': 'bestvideo',
|
||||
},
|
||||
}, {
|
||||
# episode
|
||||
'url': 'https://api.nexx.cloud/v3/741/videos/byid/247858',
|
||||
@@ -62,7 +59,6 @@ class NexxIE(InfoExtractor):
|
||||
'season_number': 2,
|
||||
},
|
||||
'params': {
|
||||
'format': 'bestvideo',
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
@@ -193,35 +189,67 @@ class NexxIE(InfoExtractor):
|
||||
stream_data = video['streamdata']
|
||||
language = general.get('language_raw') or ''
|
||||
|
||||
# TODO: reverse more cdns and formats
|
||||
# TODO: reverse more cdns
|
||||
|
||||
cdn = stream_data['cdnType']
|
||||
assert cdn == 'azure'
|
||||
|
||||
azure_locator = stream_data['azureLocator']
|
||||
|
||||
AZURE_URL = 'http://nx-p%02d.akamaized.net/'
|
||||
AZURE_URL = 'http://nx%s%02d.akamaized.net/'
|
||||
|
||||
for secure in ('s', ''):
|
||||
cdn_shield = stream_data.get('cdnShieldHTTP%s' % secure.upper())
|
||||
if cdn_shield:
|
||||
azure_base = 'http%s://%s' % (secure, cdn_shield)
|
||||
break
|
||||
else:
|
||||
azure_base = AZURE_URL % int(stream_data['azureAccount'].replace('nexxplayplus', ''))
|
||||
def get_cdn_shield_base(shield_type='', prefix='-p'):
|
||||
for secure in ('', 's'):
|
||||
cdn_shield = stream_data.get('cdnShield%sHTTP%s' % (shield_type, secure.upper()))
|
||||
if cdn_shield:
|
||||
return 'http%s://%s' % (secure, cdn_shield)
|
||||
else:
|
||||
return AZURE_URL % (prefix, int(stream_data['azureAccount'].replace('nexxplayplus', '')))
|
||||
|
||||
azure_stream_base = get_cdn_shield_base()
|
||||
is_ml = ',' in language
|
||||
azure_m3u8_url = '%s%s/%s_src%s.ism/Manifest(format=m3u8-aapl)' % (
|
||||
azure_base, azure_locator, video_id, ('_manifest' if is_ml else ''))
|
||||
azure_manifest_url = '%s%s/%s_src%s.ism/Manifest' % (
|
||||
azure_stream_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + '%s'
|
||||
|
||||
protection_token = try_get(
|
||||
video, lambda x: x['protectiondata']['token'], compat_str)
|
||||
if protection_token:
|
||||
azure_m3u8_url += '?hdnts=%s' % protection_token
|
||||
azure_manifest_url += '?hdnts=%s' % protection_token
|
||||
|
||||
formats = self._extract_m3u8_formats(
|
||||
azure_m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id='%s-hls' % cdn)
|
||||
azure_manifest_url % '(format=m3u8-aapl)',
|
||||
video_id, 'mp4', 'm3u8_native',
|
||||
m3u8_id='%s-hls' % cdn, fatal=False)
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
azure_manifest_url % '(format=mpd-time-csf)',
|
||||
video_id, mpd_id='%s-dash' % cdn, fatal=False))
|
||||
formats.extend(self._extract_ism_formats(
|
||||
azure_manifest_url % '', video_id, ism_id='%s-mss' % cdn, fatal=False))
|
||||
|
||||
azure_progressive_base = get_cdn_shield_base('Prog', '-d')
|
||||
azure_file_distribution = stream_data.get('azureFileDistribution')
|
||||
if azure_file_distribution:
|
||||
fds = azure_file_distribution.split(',')
|
||||
if fds:
|
||||
for fd in fds:
|
||||
ss = fd.split(':')
|
||||
if len(ss) == 2:
|
||||
tbr = int_or_none(ss[0])
|
||||
if tbr:
|
||||
f = {
|
||||
'url': '%s%s/%s_src_%s_%d.mp4' % (
|
||||
azure_progressive_base, azure_locator, video_id, ss[1], tbr),
|
||||
'format_id': '%s-http-%d' % (cdn, tbr),
|
||||
'tbr': tbr,
|
||||
}
|
||||
width_height = ss[1].split('x')
|
||||
if len(width_height) == 2:
|
||||
f.update({
|
||||
'width': int_or_none(width_height[0]),
|
||||
'height': int_or_none(width_height[1]),
|
||||
})
|
||||
formats.append(f)
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
|
||||
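Side note (not part of the diff): a sketch of how the reworked Nexx code appears to expand 'azureFileDistribution' into progressive HTTP formats. The distribution string, CDN base, locator and ids below are assumptions for illustration; real values come from the Nexx API response.

    # sketch only -- mirrors the azureFileDistribution loop from the hunk above
    def int_or_none(v):
        try:
            return int(v)
        except (TypeError, ValueError):
            return None

    azure_file_distribution = '1500:1280x720,800:640x360'   # assumed shape 'tbr:WxH,...'
    azure_progressive_base = 'http://nx-d01.akamaized.net/'  # assumed CDN base
    azure_locator, video_id, cdn = 'locator123', '128907', 'azure'

    formats = []
    for fd in azure_file_distribution.split(','):
        ss = fd.split(':')
        if len(ss) != 2:
            continue
        tbr = int_or_none(ss[0])
        if not tbr:
            continue
        f = {
            'url': '%s%s/%s_src_%s_%d.mp4' % (
                azure_progressive_base, azure_locator, video_id, ss[1], tbr),
            'format_id': '%s-http-%d' % (cdn, tbr),
            'tbr': tbr,
        }
        width_height = ss[1].split('x')
        if len(width_height) == 2:
            f.update({
                'width': int_or_none(width_height[0]),
                'height': int_or_none(width_height[1]),
            })
        formats.append(f)

    for f in formats:
        print(f['format_id'], f.get('width'), f.get('height'), f['url'])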
@@ -70,7 +70,7 @@ class NocoIE(InfoExtractor):
|
||||
return
|
||||
|
||||
login = self._download_json(
|
||||
self._LOGIN_URL, None, 'Logging in as %s' % username,
|
||||
self._LOGIN_URL, None, 'Logging in',
|
||||
data=urlencode_postdata({
|
||||
'a': 'login',
|
||||
'cookie': '1',
|
||||
|
||||
@@ -11,7 +11,7 @@ class OnceIE(InfoExtractor):
|
||||
ADAPTIVE_URL_TEMPLATE = 'http://once.unicornmedia.com/now/master/playlist/%s/%s/%s/content.m3u8'
|
||||
PROGRESSIVE_URL_TEMPLATE = 'http://once.unicornmedia.com/now/media/progressive/%s/%s/%s/%s/content.mp4'
|
||||
|
||||
def _extract_once_formats(self, url):
|
||||
def _extract_once_formats(self, url, http_formats_preference=None):
|
||||
domain_id, application_id, media_item_id = re.match(
|
||||
OnceIE._VALID_URL, url).groups()
|
||||
formats = self._extract_m3u8_formats(
|
||||
@@ -35,6 +35,7 @@ class OnceIE(InfoExtractor):
|
||||
'format_id': adaptive_format['format_id'].replace(
|
||||
'hls', 'http'),
|
||||
'protocol': 'http',
|
||||
'preference': http_formats_preference,
|
||||
})
|
||||
progressive_formats.append(progressive_format)
|
||||
self._check_formats(progressive_formats, media_item_id)
|
||||
|
||||
@@ -140,7 +140,7 @@ class PhantomJSwrapper(object):
|
||||
for name in self._TMP_FILE_NAMES:
|
||||
try:
|
||||
os.remove(self._TMP_FILES[name].name)
|
||||
except:
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
def _save_cookies(self, url):
|
||||
@@ -242,7 +242,7 @@ class PhantomJSwrapper(object):
|
||||
|
||||
|
||||
class OpenloadIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:openload\.(?:co|io)|oload\.tv)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:openload\.(?:co|io|link)|oload\.tv)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://openload.co/f/kUEfGclsU9o',
|
||||
@@ -286,6 +286,9 @@ class OpenloadIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://oload.tv/embed/KnG-kKZdcfY/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.openload.link/f/KnG-kKZdcfY',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
|
||||
|
||||
@@ -49,13 +49,13 @@ class ORFTVthekIE(InfoExtractor):
|
||||
'params': {
|
||||
'skip_download': True, # rtsp downloads
|
||||
},
|
||||
'_skip': 'Blocked outside of Austria / Germany',
|
||||
'skip': 'Blocked outside of Austria / Germany',
|
||||
}, {
|
||||
'url': 'http://tvthek.orf.at/topic/Fluechtlingskrise/10463081/Heimat-Fremde-Heimat/13879132/Senioren-betreuen-Migrantenkinder/13879141',
|
||||
'skip_download': True,
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://tvthek.orf.at/profile/Universum/35429',
|
||||
'skip_download': True,
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -33,7 +33,7 @@ class PandaTVIE(InfoExtractor):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
config = self._download_json(
|
||||
'https://www.panda.tv/api_room?roomid=%s' % video_id, video_id)
|
||||
'https://www.panda.tv/api_room_v2?roomid=%s' % video_id, video_id)
|
||||
|
||||
error_code = config.get('errno', 0)
|
||||
if error_code is not 0:
|
||||
@@ -66,6 +66,11 @@ class PandaTVIE(InfoExtractor):
|
||||
plflag1 = '4'
|
||||
live_panda = 'live_panda' if plflag0 < 1 else ''
|
||||
|
||||
plflag_auth = self._parse_json(video_info['plflag_list'], video_id)
|
||||
sign = plflag_auth['auth']['sign']
|
||||
ts = plflag_auth['auth']['time']
|
||||
rid = plflag_auth['auth']['rid']
|
||||
|
||||
quality_key = qualities(['OD', 'HD', 'SD'])
|
||||
suffix = ['_small', '_mid', '']
|
||||
formats = []
|
||||
@@ -77,8 +82,8 @@ class PandaTVIE(InfoExtractor):
|
||||
continue
|
||||
for pref, (ext, pl) in enumerate((('m3u8', '-hls'), ('flv', ''))):
|
||||
formats.append({
|
||||
'url': 'https://pl%s%s.live.panda.tv/live_panda/%s%s%s.%s'
|
||||
% (pl, plflag1, room_key, live_panda, suffix[quality], ext),
|
||||
'url': 'https://pl%s%s.live.panda.tv/live_panda/%s%s%s.%s?sign=%s&ts=%s&rid=%s'
|
||||
% (pl, plflag1, room_key, live_panda, suffix[quality], ext, sign, ts, rid),
|
||||
'format_id': '%s-%s' % (k, ext),
|
||||
'quality': quality,
|
||||
'source_preference': pref,
|
||||
|
||||
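Side note (not part of the diff): how the updated PandaTV code assembles the signed stream URL from the 'plflag_list' auth fields. All values below are invented for illustration.

    # sketch only -- the sign/ts/rid query parameters added in the hunk above
    plflag_auth = {'auth': {'sign': 'abc123', 'time': '1511600000', 'rid': '42'}}
    sign = plflag_auth['auth']['sign']
    ts = plflag_auth['auth']['time']
    rid = plflag_auth['auth']['rid']

    pl, plflag1, room_key, live_panda, suffix, ext = '3', '4', '10057', 'live_panda', '', 'flv'
    print('https://pl%s%s.live.panda.tv/live_panda/%s%s%s.%s?sign=%s&ts=%s&rid=%s'
          % (pl, plflag1, room_key, live_panda, suffix, ext, sign, ts, rid))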
@@ -67,7 +67,7 @@ class PatreonIE(InfoExtractor):
|
||||
'https://www.patreon.com/processLogin',
|
||||
compat_urllib_parse_urlencode(login_form).encode('utf-8')
|
||||
)
|
||||
login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
|
||||
login_page = self._download_webpage(request, None, note='Logging in')
|
||||
|
||||
if re.search(r'onLoginFailed', login_page):
|
||||
raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)
|
||||
|
||||
youtube_dl/extractor/performgroup.py (new file, 83 lines)
@@ -0,0 +1,83 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import int_or_none


class PerformGroupIE(InfoExtractor):
    _VALID_URL = r'https?://player\.performgroup\.com/eplayer(?:/eplayer\.html|\.js)#/?(?P<id>[0-9a-f]{26})\.(?P<auth_token>[0-9a-z]{26})'
    _TESTS = [{
        # http://www.faz.net/aktuell/sport/fussball/wm-2018-playoffs-schweiz-besiegt-nordirland-1-0-15286104.html
        'url': 'http://player.performgroup.com/eplayer/eplayer.html#d478c41c5d192f56b9aa859de8.1w4crrej5w14e1ed4s1ce4ykab',
        'md5': '259cb03d142e2e52471e8837ecacb29f',
        'info_dict': {
            'id': 'xgrwobuzumes1lwjxtcdpwgxd',
            'ext': 'mp4',
            'title': 'Liga MX: Keine Einsicht nach Horrorfoul',
            'description': 'md5:7cd3b459c82725b021e046ab10bf1c5b',
            'timestamp': 1511533477,
            'upload_date': '20171124',
        }
    }]

    def _call_api(self, service, auth_token, content_id, referer_url):
        return self._download_json(
            'http://ep3.performfeeds.com/ep%s/%s/%s/' % (service, auth_token, content_id),
            content_id, headers={
                'Referer': referer_url,
                'Origin': 'http://player.performgroup.com',
            }, query={
                '_fmt': 'json',
            })

    def _real_extract(self, url):
        player_id, auth_token = re.search(self._VALID_URL, url).groups()
        bootstrap = self._call_api('bootstrap', auth_token, player_id, url)
        video = bootstrap['config']['dataSource']['sourceItems'][0]['videos'][0]
        video_id = video['uuid']
        vod = self._call_api('vod', auth_token, video_id, url)
        media = vod['videos']['video'][0]['media']

        formats = []
        hls_url = media.get('hls', {}).get('url')
        if hls_url:
            formats.extend(self._extract_m3u8_formats(hls_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))

        hds_url = media.get('hds', {}).get('url')
        if hds_url:
            formats.extend(self._extract_f4m_formats(hds_url + '?hdcore', video_id, f4m_id='hds', fatal=False))

        for c in media.get('content', []):
            c_url = c.get('url')
            if not c_url:
                continue
            tbr = int_or_none(c.get('bitrate'), 1000)
            format_id = 'http'
            if tbr:
                format_id += '-%d' % tbr
            formats.append({
                'format_id': format_id,
                'url': c_url,
                'tbr': tbr,
                'width': int_or_none(c.get('width')),
                'height': int_or_none(c.get('height')),
                'filesize': int_or_none(c.get('fileSize')),
                'vcodec': c.get('type'),
                'fps': int_or_none(c.get('videoFrameRate')),
                'vbr': int_or_none(c.get('videoRate'), 1000),
                'abr': int_or_none(c.get('audioRate'), 1000),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video['title'],
            'description': video.get('description'),
            'thumbnail': video.get('poster'),
            'duration': int_or_none(video.get('duration')),
            'timestamp': int_or_none(video.get('publishedTime'), 1000),
            'formats': formats,
        }
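Side note (not part of the diff): the new PerformGroup extractor passes scale=1000 to int_or_none, so millisecond timestamps and bit-per-second rates come out as seconds and kbit/s. A simplified stand-in for the helper is sketched below; the real one lives in youtube_dl/utils.py and also handles defaults and inverse scales.

    # sketch only -- simplified int_or_none with a scale divisor
    def int_or_none(v, scale=1):
        try:
            return int(v) // scale
        except (TypeError, ValueError):
            return None

    print(int_or_none('2500000', 1000))      # bitrate in bps  -> 2500 (kbps)
    print(int_or_none(1511533477000, 1000))  # ms timestamp    -> 1511533477
    print(int_or_none(None, 1000))           # missing field   -> None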
@@ -116,7 +116,7 @@ class PluralsightIE(PluralsightBaseIE):
|
||||
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
|
||||
|
||||
response = self._download_webpage(
|
||||
post_url, None, 'Logging in as %s' % username,
|
||||
post_url, None, 'Logging in',
|
||||
data=urlencode_postdata(login_form),
|
||||
headers={'Content-Type': 'application/x-www-form-urlencoded'})
|
||||
|
||||
|
||||
@@ -68,7 +68,7 @@ class RoosterTeethIE(InfoExtractor):
|
||||
|
||||
login_request = self._download_webpage(
|
||||
self._LOGIN_URL, None,
|
||||
note='Logging in as %s' % username,
|
||||
note='Logging in',
|
||||
data=urlencode_postdata(login_form),
|
||||
headers={
|
||||
'Referer': self._LOGIN_URL,
|
||||
|
||||
@@ -21,7 +21,7 @@ class RozhlasIE(InfoExtractor):
|
||||
}
|
||||
}, {
|
||||
'url': 'http://prehravac.rozhlas.cz/audio/3421320/embed',
|
||||
'skip_download': True,
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -61,7 +61,7 @@ class SafariBaseIE(InfoExtractor):
|
||||
request = sanitized_Request(
|
||||
self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
|
||||
login_page = self._download_webpage(
|
||||
request, None, 'Logging in as %s' % username)
|
||||
request, None, 'Logging in')
|
||||
|
||||
if not is_logged(login_page):
|
||||
raise ExtractorError(
|
||||
|
||||
@@ -2,7 +2,12 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import strip_or_none
|
||||
from ..utils import (
|
||||
extract_attributes,
|
||||
smuggle_url,
|
||||
strip_or_none,
|
||||
urljoin,
|
||||
)
|
||||
|
||||
|
||||
class SkySportsIE(InfoExtractor):
|
||||
@@ -22,12 +27,22 @@ class SkySportsIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
video_data = extract_attributes(self._search_regex(
|
||||
r'(<div.+?class="sdc-article-video__media-ooyala"[^>]+>)', webpage, 'video data'))
|
||||
|
||||
video_url = 'ooyala:%s' % video_data['data-video-id']
|
||||
if video_data.get('data-token-required') == 'true':
|
||||
token_fetch_options = self._parse_json(video_data.get('data-token-fetch-options', '{}'), video_id, fatal=False) or {}
|
||||
token_fetch_url = token_fetch_options.get('url')
|
||||
if token_fetch_url:
|
||||
embed_token = self._download_webpage(urljoin(url, token_fetch_url), video_id, fatal=False)
|
||||
if embed_token:
|
||||
video_url = smuggle_url(video_url, {'embed_token': embed_token.strip('"')})
|
||||
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'id': video_id,
|
||||
'url': 'ooyala:%s' % self._search_regex(
|
||||
r'data-video-id="([^"]+)"', webpage, 'ooyala id'),
|
||||
'url': video_url,
|
||||
'title': self._og_search_title(webpage),
|
||||
'description': strip_or_none(self._og_search_description(webpage)),
|
||||
'ie_key': 'Ooyala',
|
||||
|
||||
@@ -3,10 +3,11 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import ExtractorError
|
||||
|
||||
|
||||
class SpankBangIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:(?:www|[a-z]{2})\.)?spankbang\.com/(?P<id>[\da-z]+)/video'
|
||||
_VALID_URL = r'https?://(?:(?:www|m|[a-z]{2})\.)?spankbang\.com/(?P<id>[\da-z]+)/video'
|
||||
_TESTS = [{
|
||||
'url': 'http://spankbang.com/3vvn/video/fantasy+solo',
|
||||
'md5': '1cc433e1d6aa14bc376535b8679302f7',
|
||||
@@ -14,7 +15,7 @@ class SpankBangIE(InfoExtractor):
|
||||
'id': '3vvn',
|
||||
'ext': 'mp4',
|
||||
'title': 'fantasy solo',
|
||||
'description': 'Watch fantasy solo free HD porn video - 05 minutes - dillion harper masturbates on a bed free adult movies.',
|
||||
'description': 'Watch fantasy solo free HD porn video - 05 minutes - Babe,Masturbation,Solo,Toy - dillion harper masturbates on a bed free adult movies sexy clips.',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'uploader': 'silly2587',
|
||||
'age_limit': 18,
|
||||
@@ -27,12 +28,20 @@ class SpankBangIE(InfoExtractor):
|
||||
# no uploader
|
||||
'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# mobile page
|
||||
'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
if re.search(r'<[^>]+\bid=["\']video_removed', webpage):
|
||||
raise ExtractorError(
|
||||
'Video %s is not available' % video_id, expected=True)
|
||||
|
||||
stream_key = self._html_search_regex(
|
||||
r'''var\s+stream_key\s*=\s*['"](.+?)['"]''',
|
||||
webpage, 'stream key')
|
||||
|
||||
@@ -21,6 +21,8 @@ class TNAFlixNetworkBaseIE(InfoExtractor):
|
||||
r'flashvars\.config\s*=\s*escape\("([^"]+)"',
|
||||
r'<input[^>]+name="config\d?" value="([^"]+)"',
|
||||
]
|
||||
_HOST = 'tna'
|
||||
_VKEY_SUFFIX = ''
|
||||
_TITLE_REGEX = r'<input[^>]+name="title" value="([^"]+)"'
|
||||
_DESCRIPTION_REGEX = r'<input[^>]+name="description" value="([^"]+)"'
|
||||
_UPLOADER_REGEX = r'<input[^>]+name="username" value="([^"]+)"'
|
||||
@@ -72,7 +74,13 @@ class TNAFlixNetworkBaseIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
display_id = mobj.group('display_id') if 'display_id' in mobj.groupdict() else video_id
|
||||
for display_id_key in ('display_id', 'display_id_2'):
|
||||
if display_id_key in mobj.groupdict():
|
||||
display_id = mobj.group(display_id_key)
|
||||
if display_id:
|
||||
break
|
||||
else:
|
||||
display_id = video_id
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
@@ -81,8 +89,8 @@ class TNAFlixNetworkBaseIE(InfoExtractor):
|
||||
|
||||
if not cfg_url:
|
||||
inputs = self._hidden_inputs(webpage)
|
||||
cfg_url = ('https://cdn-fck.tnaflix.com/tnaflix/%s.fid?key=%s&VID=%s&premium=1&vip=1&alpha'
|
||||
% (inputs['vkey'], inputs['nkey'], video_id))
|
||||
cfg_url = ('https://cdn-fck.%sflix.com/%sflix/%s%s.fid?key=%s&VID=%s&premium=1&vip=1&alpha'
|
||||
% (self._HOST, self._HOST, inputs['vkey'], self._VKEY_SUFFIX, inputs['nkey'], video_id))
|
||||
|
||||
cfg_xml = self._download_xml(
|
||||
cfg_url, display_id, 'Downloading metadata',
|
||||
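Side note (not part of the diff): what the templated config URL above expands to for the two hosts. VKEY and NKEY are placeholders, not real data; 33051 is the video id from the EMPFlix test further down.

    # sketch only -- _HOST / _VKEY_SUFFIX templating of the fallback cfg URL
    cfg_tmpl = ('https://cdn-fck.%sflix.com/%sflix/%s%s.fid'
                '?key=%s&VID=%s&premium=1&vip=1&alpha')

    for host, vkey_suffix in (('tna', ''), ('emp', '-1')):
        print(cfg_tmpl % (host, host, 'VKEY', vkey_suffix, 'NKEY', '33051'))
    # -> https://cdn-fck.tnaflix.com/tnaflix/VKEY.fid?key=NKEY&VID=33051&premium=1&vip=1&alpha
    # -> https://cdn-fck.empflix.com/empflix/VKEY-1.fid?key=NKEY&VID=33051&premium=1&vip=1&alpha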
@@ -91,7 +99,8 @@ class TNAFlixNetworkBaseIE(InfoExtractor):
|
||||
formats = []
|
||||
|
||||
def extract_video_url(vl):
|
||||
return re.sub(r'speed=\d+', 'speed=', unescapeHTML(vl.text))
|
||||
# Any URL modification now results in HTTP Error 403: Forbidden
|
||||
return unescapeHTML(vl.text)
|
||||
|
||||
video_link = cfg_xml.find('./videoLink')
|
||||
if video_link is not None:
|
||||
@@ -192,18 +201,21 @@ class TNAFlixNetworkEmbedIE(TNAFlixNetworkBaseIE):
|
||||
webpage)]
|
||||
|
||||
|
||||
class TNAFlixIE(TNAFlixNetworkBaseIE):
|
||||
class TNAEMPFlixBaseIE(TNAFlixNetworkBaseIE):
|
||||
_DESCRIPTION_REGEX = r'(?s)>Description:</[^>]+>(.+?)<'
|
||||
_UPLOADER_REGEX = r'<span>by\s*<a[^>]+\bhref=["\']/profile/[^>]+>([^<]+)<'
|
||||
_CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'
|
||||
|
||||
|
||||
class TNAFlixIE(TNAEMPFlixBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?tnaflix\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'
|
||||
|
||||
_TITLE_REGEX = r'<title>(.+?) - (?:TNAFlix Porn Videos|TNAFlix\.com)</title>'
|
||||
_DESCRIPTION_REGEX = r'(?s)>Description:</[^>]+>(.+?)<'
|
||||
_UPLOADER_REGEX = r'<i>\s*Verified Member\s*</i>\s*<h\d+>(.+?)<'
|
||||
_CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'
|
||||
|
||||
_TESTS = [{
|
||||
# anonymous uploader, no categories
|
||||
'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
|
||||
'md5': 'ecf3498417d09216374fc5907f9c6ec0',
|
||||
'md5': '7e569419fe6d69543d01e6be22f5f7c4',
|
||||
'info_dict': {
|
||||
'id': '553878',
|
||||
'display_id': 'Carmella-Decesare-striptease',
|
||||
@@ -228,7 +240,7 @@ class TNAFlixIE(TNAFlixNetworkBaseIE):
|
||||
'duration': 164,
|
||||
'age_limit': 18,
|
||||
'uploader': 'bobwhite39',
|
||||
'categories': ['Amateur Porn', 'Squirting Videos', 'Teen Girls 18+'],
|
||||
'categories': list,
|
||||
}
|
||||
}, {
|
||||
'url': 'https://www.tnaflix.com/amateur-porn/bunzHD-Ms.Donk/video358632',
|
||||
@@ -236,14 +248,15 @@ class TNAFlixIE(TNAFlixNetworkBaseIE):
|
||||
}]
|
||||
|
||||
|
||||
class EMPFlixIE(TNAFlixNetworkBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?empflix\.com/videos/(?P<display_id>.+?)-(?P<id>[0-9]+)\.html'
|
||||
class EMPFlixIE(TNAEMPFlixBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?empflix\.com/(?:videos/(?P<display_id>.+?)-|[^/]+/(?P<display_id_2>[^/]+)/video)(?P<id>[0-9]+)'
|
||||
|
||||
_UPLOADER_REGEX = r'<span[^>]+class="infoTitle"[^>]*>Uploaded By:</span>(.+?)</li>'
|
||||
_HOST = 'emp'
|
||||
_VKEY_SUFFIX = '-1'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
|
||||
'md5': 'b1bc15b6412d33902d6e5952035fcabc',
|
||||
'md5': 'bc30d48b91a7179448a0bda465114676',
|
||||
'info_dict': {
|
||||
'id': '33051',
|
||||
'display_id': 'Amateur-Finger-Fuck',
|
||||
@@ -259,6 +272,9 @@ class EMPFlixIE(TNAFlixNetworkBaseIE):
|
||||
}, {
|
||||
'url': 'http://www.empflix.com/videos/[AROMA][ARMD-718]-Aoi-Yoshino-Sawa-25826.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.empflix.com/amateur-porn/Amateur-Finger-Fuck/video33051',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
|
||||
|
||||
@@ -32,6 +32,8 @@ class TVAIE(InfoExtractor):
|
||||
video_data = self._download_json(
|
||||
'https://videos.tva.ca/proxy/item/_' + video_id, video_id, headers={
|
||||
'Accept': 'application/json',
|
||||
}, query={
|
||||
'appId': '5955fc5f23eec60006c951f1',
|
||||
})
|
||||
|
||||
def get_attribute(key):
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import int_or_none
|
||||
|
||||
# 22Tracks regularly replace the audio tracks that can be streamed on their
|
||||
# site. The tracks usually expire after 1 months, so we can't add tests.
|
||||
|
||||
|
||||
class TwentyTwoTracksIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://22tracks\.com/(?P<city>[a-z]+)/(?P<genre>[\da-z]+)/(?P<id>\d+)'
|
||||
IE_NAME = '22tracks:track'
|
||||
|
||||
_API_BASE = 'http://22tracks.com/api'
|
||||
|
||||
def _extract_info(self, city, genre_name, track_id=None):
|
||||
item_id = track_id if track_id else genre_name
|
||||
|
||||
cities = self._download_json(
|
||||
'%s/cities' % self._API_BASE, item_id,
|
||||
'Downloading cities info',
|
||||
'Unable to download cities info')
|
||||
city_id = [x['id'] for x in cities if x['slug'] == city][0]
|
||||
|
||||
genres = self._download_json(
|
||||
'%s/genres/%s' % (self._API_BASE, city_id), item_id,
|
||||
'Downloading %s genres info' % city,
|
||||
'Unable to download %s genres info' % city)
|
||||
genre = [x for x in genres if x['slug'] == genre_name][0]
|
||||
genre_id = genre['id']
|
||||
|
||||
tracks = self._download_json(
|
||||
'%s/tracks/%s' % (self._API_BASE, genre_id), item_id,
|
||||
'Downloading %s genre tracks info' % genre_name,
|
||||
'Unable to download track info')
|
||||
|
||||
return [x for x in tracks if x['id'] == item_id][0] if track_id else [genre['title'], tracks]
|
||||
|
||||
def _get_track_url(self, filename, track_id):
|
||||
token = self._download_json(
|
||||
'http://22tracks.com/token.php?desktop=true&u=/128/%s' % filename,
|
||||
track_id, 'Downloading token', 'Unable to download token')
|
||||
return 'http://audio.22tracks.com%s?st=%s&e=%d' % (token['filename'], token['st'], token['e'])
|
||||
|
||||
def _extract_track_info(self, track_info, track_id):
|
||||
download_url = self._get_track_url(track_info['filename'], track_id)
|
||||
title = '%s - %s' % (track_info['artist'].strip(), track_info['title'].strip())
|
||||
return {
|
||||
'id': track_id,
|
||||
'url': download_url,
|
||||
'ext': 'mp3',
|
||||
'title': title,
|
||||
'duration': int_or_none(track_info.get('duration')),
|
||||
'timestamp': int_or_none(track_info.get('published_at') or track_info.get('created'))
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
|
||||
city = mobj.group('city')
|
||||
genre = mobj.group('genre')
|
||||
track_id = mobj.group('id')
|
||||
|
||||
track_info = self._extract_info(city, genre, track_id)
|
||||
return self._extract_track_info(track_info, track_id)
|
||||
|
||||
|
||||
class TwentyTwoTracksGenreIE(TwentyTwoTracksIE):
|
||||
_VALID_URL = r'https?://22tracks\.com/(?P<city>[a-z]+)/(?P<genre>[\da-z]+)/?$'
|
||||
IE_NAME = '22tracks:genre'
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
|
||||
city = mobj.group('city')
|
||||
genre = mobj.group('genre')
|
||||
|
||||
genre_title, tracks = self._extract_info(city, genre)
|
||||
|
||||
entries = [
|
||||
self._extract_track_info(track_info, track_info['id'])
|
||||
for track_info in tracks]
|
||||
|
||||
return self.playlist_result(entries, genre, genre_title)
|
||||
@@ -101,7 +101,7 @@ class TwitchBaseIE(InfoExtractor):
|
||||
fail(clean_html(login_page))
|
||||
|
||||
redirect_page, handle = login_step(
|
||||
login_page, handle, 'Logging in as %s' % username, {
|
||||
login_page, handle, 'Logging in', {
|
||||
'username': username,
|
||||
'password': password,
|
||||
})
|
||||
|
||||
@@ -164,7 +164,7 @@ class UdemyIE(InfoExtractor):
|
||||
})
|
||||
|
||||
response = self._download_webpage(
|
||||
self._LOGIN_URL, None, 'Logging in as %s' % username,
|
||||
self._LOGIN_URL, None, 'Logging in',
|
||||
data=urlencode_postdata(login_form),
|
||||
headers={
|
||||
'Referer': self._ORIGIN_URL,
|
||||
|
||||
@@ -28,10 +28,10 @@ class VidziIE(InfoExtractor):
|
||||
},
|
||||
}, {
|
||||
'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html',
|
||||
'skip_download': True,
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://vidzi.cc/cghql9yq6emu.html',
|
||||
'skip_download': True,
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -99,7 +99,7 @@ class VikiBaseIE(InfoExtractor):
|
||||
|
||||
login = self._call_api(
|
||||
'sessions.json', None,
|
||||
'Logging in as %s' % username, post_data=login_form)
|
||||
'Logging in', post_data=login_form)
|
||||
|
||||
self._token = login.get('token')
|
||||
if not self._token:
|
||||
|
||||
@@ -67,7 +67,7 @@ class VKBaseIE(InfoExtractor):
|
||||
|
||||
login_page = self._download_webpage(
|
||||
'https://login.vk.com/?act=login', None,
|
||||
note='Logging in as %s' % username,
|
||||
note='Logging in',
|
||||
data=urlencode_postdata(login_form))
|
||||
|
||||
if re.search(r'onLoginFailed', login_page):
|
||||
|
||||
@@ -1,14 +1,21 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_chr
|
||||
from ..utils import (
|
||||
decode_packed_codes,
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
class VShareIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?vshare\.io/[dv]/(?P<id>[^/?#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://vshare.io/d/0f64ce6',
|
||||
'md5': '16d7b8fef58846db47419199ff1ab3e7',
|
||||
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
|
||||
'info_dict': {
|
||||
'id': '0f64ce6',
|
||||
'title': 'vl14062007715967',
|
||||
@@ -19,20 +26,49 @@ class VShareIE(InfoExtractor):
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
def _extract_urls(webpage):
|
||||
return re.findall(
|
||||
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?vshare\.io/v/[^/?#&]+)',
|
||||
webpage)
|
||||
|
||||
def _extract_packed(self, webpage):
|
||||
packed = self._search_regex(
|
||||
r'(eval\(function.+)', webpage, 'packed code')
|
||||
unpacked = decode_packed_codes(packed)
|
||||
digits = self._search_regex(r'\[((?:\d+,?)+)\]', unpacked, 'digits')
|
||||
digits = [int(digit) for digit in digits.split(',')]
|
||||
key_digit = self._search_regex(
|
||||
r'fromCharCode\(.+?(\d+)\)}', unpacked, 'key digit')
|
||||
chars = [compat_chr(d - int(key_digit)) for d in digits]
|
||||
return ''.join(chars)
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(
|
||||
'https://vshare.io/d/%s' % video_id, video_id)
|
||||
'https://vshare.io/v/%s/width-650/height-430/1' % video_id,
|
||||
video_id)
|
||||
|
||||
title = self._html_search_regex(
|
||||
r'(?s)<div id="root-container">(.+?)<br/>', webpage, 'title')
|
||||
video_url = self._search_regex(
|
||||
r'<a[^>]+href=(["\'])(?P<url>(?:https?:)?//.+?)\1[^>]*>[Cc]lick\s+here',
|
||||
webpage, 'video url', group='url')
|
||||
r'<title>([^<]+)</title>', webpage, 'title')
|
||||
title = title.split(' - ')[0]
|
||||
|
||||
return {
|
||||
error = self._html_search_regex(
|
||||
r'(?s)<div[^>]+\bclass=["\']xxx-error[^>]+>(.+?)</div', webpage,
|
||||
'error', default=None)
|
||||
if error:
|
||||
raise ExtractorError(error, expected=True)
|
||||
|
||||
info = self._parse_html5_media_entries(
|
||||
url, '<video>%s</video>' % self._extract_packed(webpage),
|
||||
video_id)[0]
|
||||
|
||||
self._sort_formats(info['formats'])
|
||||
|
||||
info.update({
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'url': video_url,
|
||||
}
|
||||
})
|
||||
|
||||
return info
|
||||
|
||||
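Side note (not part of the diff): the decoding idea behind the new VShareIE._extract_packed is that the unpacked JavaScript holds a list of integers plus a key digit, and each character of the embedded player markup is chr(value - key). The digits below are made up so the sketch decodes to '<video'; they are not real site data.

    # sketch only -- character-code decoding as in _extract_packed
    digits = [64, 122, 109, 104, 105, 115]  # hypothetical values from the unpacked JS
    key_digit = 4                           # hypothetical key digit
    print(''.join(chr(d - key_digit) for d in digits))  # -> '<video'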
@@ -13,7 +13,7 @@ class WSJIE(InfoExtractor):
|
||||
_VALID_URL = r'''(?x)
|
||||
(?:
|
||||
https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
|
||||
https?://(?:www\.)?(?:wsj|barrons)\.com/video/[^/]+/|
|
||||
https?://(?:www\.)?(?:wsj|barrons)\.com/video/(?:[^/]+/)+|
|
||||
wsj:
|
||||
)
|
||||
(?P<id>[a-fA-F0-9-]{36})
|
||||
@@ -38,6 +38,9 @@ class WSJIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'http://www.barrons.com/video/capitalism-deserves-more-respect-from-millennials/F301217E-6F46-43AE-B8D2-B7180D642EE9.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.wsj.com/video/series/a-brief-history-of/the-modern-cell-carrier-how-we-got-here/980E2187-401D-48A1-B82B-1486CEE06CB9',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -154,7 +154,7 @@ class YoukuIE(InfoExtractor):
|
||||
# request basic data
|
||||
basic_data_params = {
|
||||
'vid': video_id,
|
||||
'ccode': '0402' if 'tudou.com' in url else '0401',
|
||||
'ccode': '0502',
|
||||
'client_ip': '192.168.1.1',
|
||||
'utid': cna,
|
||||
'client_ts': time.time() / 1000,
|
||||
@@ -240,7 +240,7 @@ class YoukuShowIE(InfoExtractor):
|
||||
}, {
|
||||
# Ongoing playlist. The initial page is the last one
|
||||
'url': 'http://list.youku.com/show/id_za7c275ecd7b411e1a19e.html',
|
||||
'only_matchine': True,
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _extract_entries(self, playlist_data_url, show_id, note, query):
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
__version__ = '2017.10.29'
|
||||
__version__ = '2017.11.26'