mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2025-12-18 03:42:23 +01:00
Compare commits
334 Commits
2021.03.03
...
2021.06.01
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3de7c2ce9a | ||
|
|
bc6b9bcd65 | ||
|
|
6e6390321c | ||
|
|
4040428efc | ||
|
|
cc1dfc9373 | ||
|
|
14eb1ee1cb | ||
|
|
879e7199bb | ||
|
|
d89da64b1d | ||
|
|
5dcd8e1d88 | ||
|
|
10bb7e51e8 | ||
|
|
b0089e8992 | ||
|
|
a3ed14cbaf | ||
|
|
9dee4df559 | ||
|
|
adddc50cbf | ||
|
|
46c43ffc9d | ||
|
|
37a3bb66a7 | ||
|
|
337e0c62f8 | ||
|
|
885cc0b75c | ||
|
|
46953e7e6e | ||
|
|
ae8f99e648 | ||
|
|
077c476276 | ||
|
|
835a1478b4 | ||
|
|
120fe5134a | ||
|
|
56a8fb4f77 | ||
|
|
55575225b4 | ||
|
|
483336e79e | ||
|
|
c77495e3a4 | ||
|
|
65af1839c6 | ||
|
|
177877c544 | ||
|
|
b25522ba52 | ||
|
|
c19bc311cb | ||
|
|
5435dcf96e | ||
|
|
f17c702270 | ||
|
|
3907333c5d | ||
|
|
acdecdfaef | ||
|
|
09d18ad07e | ||
|
|
bc516a3f3c | ||
|
|
9572eaaa11 | ||
|
|
18e674b4f6 | ||
|
|
8d68ab98a7 | ||
|
|
135e6b93f4 | ||
|
|
13a49340ed | ||
|
|
81a23040eb | ||
|
|
857f63136d | ||
|
|
a927acb1ec | ||
|
|
09f1580e2d | ||
|
|
cd59e22191 | ||
|
|
7237fdc6ce | ||
|
|
0fdf490d33 | ||
|
|
b73612a254 | ||
|
|
5014558ab9 | ||
|
|
28b0eb0f65 | ||
|
|
95131b2176 | ||
|
|
2305e2e5c9 | ||
|
|
00ae27690d | ||
|
|
9d5d4d64f8 | ||
|
|
98784ef8d6 | ||
|
|
d3fc8074a4 | ||
|
|
9c2b75b561 | ||
|
|
856bb8f99d | ||
|
|
af32f40bf5 | ||
|
|
4ec82a72bb | ||
|
|
07cce701de | ||
|
|
74e001af1d | ||
|
|
ff2751ac9c | ||
|
|
abcdd12b26 | ||
|
|
18db754858 | ||
|
|
fe03a6cdc8 | ||
|
|
cd684175ad | ||
|
|
da692b7920 | ||
|
|
95c01b6c16 | ||
|
|
6911e11edd | ||
|
|
5112f26a60 | ||
|
|
a06916d98e | ||
|
|
681de68e9d | ||
|
|
7aee40c13c | ||
|
|
9297939ec3 | ||
|
|
774d79cc4c | ||
|
|
2412044c90 | ||
|
|
120916dac2 | ||
|
|
fe346461ff | ||
|
|
d2a1fad968 | ||
|
|
0fb983f62d | ||
|
|
53c18592d3 | ||
|
|
e632bce2e4 | ||
|
|
0760b0a7e2 | ||
|
|
d908aa636a | ||
|
|
3d89341b47 | ||
|
|
d8ec40b39f | ||
|
|
4171221823 | ||
|
|
eaeca38fc4 | ||
|
|
fac988053f | ||
|
|
61241abbb0 | ||
|
|
53ed7066ab | ||
|
|
a61f4b287b | ||
|
|
486fb17975 | ||
|
|
2f567473c6 | ||
|
|
000ee7ef34 | ||
|
|
41d1cca328 | ||
|
|
717297545b | ||
|
|
e8e738406a | ||
|
|
e625be0d10 | ||
|
|
12e73423f1 | ||
|
|
7700b37f39 | ||
|
|
c28cfda81f | ||
|
|
848887eb7a | ||
|
|
3158150cb7 | ||
|
|
6ef6bcbd6b | ||
|
|
06425e9621 | ||
|
|
4d224a3022 | ||
|
|
f59ae58163 | ||
|
|
0d1bb027aa | ||
|
|
4cd0a709aa | ||
|
|
1815d1028b | ||
|
|
0fa9a1e236 | ||
|
|
eb55bad5a0 | ||
|
|
cc0ec3e161 | ||
|
|
80185155a1 | ||
|
|
c755f1901f | ||
|
|
68b91dc905 | ||
|
|
88f06afc0c | ||
|
|
40078a55e2 | ||
|
|
d2558234cf | ||
|
|
f5fa042c82 | ||
|
|
07e4a40a9a | ||
|
|
e28f1c0ae8 | ||
|
|
ef39f8600a | ||
|
|
2291dbce2a | ||
|
|
58f197b76c | ||
|
|
895b0931e5 | ||
|
|
1ad047d0f7 | ||
|
|
be6202f12b | ||
|
|
e8f834cd8d | ||
|
|
e0e624ca7f | ||
|
|
ec4f374c05 | ||
|
|
c811e8d8bd | ||
|
|
b2cd5da460 | ||
|
|
2de3b21e05 | ||
|
|
4bed436371 | ||
|
|
efe9dba595 | ||
|
|
47f4203dd3 | ||
|
|
015c10aeec | ||
|
|
a00d781b73 | ||
|
|
0c541b563f | ||
|
|
64a5cf7929 | ||
|
|
7a450a3b1c | ||
|
|
7de27caf16 | ||
|
|
c26326c1be | ||
|
|
66a1b8643a | ||
|
|
15828bcf25 | ||
|
|
333217f43e | ||
|
|
4a2f19abbd | ||
|
|
5fbcebed8c | ||
|
|
becdc7f82c | ||
|
|
73b9088a1c | ||
|
|
f6a1d69a87 | ||
|
|
fd76a14259 | ||
|
|
171e59edd4 | ||
|
|
a0c3b2d5cf | ||
|
|
19bb39202d | ||
|
|
d4553567d2 | ||
|
|
4d49884c58 | ||
|
|
5873d4ccdd | ||
|
|
db9a564b6a | ||
|
|
c72967d5de | ||
|
|
598d185db1 | ||
|
|
b982cbdd0e | ||
|
|
6a04a74e8b | ||
|
|
88728713c8 | ||
|
|
6b1d8c1e30 | ||
|
|
87c3d06271 | ||
|
|
915f911e36 | ||
|
|
cf9d6cfb0c | ||
|
|
bbed5763f1 | ||
|
|
ca0b91b39e | ||
|
|
0cf0571560 | ||
|
|
e58c22a0f6 | ||
|
|
e4bdd3377d | ||
|
|
0b2e9d2c30 | ||
|
|
1bdae7d312 | ||
|
|
a471f21da6 | ||
|
|
6efb071135 | ||
|
|
f4536226c1 | ||
|
|
a439a3a45c | ||
|
|
26e2805c3f | ||
|
|
3b4775e021 | ||
|
|
ab406a1c0e | ||
|
|
a3faeb7de4 | ||
|
|
8c54a3051d | ||
|
|
c32b0aab8a | ||
|
|
3097d9e512 | ||
|
|
c1df120eda | ||
|
|
2cff495997 | ||
|
|
d0491a1ebe | ||
|
|
b9d68c199b | ||
|
|
155510fe81 | ||
|
|
201c145953 | ||
|
|
5d34200268 | ||
|
|
b7da73eb19 | ||
|
|
6a39ee13f7 | ||
|
|
33245766ab | ||
|
|
358de58c4d | ||
|
|
a7191c6f57 | ||
|
|
baa5873942 | ||
|
|
c6ce815461 | ||
|
|
79360d99d3 | ||
|
|
46fff7105e | ||
|
|
72e1fe969f | ||
|
|
b5be6dd504 | ||
|
|
8ea3f7b909 | ||
|
|
921b76cab8 | ||
|
|
a31953b0e6 | ||
|
|
54670cf084 | ||
|
|
a0f30f194a | ||
|
|
b31fdeedfd | ||
|
|
8fa43c73d8 | ||
|
|
56d868dbb7 | ||
|
|
f4f751af40 | ||
|
|
1988fab7e3 | ||
|
|
9de3ea3126 | ||
|
|
e01d6aa435 | ||
|
|
f7ad71607d | ||
|
|
68379de561 | ||
|
|
d9aa233295 | ||
|
|
f37468c41f | ||
|
|
52a8a1e1b9 | ||
|
|
d818eb7473 | ||
|
|
f8d4ad9ab0 | ||
|
|
3ffc7c89b0 | ||
|
|
f1823403b0 | ||
|
|
384fb069ec | ||
|
|
a4ddaf231e | ||
|
|
7e60c06925 | ||
|
|
d92f5d5a90 | ||
|
|
9e62f283ff | ||
|
|
c24ce07a84 | ||
|
|
de6758128e | ||
|
|
73d4343e39 | ||
|
|
57d104424f | ||
|
|
02aabd45d0 | ||
|
|
39ed931e53 | ||
|
|
b28f8d244a | ||
|
|
73cd218f5a | ||
|
|
84601bb72b | ||
|
|
54df8fc5b2 | ||
|
|
5d39972ed0 | ||
|
|
0481374e1d | ||
|
|
eff635394a | ||
|
|
df0c81513e | ||
|
|
3f6a90eb63 | ||
|
|
b050d210df | ||
|
|
f4e4be19f0 | ||
|
|
cce889b900 | ||
|
|
a6ae61a4c2 | ||
|
|
b23b9eefd9 | ||
|
|
a2f0b0c672 | ||
|
|
b704fc1a68 | ||
|
|
a3affbe6a0 | ||
|
|
1418a0437f | ||
|
|
143db31d48 | ||
|
|
3700c7ef10 | ||
|
|
498f560638 | ||
|
|
394dcd4486 | ||
|
|
83b20a970d | ||
|
|
e1feb88fdf | ||
|
|
389b9dbbcc | ||
|
|
a7f347d9c9 | ||
|
|
421a459573 | ||
|
|
c224251aad | ||
|
|
037cc66ec8 | ||
|
|
9160a0c6a2 | ||
|
|
5c5fae6d2f | ||
|
|
c1d3a4a8f0 | ||
|
|
adc74b3c6d | ||
|
|
beb4b92a66 | ||
|
|
cd9b384cc3 | ||
|
|
4d971a16b8 | ||
|
|
3561530776 | ||
|
|
4690688658 | ||
|
|
fe845284c4 | ||
|
|
2b3bf01c90 | ||
|
|
23c1a66730 | ||
|
|
dd18a58cb1 | ||
|
|
a94bfd6cfe | ||
|
|
a515a78dd3 | ||
|
|
e167860ce7 | ||
|
|
75d43ca080 | ||
|
|
5226731e2d | ||
|
|
dcf64d43e0 | ||
|
|
e3c076970e | ||
|
|
7978e172f3 | ||
|
|
605d299f83 | ||
|
|
18c1f04362 | ||
|
|
e4beae703d | ||
|
|
d034ab669c | ||
|
|
5aeefbd633 | ||
|
|
597c18665e | ||
|
|
10db0d2f57 | ||
|
|
7275535116 | ||
|
|
a1c5d2ca64 | ||
|
|
ca87974543 | ||
|
|
e92caff5d5 | ||
|
|
ea3a012d2a | ||
|
|
5b8917fb52 | ||
|
|
8eec0120a2 | ||
|
|
4cf1e5d2f9 | ||
|
|
0a473f2f0f | ||
|
|
e4edeb6226 | ||
|
|
d488e254d9 | ||
|
|
d7009caa03 | ||
|
|
54759df586 | ||
|
|
605b684c2d | ||
|
|
994443d24d | ||
|
|
c5640c4508 | ||
|
|
1f52a09e2e | ||
|
|
fc21af505c | ||
|
|
015f3b3120 | ||
|
|
5ba4a0b69c | ||
|
|
0852947fcc | ||
|
|
99594a11ce | ||
|
|
2be71994c0 | ||
|
|
26fe8ffed0 | ||
|
|
feee67ae88 | ||
|
|
1caaf92d47 | ||
|
|
d069eca7a3 | ||
|
|
f3eaa8dd1c | ||
|
|
9e631877f8 | ||
|
|
36147a63e3 | ||
|
|
57db6a87ef | ||
|
|
cd7c66cf01 | ||
|
|
2c736b4f61 | ||
|
|
c4a508ab31 | ||
|
|
7815e55572 | ||
|
|
162e6f0000 |
6
.github/ISSUE_TEMPLATE/1_broken_site.md
vendored
6
.github/ISSUE_TEMPLATE/1_broken_site.md
vendored
@@ -21,7 +21,7 @@ assignees: ''
|
||||
|
||||
<!--
|
||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.03.01. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.05.20. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
||||
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
|
||||
- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
||||
@@ -29,7 +29,7 @@ Carefully read and work through this check list in order to prevent the most com
|
||||
-->
|
||||
|
||||
- [ ] I'm reporting a broken site support
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.03.01**
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.05.20**
|
||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
||||
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
|
||||
- [ ] I've searched the bugtracker for similar issues including closed ones
|
||||
@@ -44,7 +44,7 @@ Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your com
|
||||
[debug] User config: []
|
||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||
[debug] yt-dlp version 2021.03.01
|
||||
[debug] yt-dlp version 2021.05.20
|
||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||
[debug] Proxy map: {}
|
||||
|
||||
@@ -21,7 +21,7 @@ assignees: ''
|
||||
|
||||
<!--
|
||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.03.01. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.05.20. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
||||
- Make sure that site you are requesting is not dedicated to copyright infringement, see https://github.com/yt-dlp/yt-dlp. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
|
||||
- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
||||
@@ -29,7 +29,7 @@ Carefully read and work through this check list in order to prevent the most com
|
||||
-->
|
||||
|
||||
- [ ] I'm reporting a new site support request
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.03.01**
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.05.20**
|
||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
||||
- [ ] I've checked that none of provided URLs violate any copyrights
|
||||
- [ ] I've searched the bugtracker for similar site support requests including closed ones
|
||||
|
||||
@@ -21,13 +21,13 @@ assignees: ''
|
||||
|
||||
<!--
|
||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.03.01. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.05.20. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
||||
-->
|
||||
|
||||
- [ ] I'm reporting a site feature request
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.03.01**
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.05.20**
|
||||
- [ ] I've searched the bugtracker for similar site feature requests including closed ones
|
||||
|
||||
|
||||
|
||||
6
.github/ISSUE_TEMPLATE/4_bug_report.md
vendored
6
.github/ISSUE_TEMPLATE/4_bug_report.md
vendored
@@ -21,7 +21,7 @@ assignees: ''
|
||||
|
||||
<!--
|
||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.03.01. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.05.20. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
||||
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
|
||||
- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
||||
@@ -30,7 +30,7 @@ Carefully read and work through this check list in order to prevent the most com
|
||||
-->
|
||||
|
||||
- [ ] I'm reporting a broken site support issue
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.03.01**
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.05.20**
|
||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
||||
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
|
||||
- [ ] I've searched the bugtracker for similar bug reports including closed ones
|
||||
@@ -46,7 +46,7 @@ Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your com
|
||||
[debug] User config: []
|
||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||
[debug] yt-dlp version 2021.03.01
|
||||
[debug] yt-dlp version 2021.05.20
|
||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||
[debug] Proxy map: {}
|
||||
|
||||
4
.github/ISSUE_TEMPLATE/5_feature_request.md
vendored
4
.github/ISSUE_TEMPLATE/5_feature_request.md
vendored
@@ -21,13 +21,13 @@ assignees: ''
|
||||
|
||||
<!--
|
||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.03.01. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.05.20. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
||||
- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
||||
-->
|
||||
|
||||
- [ ] I'm reporting a feature request
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.03.01**
|
||||
- [ ] I've verified that I'm running yt-dlp version **2021.05.20**
|
||||
- [ ] I've searched the bugtracker for similar feature requests including closed ones
|
||||
|
||||
|
||||
|
||||
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -132,7 +132,7 @@ jobs:
|
||||
- name: Upgrade pip and enable wheel support
|
||||
run: python -m pip install pip==19.1.1 setuptools==43.0.0 wheel==0.33.6
|
||||
- name: Install Requirements for 32 Bit
|
||||
run: pip install pyinstaller==3.5 mutagen==1.42.0 pycryptodome==3.9.4
|
||||
run: pip install pyinstaller==3.5 mutagen==1.42.0 pycryptodome==3.9.4 pefile==2019.4.18
|
||||
- name: Bump version
|
||||
id: bump_version
|
||||
run: python devscripts/update-version.py
|
||||
|
||||
11
.github/workflows/core.yml
vendored
11
.github/workflows/core.yml
vendored
@@ -6,7 +6,7 @@ jobs:
|
||||
if: "!contains(github.event.head_commit.message, 'ci skip')"
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: true
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-18.04]
|
||||
# TODO: python 2.6
|
||||
@@ -41,11 +41,18 @@ jobs:
|
||||
- name: Install Jython
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
run: |
|
||||
wget http://search.maven.org/remotecontent?filepath=org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
|
||||
wget https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
|
||||
java -jar jython-installer.jar -s -d "$HOME/jython"
|
||||
echo "$HOME/jython/bin" >> $GITHUB_PATH
|
||||
- name: Install nose
|
||||
if: ${{ matrix.python-impl != 'jython' }}
|
||||
run: pip install nose
|
||||
- name: Install nose (Jython)
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
# Working around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
|
||||
run: |
|
||||
wget https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl
|
||||
pip install nose-1.3.7-py2-none-any.whl
|
||||
- name: Run tests
|
||||
continue-on-error: ${{ matrix.ytdl-test-set == 'download' || matrix.python-impl == 'jython' }}
|
||||
env:
|
||||
|
||||
9
.github/workflows/download.yml
vendored
9
.github/workflows/download.yml
vendored
@@ -41,11 +41,18 @@ jobs:
|
||||
- name: Install Jython
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
run: |
|
||||
wget http://search.maven.org/remotecontent?filepath=org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
|
||||
wget https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar -O jython-installer.jar
|
||||
java -jar jython-installer.jar -s -d "$HOME/jython"
|
||||
echo "$HOME/jython/bin" >> $GITHUB_PATH
|
||||
- name: Install nose
|
||||
if: ${{ matrix.python-impl != 'jython' }}
|
||||
run: pip install nose
|
||||
- name: Install nose (Jython)
|
||||
if: ${{ matrix.python-impl == 'jython' }}
|
||||
# Working around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
|
||||
run: |
|
||||
wget https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl
|
||||
pip install nose-1.3.7-py2-none-any.whl
|
||||
- name: Run tests
|
||||
continue-on-error: ${{ matrix.ytdl-test-set == 'download' || matrix.python-impl == 'jython' }}
|
||||
env:
|
||||
|
||||
84
.gitignore
vendored
84
.gitignore
vendored
@@ -1,3 +1,46 @@
|
||||
# Config
|
||||
*.conf
|
||||
*.spec
|
||||
cookies
|
||||
cookies.txt
|
||||
|
||||
# Downloaded
|
||||
*.srt
|
||||
*.ttml
|
||||
*.sbv
|
||||
*.vtt
|
||||
*.flv
|
||||
*.mp4
|
||||
*.m4a
|
||||
*.m4v
|
||||
*.mp3
|
||||
*.3gp
|
||||
*.webm
|
||||
*.wav
|
||||
*.ape
|
||||
*.mkv
|
||||
*.swf
|
||||
*.part
|
||||
*.part-*
|
||||
*.ytdl
|
||||
*.dump
|
||||
*.frag
|
||||
*.frag.urls
|
||||
*.aria2
|
||||
*.swp
|
||||
*.ogg
|
||||
*.opus
|
||||
*.info.json
|
||||
*.live_chat.json
|
||||
*.jpg
|
||||
*.png
|
||||
*.webp
|
||||
*.annotations.xml
|
||||
*.description
|
||||
|
||||
# Allow config/media files in testdata
|
||||
!test/testdata/**
|
||||
|
||||
# Python
|
||||
*.pyc
|
||||
*.pyo
|
||||
@@ -43,47 +86,6 @@ README.txt
|
||||
yt-dlp.zip
|
||||
*.exe
|
||||
|
||||
# Downloaded
|
||||
*.srt
|
||||
*.ttml
|
||||
*.sbv
|
||||
*.vtt
|
||||
*.flv
|
||||
*.mp4
|
||||
*.m4a
|
||||
*.m4v
|
||||
*.mp3
|
||||
*.3gp
|
||||
*.webm
|
||||
*.wav
|
||||
*.ape
|
||||
*.mkv
|
||||
*.swf
|
||||
*.part
|
||||
*.ytdl
|
||||
*.dump
|
||||
*.frag
|
||||
*.frag.urls
|
||||
*.aria2
|
||||
*.swp
|
||||
*.ogg
|
||||
*.opus
|
||||
*.info.json
|
||||
*.live_chat.json
|
||||
*.jpg
|
||||
*.png
|
||||
*.webp
|
||||
*.annotations.xml
|
||||
*.description
|
||||
|
||||
# Config
|
||||
*.conf
|
||||
*.spec
|
||||
cookies
|
||||
cookies.txt
|
||||
|
||||
|
||||
|
||||
# Text Editor / IDE
|
||||
.idea
|
||||
*.iml
|
||||
|
||||
24
CONTRIBUTORS
24
CONTRIBUTORS
@@ -1,5 +1,6 @@
|
||||
pukkandan (owner)
|
||||
shirt-dev (collaborator)
|
||||
colethedj (collaborator)
|
||||
h-h-h-h
|
||||
pauldubois98
|
||||
nixxo
|
||||
@@ -21,10 +22,29 @@ nao20010128nao
|
||||
kurumigi
|
||||
tsukumi
|
||||
bbepis
|
||||
animelover1984
|
||||
Pccode66
|
||||
Ashish
|
||||
Ashish0804
|
||||
RobinD42
|
||||
hseg
|
||||
colethedj
|
||||
DennyDai
|
||||
codeasashu
|
||||
teesid
|
||||
kevinoconnor7
|
||||
damianoamatruda
|
||||
2ShedsJackson
|
||||
CXwudi
|
||||
xtkoba
|
||||
llacb47
|
||||
hheimbuerger
|
||||
B0pol
|
||||
lkho
|
||||
fstirlitz
|
||||
Lamieur
|
||||
tsukumijima
|
||||
Hadi0609
|
||||
b5eff52
|
||||
craftingmod
|
||||
tpikonen
|
||||
tripulse
|
||||
king-millez
|
||||
|
||||
378
Changelog.md
378
Changelog.md
@@ -6,9 +6,11 @@
|
||||
* Run `make doc`
|
||||
* Update Changelog.md and CONTRIBUTORS
|
||||
* Change "Merged with ytdl" version in Readme.md if needed
|
||||
* Add new/fixed extractors in "new features" section of Readme.md
|
||||
* Commit to master as `Release <version>`
|
||||
* Push to origin/release - build task will now run
|
||||
* Update version.py using devscripts\update-version.py
|
||||
* Push to origin/release using `git push origin master:release`
|
||||
build task will now run
|
||||
* Update version.py using `devscripts\update-version.py`
|
||||
* Run `make issuetemplates`
|
||||
* Commit to master as `[version] update :ci skip all`
|
||||
* Push to origin/master
|
||||
@@ -17,17 +19,312 @@
|
||||
-->
|
||||
|
||||
|
||||
### 2021.06.01
|
||||
|
||||
* Merge youtube-dl: Upto [commit/d495292](https://github.com/ytdl-org/youtube-dl/commit/d495292852b6c2f1bd58bc2141ff2b0265c952cf)
|
||||
* Pre-check archive and filters during playlist extraction
|
||||
* Handle Basic Auth `user:pass` in URLs by [hhirtz](https://github.com/hhirtz) and [pukkandan](https://github.com/pukkandan)
|
||||
* [archiveorg] Add YoutubeWebArchiveIE by [colethedj](https://github.com/colethedj) and [alex-gedeon](https://github.com/alex-gedeon)
|
||||
* [fancode] Add extractor by [rmsmachine](https://github.com/rmsmachine)
|
||||
* [patreon] Support vimeo embeds by [rhsmachine](https://github.com/rhsmachine)
|
||||
* [Saitosan] Add new extractor by [llacb47](https://github.com/llacb47)
|
||||
* [ShemarooMe] Add extractor by [Ashish0804](https://github.com/Ashish0804) and [pukkandan](https://github.com/pukkandan)
|
||||
* [telemundo] Add extractor by [king-millez](https://github.com/king-millez)
|
||||
* [SonyLIV] Add SonyLIVSeriesIE and subtitle support by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [Hotstar] Add HotStarSeriesIE by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [Voot] Add VootSeriesIE by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [vidio] Support login and premium videos by [MinePlayersPE](https://github.com/MinePlayersPE)
|
||||
* [fragment] When using `-N`, do not keep the fragment content in memory
|
||||
* [ffmpeg] Download and merge in a single step if possible
|
||||
* [ThumbnailsConvertor] Support conversion to `png` and make it the default by [louie-github](https://github.com/louie-github)
|
||||
* [VideoConvertor] Generalize with remuxer and allow conditional recoding
|
||||
* [EmbedThumbnail] Embed in `mp4`/`m4a` using mutagen by [tripulse](https://github.com/tripulse) and [pukkandan](https://github.com/pukkandan)
|
||||
* [EmbedThumbnail] Embed if any thumbnail was downloaded, not just the best
|
||||
* [EmbedThumbnail] Correctly escape filename
|
||||
* [update] replace self without launching a subprocess in windows
|
||||
* [update] Block further update for unsupported systems
|
||||
* Refactor `__process_playlist` by creating `LazyList`
|
||||
* Write messages to `stderr` when both `quiet` and `verbose`
|
||||
* Sanitize and sort playlist thumbnails
|
||||
* Remove `None` values from `info.json`
|
||||
* [extractor] Always prefer native hls downloader by default
|
||||
* [extractor] Skip subtitles without URI in m3u8 manifests by [hheimbuerger](https://github.com/hheimbuerger)
|
||||
* [extractor] Functions to parse `socket.io` response as `json` by [pukkandan](https://github.com/pukkandan) and [llacb47](https://github.com/llacb47)
|
||||
* [extractor] Allow `note=False` when extracting manifests
|
||||
* [utils] Escape URLs in `sanitized_Request`, not `sanitize_url`
|
||||
* [hls] Disable external downloader for `webvtt`
|
||||
* [youtube] `/live` URLs should raise error if channel is not live
|
||||
* [youtube] Bug fixes
|
||||
* [zee5] Fix m3u8 formats' extension
|
||||
* [ard] Allow URLs without `-` before id by [olifre](https://github.com/olifre)
|
||||
* [cleanup] `YoutubeDL._match_entry`
|
||||
* [cleanup] Refactor updater
|
||||
* [cleanup] Refactor ffmpeg convertors
|
||||
* [cleanup] setup.py
|
||||
|
||||
|
||||
### 2021.05.20
|
||||
|
||||
* **Youtube improvements**:
|
||||
* Support youtube music `MP`, `VL` and `browse` pages
|
||||
* Extract more formats for youtube music by [craftingmod](https://github.com/craftingmod), [colethedj](https://github.com/colethedj) and [pukkandan](https://github.com/pukkandan)
|
||||
* Extract multiple subtitles in same language by [pukkandan](https://github.com/pukkandan) and [tpikonen](https://github.com/tpikonen)
|
||||
* Redirect channels that don't have a `videos` tab to their `UU` playlists
|
||||
* Support in-channel search
|
||||
* Sort audio-only formats correctly
|
||||
* Always extract `maxresdefault` thumbnail
|
||||
* Extract audio language
|
||||
* Add subtitle language names by [nixxo](https://github.com/nixxo) and [tpikonen](https://github.com/tpikonen)
|
||||
* Show alerts only from the final webpage
|
||||
* Add `html5=1` param to `get_video_info` page requests by [colethedj](https://github.com/colethedj)
|
||||
* Better message when login required
|
||||
* **Add option `--print`**: to print any field/template
|
||||
* Deprecates: `--get-description`, `--get-duration`, `--get-filename`, `--get-format`, `--get-id`, `--get-thumbnail`, `--get-title`, `--get-url`
|
||||
* Field `additional_urls` to download additional videos from metadata using [`--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata)
|
||||
* Merge youtube-dl: Upto [commit/dfbbe29](https://github.com/ytdl-org/youtube-dl/commit/dfbbe2902fc67f0f93ee47a8077c148055c67a9b)
|
||||
* Write thumbnail of playlist and add `pl_thumbnail` outtmpl key
|
||||
* [embedthumbnail] Add `flac` support and refactor `mutagen` code by [pukkandan](https://github.com/pukkandan) and [tripulse](https://github.com/tripulse)
|
||||
* [audius:artist] Add extractor by [king-millez](https://github.com/king-millez)
|
||||
* [parlview] Add extractor by [king-millez](https://github.com/king-millez)
|
||||
* [tenplay] Fix extractor by [king-millez](https://github.com/king-millez)
|
||||
* [rmcdecouverte] Generalize `_VALID_URL`
|
||||
* Add compat-option `no-attach-infojson`
|
||||
* Add field `name` for subtitles
|
||||
* Ensure `post_extract` and `pre_process` only run once
|
||||
* Fix `--check-formats` when there is network error
|
||||
* Standardize `write_debug` and `get_param`
|
||||
* [options] Alias `--write-comments`, `--no-write-comments`
|
||||
* [options] Refactor callbacks
|
||||
* [test:download] Only extract enough videos for `playlist_mincount`
|
||||
* [extractor] bugfix for when `compat_opts` is not given
|
||||
* [build] Fix x86 build by [shirt](https://github.com/shirt-dev)
|
||||
* [cleanup] code formatting, youtube tests and readme
|
||||
|
||||
### 2021.05.11
|
||||
* **Deprecate support for python versions < 3.6**
|
||||
* **Subtitle extraction from manifests** by [fstirlitz](https://github.com/fstirlitz). See [be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
|
||||
* **Improve output template:**
|
||||
* Allow slicing lists/strings using `field.start:end:step`
|
||||
* A field can also be used as offset like `field1+num+field2`
|
||||
* A default value can be given using `field|default`
|
||||
* Prevent invalid fields from causing errors
|
||||
* **Merge youtube-dl**: Upto [commit/a726009](https://github.com/ytdl-org/youtube-dl/commit/a7260099873acc6dc7d76cafad2f6b139087afd0)
|
||||
* **Remove options** `-l`, `-t`, `-A` completely and disable `--auto-number`, `--title`, `--literal`, `--id`
|
||||
* [Plugins] Prioritize plugins over standard extractors and prevent plugins from overwriting the standard extractor classes
|
||||
* [downloader] Fix `quiet` and `to_stderr`
|
||||
* [fragment] Ensure the file is closed on error
|
||||
* [fragment] Make sure first segment is not skipped
|
||||
* [aria2c] Fix whitespace being stripped off
|
||||
* [embedthumbnail] Fix bug where jpeg thumbnails were converted again
|
||||
* [FormatSort] Fix for when some formats have quality and others don't
|
||||
* [utils] Add `network_exceptions`
|
||||
* [utils] Escape URL while sanitizing
|
||||
* [ukcolumn] Add Extractor
|
||||
* [whowatch] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
||||
* [CBS] Improve `_VALID_URL` to support movies
|
||||
* [crackle] Improve extraction
|
||||
* [curiositystream] Fix collections
|
||||
* [francetvinfo] Improve video id extraction
|
||||
* [generic] Respect the encoding in manifest
|
||||
* [limelight] Obey `allow_unplayable_formats`
|
||||
* [mediasite] Generalize URL pattern by [fstirlitz](https://github.com/fstirlitz)
|
||||
* [mxplayer] Add MxplayerShowIE by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [nebula] Move to nebula.app by [Lamieur](https://github.com/Lamieur)
|
||||
* [niconico] Fix HLS formats by [CXwudi](https://github.com/CXwudi), [tsukumijima](https://github.com/tsukumijima), [nao20010128nao](https://github.com/nao20010128nao) and [pukkandan](https://github.com/pukkandan)
|
||||
* [niconico] Fix title and thumbnail extraction by [CXwudi](https://github.com/CXwudi)
|
||||
* [plutotv] Extract subtitles from manifests
|
||||
* [plutotv] Fix format extraction for some urls
|
||||
* [rmcdecouverte] Improve `_VALID_URL`
|
||||
* [sonyliv] Fix `title` and `series` extraction by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [tubi] Raise "no video formats" error when video url is empty
|
||||
* [youtube:tab] Detect playlists inside community posts
|
||||
* [youtube] Add `oembed` to reserved names
|
||||
* [zee5] Fix extraction for some URLs by [Hadi0609](https://github.com/Hadi0609)
|
||||
* [zee5] Fix py2 compatibility
|
||||
* Fix `playlist_index` and add `playlist_autonumber`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details
|
||||
* Add experimental option `--check-formats` to test the URLs before format selection
|
||||
* Option `--compat-options` to revert [some of yt-dlp's changes](https://github.com/yt-dlp/yt-dlp#differences-in-default-behavior)
|
||||
* Deprecates `--list-formats-as-table`, `--list-formats-old`
|
||||
* Fix number of digits in `%(playlist_index)s`
|
||||
* Fix case sensitivity of format selector
|
||||
* Revert "[core] be able to hand over id and title using url_result"
|
||||
* Do not strip out whitespaces in `-o` and `-P`
|
||||
* Fix `preload_download_archive` writing verbose message to `stdout`
|
||||
* Move option warnings to `YoutubeDL` so that they obey `--no-warnings` and can output colors
|
||||
* Py2 compatibility for `FileNotFoundError`
|
||||
|
||||
|
||||
### 2021.04.22
|
||||
* **Improve output template:**
|
||||
* Objects can be traversed like `%(field.key1.key2)s`
|
||||
* An offset can be added to numeric fields as `%(field+N)s`
|
||||
* Deprecates `--autonumber-start`
|
||||
* **Improve `--sub-langs`:**
|
||||
* Treat `--sub-langs` entries as regex
|
||||
* `all` can be used to refer to all the subtitles
|
||||
* language codes can be prefixed with `-` to exclude it
|
||||
* Deprecates `--all-subs`
|
||||
* Add option `--ignore-no-formats-error` to ignore the "no video format" and similar errors
|
||||
* Add option `--skip-playlist-after-errors` to skip the rest of a playlist after a given number of errors are encountered
|
||||
* Merge youtube-dl: Upto [commit/7e8b3f9](https://github.com/ytdl-org/youtube-dl/commit/7e8b3f9439ebefb3a3a4e5da9c0bd2b595976438)
|
||||
* [downloader] Fix bug in downloader selection
|
||||
* [BilibiliChannel] Fix pagination by [nao20010128nao](https://github.com/nao20010128nao) and [pukkandan](https://github.com/pukkandan)
|
||||
* [rai] Add support for http formats by [nixxo](https://github.com/nixxo)
|
||||
* [TubiTv] Add TubiTvShowIE by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [twitcasting] Fix extractor
|
||||
* [viu:ott] Fix extractor and support series by [lkho](https://github.com/lkho) and [pukkandan](https://github.com/pukkandan)
|
||||
* [youtube:tab] Show unavailable videos in playlists by [colethedj](https://github.com/colethedj)
|
||||
* [youtube:tab] Reload with unavailable videos for all playlists
|
||||
* [youtube] Ignore invalid stretch ratio
|
||||
* [youtube] Improve channel syncid extraction to support ytcfg by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Standardize API calls for tabs, mixes and search by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Bugfix in `_extract_ytcfg`
|
||||
* [mildom:user:vod] Download only necessary amount of pages
|
||||
* [mildom] Remove proxy completely by [fstirlitz](https://github.com/fstirlitz)
|
||||
* [go] Fix `_VALID_URL`
|
||||
* [MetadataFromField] Improve regex and add tests
|
||||
* [Exec] Ensure backward compatibility when the command contains `%`
|
||||
* [extractor] Fix inconsistent use of `report_warning`
|
||||
* Ensure `mergeall` selects best format when multistreams are disabled
|
||||
* Improve the yt-dlp.sh script by [fstirlitz](https://github.com/fstirlitz)
|
||||
* [lazy_extractor] Do not load plugins
|
||||
* [ci] Disable fail-fast
|
||||
* [documentation] Clarify which deprecated options still work
|
||||
* [documentation] Fix typos
|
||||
|
||||
|
||||
### 2021.04.11
|
||||
* Add option `--convert-thumbnails` (only jpg currently supported)
|
||||
* Format selector `mergeall` to download and merge all formats
|
||||
* Pass any field to `--exec` using similar syntax to output template
|
||||
* Choose downloader for each protocol using `--downloader PROTO:NAME`
|
||||
* Alias `--downloader` for `--external-downloader`
|
||||
* Added `native` as an option for the downloader
|
||||
* Merge youtube-dl: Upto [commit/4fb25ff](https://github.com/ytdl-org/youtube-dl/commit/4fb25ff5a3be5206bb72e5c4046715b1529fb2c7) (except vimeo)
|
||||
* [DiscoveryPlusIndia] Add DiscoveryPlusIndiaShowIE by [Ashish0804](https://github.com/Ashish0804)
|
||||
* [NFHSNetwork] Add extractor by [llacb47](https://github.com/llacb47)
|
||||
* [nebula] Add extractor (watchnebula.com) by [hheimbuerger](https://github.com/hheimbuerger)
|
||||
* [nitter] Fix extraction of reply tweets and update instance list by [B0pol](https://github.com/B0pol)
|
||||
* [nitter] Fix thumbnails by [B0pol](https://github.com/B0pol)
|
||||
* [youtube] Fix thumbnail URL
|
||||
* [youtube] Parse API parameters from initial webpage by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Extract comments' approximate timestamp by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Fix alert extraction
|
||||
* [bilibili] Fix uploader
|
||||
* [utils] Add `datetime_from_str` and `datetime_add_months` by [colethedj](https://github.com/colethedj)
|
||||
* Run some `postprocessors` before actual download
|
||||
* Improve argument parsing for `-P`, `-o`, `-S`
|
||||
* Fix some `m3u8` not obeying `--allow-unplayable-formats`
|
||||
* Fix default of `dynamic_mpd`
|
||||
* Deprecate `--all-formats`, `--include-ads`, `--hls-prefer-native`, `--hls-prefer-ffmpeg`
|
||||
* [documentation] Improvements
|
||||
|
||||
### 2021.04.03
|
||||
* Merge youtube-dl: Upto [commit/654b4f4](https://github.com/ytdl-org/youtube-dl/commit/654b4f4ff2718f38b3182c1188c5d569c14cc70a)
|
||||
* Ability to set a specific field in the file's metadata using `--parse-metadata`
|
||||
* Ability to select n'th best format like `-f bv*.2`
|
||||
* [DiscoveryPlus] Add discoveryplus.in
|
||||
* [la7] Add podcasts and podcast playlists by [nixxo](https://github.com/nixxo)
|
||||
* [mildom] Update extractor with current proxy by [nao20010128nao](https://github.com/nao20010128nao)
|
||||
* [ard:mediathek] Fix video id extraction
|
||||
* [generic] Detect Invidious' link element
|
||||
* [youtube] Show premium state in `availability` by [colethedj](https://github.com/colethedj)
|
||||
* [viewsource] Add extractor to handle `view-source:`
|
||||
* [sponskrub] Run before embedding thumbnail
|
||||
* [documentation] Improve `--parse-metadata` documentation
|
||||
|
||||
|
||||
### 2021.03.24.1
|
||||
* Revert [commit/8562218](https://github.com/ytdl-org/youtube-dl/commit/8562218350a79d4709da8593bb0c538aa0824acf)
|
||||
|
||||
### 2021.03.24
|
||||
* Merge youtube-dl: Upto 2021.03.25 ([commit/8562218](https://github.com/ytdl-org/youtube-dl/commit/8562218350a79d4709da8593bb0c538aa0824acf))
|
||||
* Parse metadata from multiple fields using `--parse-metadata`
|
||||
* Ability to load playlist infojson using `--load-info-json`
|
||||
* Write current epoch to infojson when using `--no-clean-infojson`
|
||||
* [youtube_live_chat] fix bug when trying to set cookies
|
||||
* [niconico] Fix for when logged in by [CXwudi](https://github.com/CXwudi) and [xtkoba](https://github.com/xtkoba)
|
||||
* [linuxacademy] Fix login
|
||||
|
||||
|
||||
### 2021.03.21
|
||||
* Merge youtube-dl: Upto [commit/7e79ba7](https://github.com/ytdl-org/youtube-dl/commit/7e79ba7dd6e6649dd2ce3a74004b2044f2182881)
|
||||
* Option `--no-clean-infojson` to keep private keys in the infojson
|
||||
* [aria2c] Support retry/abort unavailable fragments by [damianoamatruda](https://github.com/damianoamatruda)
|
||||
* [aria2c] Better default arguments
|
||||
* [movefiles] Fix bugs and make more robust
|
||||
* [formatSort] Fix `quality` being ignored
|
||||
* [splitchapters] Fix for older ffmpeg
|
||||
* [sponskrub] Pass proxy to sponskrub
|
||||
* Make sure `post_hook` gets the final filename
|
||||
* Recursively remove any private keys from infojson
|
||||
* Embed video URL metadata inside `mp4` by [damianoamatruda](https://github.com/damianoamatruda) and [pukkandan](https://github.com/pukkandan)
|
||||
* Merge `webm` formats into `mkv` if thumbnails are to be embedded by [damianoamatruda](https://github.com/damianoamatruda)
|
||||
* Use headers and cookies when downloading subtitles by [damianoamatruda](https://github.com/damianoamatruda)
|
||||
* Parse resolution in info dictionary by [damianoamatruda](https://github.com/damianoamatruda)
|
||||
* More consistent warning messages by [damianoamatruda](https://github.com/damianoamatruda) and [pukkandan](https://github.com/pukkandan)
|
||||
* [documentation] Add deprecated options and aliases in readme
|
||||
* [documentation] Fix some minor mistakes
|
||||
|
||||
* [niconico] Partial fix adapted from [animelover1984/youtube-dl@b5eff52](https://github.com/animelover1984/youtube-dl/commit/b5eff52dd9ed5565672ea1694b38c9296db3fade) (login and smile formats still don't work)
|
||||
* [niconico] Add user extractor by [animelover1984](https://github.com/animelover1984)
|
||||
* [bilibili] Add anthology support by [animelover1984](https://github.com/animelover1984)
|
||||
* [amcnetworks] Fix extractor by [2ShedsJackson](https://github.com/2ShedsJackson)
|
||||
* [stitcher] Merge from youtube-dl by [nixxo](https://github.com/nixxo)
|
||||
* [rcs] Improved extraction by [nixxo](https://github.com/nixxo)
|
||||
* [linuxacademy] Improve regex
|
||||
* [youtube] Show if video is `private`, `unlisted` etc in info (`availability`) by [colethedj](https://github.com/colethedj) and [pukkandan](https://github.com/pukkandan)
|
||||
* [youtube] bugfix for channel playlist extraction
|
||||
* [nbc] Improve metadata extraction by [2ShedsJackson](https://github.com/2ShedsJackson)
|
||||
|
||||
|
||||
### 2021.03.15
|
||||
* **Split video by chapters**: using option `--split-chapters`
|
||||
* The output file of the split files can be set with `-o`/`-P` using the prefix `chapter:`
|
||||
* Additional keys `section_title`, `section_number`, `section_start`, `section_end` are available in the output template
|
||||
* **Parallel fragment downloads** by [shirt](https://github.com/shirt-dev)
|
||||
* Use option `--concurrent-fragments` (`-N`) to set the number of threads (default 1)
|
||||
* Merge youtube-dl: Upto [commit/3be0980](https://github.com/ytdl-org/youtube-dl/commit/3be098010f667b14075e3dfad1e74e5e2becc8ea)
|
||||
* [zee5] Add Show Extractor by [Ashish0804](https://github.com/Ashish0804) and [pukkandan](https://github.com/pukkandan)
|
||||
* [rai] fix drm check by [nixxo](https://github.com/nixxo)
|
||||
* [wimtv] Add extractor by [nixxo](https://github.com/nixxo)
|
||||
* [mtv] Add mtv.it and extract series metadata by [nixxo](https://github.com/nixxo)
|
||||
* [pluto.tv] Add extractor by [kevinoconnor7](https://github.com/kevinoconnor7)
|
||||
* [youtube] Rewrite comment extraction by [colethedj](https://github.com/colethedj)
|
||||
* [embedthumbnail] Set mtime correctly
|
||||
* Refactor some postprocessor/downloader code by [pukkandan](https://github.com/pukkandan) and [shirt](https://github.com/shirt-dev)
|
||||
|
||||
|
||||
### 2021.03.07
|
||||
* [youtube] Fix history, mixes, community pages and trending by [pukkandan](https://github.com/pukkandan) and [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Fix private feeds/playlists on multi-channel accounts by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Extract alerts from continuation by [colethedj](https://github.com/colethedj)
|
||||
* [cbs] Add support for ParamountPlus by [shirt](https://github.com/shirt-dev)
|
||||
* [mxplayer] Rewrite extractor with show support by [pukkandan](https://github.com/pukkandan) and [Ashish0804](https://github.com/Ashish0804)
|
||||
* [gedi] Improvements from youtube-dl by [nixxo](https://github.com/nixxo)
|
||||
* [vimeo] Fix videos with password by [teesid](https://github.com/teesid)
|
||||
* [lbry] Support `lbry://` url by [nixxo](https://github.com/nixxo)
|
||||
* [bilibili] Change `Accept` header by [pukkandan](https://github.com/pukkandan) and [animelover1984](https://github.com/animelover1984)
|
||||
* [trovo] Pass origin header
|
||||
* [rai] Check for DRM by [nixxo](https://github.com/nixxo)
|
||||
* [downloader] Fix bug for `ffmpeg`/`httpie`
|
||||
* [update] Fix updater removing the executable bit on some UNIX distros
|
||||
* [update] Fix current build hash for UNIX
|
||||
* [documentation] Include wget/curl/aria2c install instructions for Unix by [Ashish0804](https://github.com/Ashish0804)
|
||||
* Fix some videos downloading with `m3u8` extension
|
||||
* Remove "fixup is ignored" warning when fixup wasn't passed by user
|
||||
|
||||
|
||||
### 2021.03.03.2
|
||||
* [build] Fix bug
|
||||
|
||||
|
||||
### 2021.03.03
|
||||
* [youtube] Use new browse API for continuation page extraction by [colethedj](https://github.com/colethedj) and [pukkandan](https://github.com/pukkandan)
|
||||
* Fix HLS playlist downloading by [shirt](https://github.com/shirt-dev)
|
||||
* **Merge youtube-dl:** Upto [2021.03.03](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.03.03)
|
||||
* Merge youtube-dl: Upto [2021.03.03](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.03.03)
|
||||
* [mtv] Fix extractor
|
||||
* [nick] Fix extractor by [DennyDai](https://github.com/DennyDai)
|
||||
* [mxplayer] Add new extractor by[codeasashu](https://github.com/codeasashu)
|
||||
* [mxplayer] Add new extractor by [codeasashu](https://github.com/codeasashu)
|
||||
* [youtube] Throw error when `--extractor-retries` are exhausted
|
||||
* Reduce default of `--extractor-retries` to 3
|
||||
* Fix packaging bugs by [hseg](https://github.com/hseg)
|
||||
@@ -59,10 +356,10 @@
|
||||
* Moved project to an organization [yt-dlp](https://github.com/yt-dlp)
|
||||
* **Completely changed project name to yt-dlp** by [Pccode66](https://github.com/Pccode66) and [pukkandan](https://github.com/pukkandan)
|
||||
* Also, `youtube-dlc` config files are no longer loaded
|
||||
* **Merge youtube-dl:** Upto [commit/4460329](https://github.com/ytdl-org/youtube-dl/commit/44603290e5002153f3ebad6230cc73aef42cc2cd) (except tmz, gedi)
|
||||
* Merge youtube-dl: Upto [commit/4460329](https://github.com/ytdl-org/youtube-dl/commit/44603290e5002153f3ebad6230cc73aef42cc2cd) (except tmz, gedi)
|
||||
* [Readthedocs](https://yt-dlp.readthedocs.io) support by [shirt](https://github.com/shirt-dev)
|
||||
* [youtube] Show if video was a live stream in info (`was_live`)
|
||||
* [Zee5] Add new extractor by [Ashish](https://github.com/Ashish) and [pukkandan](https://github.com/pukkandan)
|
||||
* [Zee5] Add new extractor by [Ashish0804](https://github.com/Ashish0804) and [pukkandan](https://github.com/pukkandan)
|
||||
* [jwplatform] Add support for `hyland.com`
|
||||
* [tennistv] Fix extractor
|
||||
* [hls] Support media initialization by [shirt](https://github.com/shirt-dev)
|
||||
@@ -77,7 +374,7 @@
|
||||
|
||||
|
||||
### 2021.02.19
|
||||
* **Merge youtube-dl:** Upto [commit/cf2dbec](https://github.com/ytdl-org/youtube-dl/commit/cf2dbec6301177a1fddf72862de05fa912d9869d) (except kakao)
|
||||
* Merge youtube-dl: Upto [commit/cf2dbec](https://github.com/ytdl-org/youtube-dl/commit/cf2dbec6301177a1fddf72862de05fa912d9869d) (except kakao)
|
||||
* [viki] Fix extractor
|
||||
* [niconico] Extract `channel` and `channel_id` by [kurumigi](https://github.com/kurumigi)
|
||||
* [youtube] Multiple page support for hashtag URLs
|
||||
@@ -102,7 +399,7 @@
|
||||
|
||||
|
||||
### 2021.02.15
|
||||
* **Merge youtube-dl:** Upto [2021.02.10](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.02.10) (except archive.org)
|
||||
* Merge youtube-dl: Upto [2021.02.10](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.02.10) (except archive.org)
|
||||
* [niconico] Improved extraction and support encrypted/SMILE movies by [kurumigi](https://github.com/kurumigi), [tsukumi](https://github.com/tsukumi), [bbepis](https://github.com/bbepis), [pukkandan](https://github.com/pukkandan)
|
||||
* Fix HLS AES-128 with multiple keys in external downloaders by [shirt](https://github.com/shirt-dev)
|
||||
* [youtube_live_chat] Fix by using POST API by [siikamiika](https://github.com/siikamiika)
|
||||
@@ -145,7 +442,7 @@
|
||||
|
||||
|
||||
### 2021.02.04
|
||||
* **Merge youtube-dl:** Upto [2021.02.04.1](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.02.04.1)
|
||||
* Merge youtube-dl: Upto [2021.02.04.1](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.02.04.1)
|
||||
* **Date/time formatting in output template:**
|
||||
* You can use [`strftime`](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) to format date/time fields. Example: `%(upload_date>%Y-%m-%d)s`
|
||||
* **Multiple output templates:**
|
||||
@@ -199,7 +496,7 @@
|
||||
|
||||
|
||||
### 2021.01.24
|
||||
* **Merge youtube-dl:** Upto [2021.01.24](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.24)
|
||||
* Merge youtube-dl: Upto [2021.01.24](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.24)
|
||||
* Plugin support ([documentation](https://github.com/yt-dlp/yt-dlp#plugins))
|
||||
* **Multiple paths**: New option `-P`/`--paths` to give different paths for different types of files
|
||||
* The syntax is `-P "type:path" -P "type:path"` ([documentation](https://github.com/yt-dlp/yt-dlp#:~:text=-P,%20--paths%20TYPE:PATH))
|
||||
@@ -228,7 +525,7 @@
|
||||
|
||||
|
||||
### 2021.01.16
|
||||
* **Merge youtube-dl:** Upto [2021.01.16](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.16)
|
||||
* Merge youtube-dl: Upto [2021.01.16](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.16)
|
||||
* **Configuration files:**
|
||||
* Portable configuration file: `./yt-dlp.conf`
|
||||
* Allow the configuration files to be named `yt-dlp` instead of `youtube-dlc`. See [this](https://github.com/yt-dlp/yt-dlp#configuration) for details
|
||||
@@ -258,7 +555,7 @@
|
||||
* [archive.org] Fix extractor and add support for audio and playlists by [wporr](https://github.com/wporr)
|
||||
* [Animelab] Added by [mariuszskon](https://github.com/mariuszskon)
|
||||
* [youtube:search] Fix view_count by [ohnonot](https://github.com/ohnonot)
|
||||
* [youtube] Show if video is embeddable in info
|
||||
* [youtube] Show if video is embeddable in info (`playable_in_embed`)
|
||||
* Update version badge automatically in README
|
||||
* Enable `test_youtube_search_matching`
|
||||
* Create `to_screen` and similar functions in postprocessor/common
|
||||
@@ -274,9 +571,8 @@
|
||||
|
||||
|
||||
### 2021.01.08
|
||||
* **Merge youtube-dl:** Upto [2021.01.08](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.08)
|
||||
* Extractor stitcher ([1](https://github.com/ytdl-org/youtube-dl/commit/bb38a1215718cdf36d73ff0a7830a64cd9fa37cc), [2](https://github.com/ytdl-org/youtube-dl/commit/a563c97c5cddf55f8989ed7ea8314ef78e30107f)) have not been merged
|
||||
* Moved changelog to seperate file
|
||||
* Merge youtube-dl: Upto [2021.01.08](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.08) except stitcher ([1](https://github.com/ytdl-org/youtube-dl/commit/bb38a1215718cdf36d73ff0a7830a64cd9fa37cc), [2](https://github.com/ytdl-org/youtube-dl/commit/a563c97c5cddf55f8989ed7ea8314ef78e30107f))
|
||||
* Moved changelog to separate file
|
||||
|
||||
|
||||
### 2021.01.07-1
|
||||
@@ -314,7 +610,7 @@
|
||||
* Changed video format sorting to show video only files and video+audio files together.
|
||||
* Added `--video-multistreams`, `--no-video-multistreams`, `--audio-multistreams`, `--no-audio-multistreams`
|
||||
* Added `b`,`w`,`v`,`a` as alias for `best`, `worst`, `video` and `audio` respectively
|
||||
* **Shortcut Options:** Added `--write-link`, `--write-url-link`, `--write-webloc-link`, `--write-desktop-link` by [h-h-h-h](https://github.com/h-h-h-h) - See [Internet Shortcut Options](README.md#internet-shortcut-options) for details
|
||||
* Shortcut Options: Added `--write-link`, `--write-url-link`, `--write-webloc-link`, `--write-desktop-link` by [h-h-h-h](https://github.com/h-h-h-h) - See [Internet Shortcut Options](README.md#internet-shortcut-options) for details
|
||||
* **Sponskrub integration:** Added `--sponskrub`, `--sponskrub-cut`, `--sponskrub-force`, `--sponskrub-location`, `--sponskrub-args` - See [SponSkrub Options](README.md#sponskrub-sponsorblock-options) for details
|
||||
* Added `--force-download-archive` (`--force-write-archive`) by [h-h-h-h](https://github.com/h-h-h-h)
|
||||
* Added `--list-formats-as-table`, `--list-formats-old`
|
||||
@@ -324,36 +620,38 @@
|
||||
* Relaxed validation for format filters so that any arbitrary field can be used
|
||||
* Fix for embedding thumbnail in mp3 by [pauldubois98](https://github.com/pauldubois98) ([ytdl-org/youtube-dl#21569](https://github.com/ytdl-org/youtube-dl/pull/21569))
|
||||
* Make Twitch Video ID output from Playlist and VOD extractor same. This is only a temporary fix
|
||||
* **Merge youtube-dl:** Upto [2021.01.03](https://github.com/ytdl-org/youtube-dl/commit/8e953dcbb10a1a42f4e12e4e132657cb0100a1f8) - See [blackjack4494/yt-dlc#280](https://github.com/blackjack4494/yt-dlc/pull/280) for details
|
||||
* Merge youtube-dl: Upto [2021.01.03](https://github.com/ytdl-org/youtube-dl/commit/8e953dcbb10a1a42f4e12e4e132657cb0100a1f8) - See [blackjack4494/yt-dlc#280](https://github.com/blackjack4494/yt-dlc/pull/280) for details
|
||||
* Extractors [tiktok](https://github.com/ytdl-org/youtube-dl/commit/fb626c05867deab04425bad0c0b16b55473841a2) and [hotstar](https://github.com/ytdl-org/youtube-dl/commit/bb38a1215718cdf36d73ff0a7830a64cd9fa37cc) have not been merged
|
||||
* Cleaned up the fork for public use
|
||||
|
||||
|
||||
**PS**: All uncredited changes above this point are authored by [pukkandan](https://github.com/pukkandan)
|
||||
|
||||
### Unreleased changes in [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc)
|
||||
* Updated to youtube-dl release 2020.11.26
|
||||
* [youtube]
|
||||
* Updated to youtube-dl release 2020.11.26 by [pukkandan](https://github.com/pukkandan)
|
||||
* Youtube improvements by [pukkandan](https://github.com/pukkandan)
|
||||
* Implemented all Youtube Feeds (ytfav, ytwatchlater, ytsubs, ythistory, ytrec) and SearchURL
|
||||
* Fix ytsearch not returning results sometimes due to promoted content
|
||||
* Temporary fix for automatic captions - disable json3
|
||||
* Fix some improper Youtube URLs
|
||||
* Redirect channel home to /video
|
||||
* Print youtube's warning message
|
||||
* Multiple pages are handled better for feeds
|
||||
* Handle Multiple pages for feeds better
|
||||
* [youtube] Fix ytsearch not returning results sometimes due to promoted content by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Temporary fix for automatic captions - disable json3 by [blackjack4494](https://github.com/blackjack4494)
|
||||
* Add --break-on-existing by [gergesh](https://github.com/gergesh)
|
||||
* Pre-check video IDs in the archive before downloading
|
||||
* [bitwave.tv] New extractor
|
||||
* [Gedi] Add extractor
|
||||
* [Rcs] Add new extractor
|
||||
* [skyit] Add support for multiple Sky Italia website and removed old skyitalia extractor
|
||||
* [france.tv] Fix thumbnail URL
|
||||
* [ina] support mobile links
|
||||
* [instagram] Fix extractor
|
||||
* [itv] BTCC new pages' URL update (articles instead of races)
|
||||
* [SouthparkDe] Support for English URLs
|
||||
* [spreaker] fix SpreakerShowIE test URL
|
||||
* [Vlive] Fix playlist handling when downloading a channel
|
||||
* [generic] Detect embedded bitchute videos
|
||||
* [generic] Extract embedded youtube and twitter videos
|
||||
* [ffmpeg] Ensure all streams are copied
|
||||
* Fix for os.rename error when embedding thumbnail to video in a different drive
|
||||
* make_win.bat: don't use UPX to pack vcruntime140.dll
|
||||
* Pre-check video IDs in the archive before downloading by [pukkandan](https://github.com/pukkandan)
|
||||
* [bitwave.tv] New extractor by [lorpus](https://github.com/lorpus)
|
||||
* [Gedi] Add extractor by [nixxo](https://github.com/nixxo)
|
||||
* [Rcs] Add new extractor by [nixxo](https://github.com/nixxo)
|
||||
* [skyit] New skyitalia extractor by [nixxo](https://github.com/nixxo)
|
||||
* [france.tv] Fix thumbnail URL by [renalid](https://github.com/renalid)
|
||||
* [ina] support mobile links by [B0pol](https://github.com/B0pol)
|
||||
* [instagram] Fix thumbnail extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
||||
* [SouthparkDe] Support for English URLs by [xypwn](https://github.com/xypwn)
|
||||
* [spreaker] fix SpreakerShowIE test URL by [pukkandan](https://github.com/pukkandan)
|
||||
* [Vlive] Fix playlist handling when downloading a channel by [kyuyeunk](https://github.com/kyuyeunk)
|
||||
* [tmz] Fix extractor by [diegorodriguezv](https://github.com/diegorodriguezv)
|
||||
* [generic] Detect embedded bitchute videos by [pukkandan](https://github.com/pukkandan)
|
||||
* [generic] Extract embedded youtube and twitter videos by [diegorodriguezv](https://github.com/diegorodriguezv)
|
||||
* [ffmpeg] Ensure all streams are copied by [pukkandan](https://github.com/pukkandan)
|
||||
* [embedthumbnail] Fix for os.rename error by [pukkandan](https://github.com/pukkandan)
|
||||
* make_win.bat: don't use UPX to pack vcruntime140.dll by [jbruchon](https://github.com/jbruchon)
|
||||
|
||||
538
README.md
538
README.md
@@ -1,39 +1,44 @@
|
||||
<div align="center">
|
||||
|
||||
# YT-DLP
|
||||
A command-line program to download videos from YouTube and many other [video platforms](supportedsites.md)
|
||||
|
||||
[](https://github.com/yt-dlp/yt-dlp/releases/latest)
|
||||
[](LICENSE)
|
||||
<!-- GHA doesn't have for-the-badge style
|
||||
[](https://github.com/yt-dlp/yt-dlp/actions)
|
||||
[](https://discord.gg/S75JaBna)
|
||||
-->
|
||||
[](https://github.com/yt-dlp/yt-dlp/releases/latest)
|
||||
[](LICENSE)
|
||||
[](https://yt-dlp.readthedocs.io)
|
||||
[](https://discord.gg/H5MNcFW63r)
|
||||
[](https://github.com/yt-dlp/yt-dlp/commits)
|
||||
[](https://github.com/yt-dlp/yt-dlp/commits)
|
||||
[](https://github.com/yt-dlp/yt-dlp/releases/latest)
|
||||
[](https://pypi.org/project/yt-dlp)
|
||||
|
||||
[](https://github.com/yt-dlp/yt-dlp/commits)
|
||||
[](https://github.com/yt-dlp/yt-dlp/commits)
|
||||
[](https://github.com/yt-dlp/yt-dlp/releases/latest)
|
||||
[](https://pypi.org/project/yt-dlp)
|
||||
[](https://yt-dlp.readthedocs.io)
|
||||
</div>
|
||||
|
||||
A command-line program to download videos from youtube.com and many other [video platforms](supportedsites.md)
|
||||
|
||||
This is a fork of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) which is inturn a fork of [youtube-dl](https://github.com/ytdl-org/youtube-dl)
|
||||
yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc). The main focus of this project is adding new features and patches while also keeping up to date with the original project
|
||||
|
||||
* [NEW FEATURES](#new-features)
|
||||
* [Differences in default behavior](#differences-in-default-behavior)
|
||||
* [INSTALLATION](#installation)
|
||||
* [Dependencies](#dependencies)
|
||||
* [Update](#update)
|
||||
* [Compile](#compile)
|
||||
* [DESCRIPTION](#description)
|
||||
* [OPTIONS](#options)
|
||||
* [USAGE AND OPTIONS](#usage-and-options)
|
||||
* [General Options](#general-options)
|
||||
* [Network Options](#network-options)
|
||||
* [Geo Restriction](#geo-restriction)
|
||||
* [Geo-restriction](#geo-restriction)
|
||||
* [Video Selection](#video-selection)
|
||||
* [Download Options](#download-options)
|
||||
* [Filesystem Options](#filesystem-options)
|
||||
* [Thumbnail images](#thumbnail-images)
|
||||
* [Thumbnail Options](#thumbnail-options)
|
||||
* [Internet Shortcut Options](#internet-shortcut-options)
|
||||
* [Verbosity and Simulation Options](#verbosity-and-simulation-options)
|
||||
* [Workarounds](#workarounds)
|
||||
* [Video Format Options](#video-format-options)
|
||||
* [Subtitle Options](#subtitle-options)
|
||||
* [Authentication Options](#authentication-options)
|
||||
* [Adobe Pass Options](#adobe-pass-options)
|
||||
* [Post-processing Options](#post-processing-options)
|
||||
* [SponSkrub (SponsorBlock) Options](#sponskrub-sponsorblock-options)
|
||||
* [Extractor Options](#extractor-options)
|
||||
@@ -46,8 +51,12 @@ This is a fork of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) which i
|
||||
* [Filtering Formats](#filtering-formats)
|
||||
* [Sorting Formats](#sorting-formats)
|
||||
* [Format Selection examples](#format-selection-examples)
|
||||
* [MODIFYING METADATA](#modifying-metadata)
|
||||
* [Modifying metadata examples](#modifying-metadata-examples)
|
||||
* [PLUGINS](#plugins)
|
||||
* [DEPRECATED OPTIONS](#deprecated-options)
|
||||
* [MORE](#more)
|
||||
</div>
|
||||
|
||||
|
||||
# NEW FEATURES
|
||||
@@ -57,32 +66,41 @@ The major new features from the latest release of [blackjack4494/yt-dlc](https:/
|
||||
|
||||
* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will now be preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
|
||||
|
||||
* **Merged with youtube-dl v2021.03.03**: You get all the latest features and patches of [youtube-dl](https://github.com/ytdl-org/youtube-dl) in addition to all the features of [youtube-dlc](https://github.com/blackjack4494/yt-dlc)
|
||||
* **Merged with youtube-dl [commit/d495292](https://github.com/ytdl-org/youtube-dl/commit/d495292852b6c2f1bd58bc2141ff2b0265c952cf)**: (v2021.05.16) You get all the latest features and patches of [youtube-dl](https://github.com/ytdl-org/youtube-dl) in addition to all the features of [youtube-dlc](https://github.com/blackjack4494/yt-dlc)
|
||||
|
||||
* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--get-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, Playlist infojson etc. Note that the NicoNico improvements are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
|
||||
* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that the NicoNico improvements are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
|
||||
|
||||
* **Youtube improvements**:
|
||||
* All Youtube Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`) works correctly and supports downloading multiple pages of content
|
||||
* Youtube search (`ytsearch:`, `ytsearchdate:`) along with Search URLs works correctly
|
||||
* All Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`) supports downloading multiple pages of content
|
||||
* Search (`ytsearch:`, `ytsearchdate:`), search URLs and in-channel search works
|
||||
* Mixes supports downloading multiple pages of content
|
||||
* Redirect channel's home URL automatically to `/video` to preserve the old behaviour
|
||||
* `255kbps` audio is extracted from youtube music if premium cookies are given
|
||||
* Youtube music Albums, channels etc can be downloaded
|
||||
|
||||
* **Aria2c with HLS/DASH**: You can use aria2c as the external downloader for DASH(mpd) and HLS(m3u8) formats. No more slow ffmpeg/native downloads
|
||||
* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
|
||||
|
||||
* **New extractors**: AnimeLab, Philo MSO, Rcs, Gedi, bitwave.tv, mildom, audius, zee5
|
||||
* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used
|
||||
|
||||
* **Fixed extractors**: archive.org, roosterteeth.com, skyit, instagram, itv, SouthparkDe, spreaker, Vlive, tiktok, akamai, ina, rumble, tennistv
|
||||
* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats
|
||||
|
||||
* **Plugin support**: Extractors can be loaded from an external file. See [plugins](#plugins) for details
|
||||
* **New extractors**: AnimeLab, Philo MSO, Rcs, Gedi, bitwave.tv, mildom, audius, zee5, mtv.it, wimtv, pluto.tv, niconico users, discoveryplus.in, mediathek, NFHSNetwork, nebula, ukcolumn, whowatch, MxplayerShow, parlview (au), YoutubeWebArchive, fancode, Saitosan, ShemarooMe, telemundo, VootSeries, SonyLIVSeries, HotstarSeries
|
||||
|
||||
* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to. See [`--paths`](https://github.com/yt-dlp/yt-dlp/#:~:text=-P,%20--paths%20TYPE:PATH) for details
|
||||
* **Fixed extractors**: archive.org, roosterteeth.com, skyit, instagram, itv, SouthparkDe, spreaker, Vlive, akamai, ina, rumble, tennistv, amcnetworks, la7 podcasts, linuxacadamy, nitter, twitcasting, viu, crackle, curiositystream, mediasite, rmcdecouverte, sonyliv, tubi, tenplay, patreon
|
||||
|
||||
<!-- Relative link doesn't work for "#:~:text=" -->
|
||||
* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
|
||||
|
||||
* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)
|
||||
|
||||
* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [configuration](#configuration) for details
|
||||
|
||||
* **Other new options**: `--parse-metadata`, `--list-formats-as-table`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc
|
||||
* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata`
|
||||
|
||||
* **Improvements**: Multiple `--postprocessor-args` and `--external-downloader-args`, Date/time formatting in `-o`, faster archive checking, more [format selection options](#format-selection) etc
|
||||
* **Other new options**: `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc
|
||||
|
||||
* **Improvements**: Multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection) etc
|
||||
|
||||
* **Plugin extractors**: Extractors can be loaded from an external file. See [plugins](#plugins) for details
|
||||
|
||||
* **Self-updater**: The releases can be updated using `yt-dlp -U`
|
||||
|
||||
@@ -94,17 +112,69 @@ See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/comm
|
||||
|
||||
If you are coming from [youtube-dl](https://github.com/ytdl-org/youtube-dl), the amount of changes are very large. Compare [options](#options) and [supported sites](supportedsites.md) with youtube-dl's to get an idea of the massive number of features/patches [youtube-dlc](https://github.com/blackjack4494/yt-dlc) has accumulated.
|
||||
|
||||
### Differences in default behavior
|
||||
|
||||
Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc.
|
||||
|
||||
* The options `--id`, `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
|
||||
* `avconv` is not supported as an alternative to `ffmpeg`
|
||||
* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s.%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
|
||||
* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
|
||||
* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
|
||||
* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
|
||||
* `--ignore-errors` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
|
||||
* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
|
||||
* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-infojson`. Use `--compat-options no-attach-info-json` to revert this
|
||||
* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
|
||||
* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
|
||||
* Youtube live chat (if available) is considered as a subtitle. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent live chat from downloading
|
||||
* Youtube channel URLs are automatically redirected to `/video`. Append a `/featured` to the URL to download only the videos in the home page. If the channel does not have a videos tab, we try to download the equivalent `UU` playlist instead. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
|
||||
* Unavailable videos are also listed for youtube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
|
||||
* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
|
||||
|
||||
For ease of use, a few more compat options are available:
|
||||
* `--compat-options all`: Use all compat options
|
||||
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams`
|
||||
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect`
|
||||
|
||||
|
||||
# INSTALLATION
|
||||
yt-dlp is not platform specific. So it should work on your Unix box, on Windows or on macOS
|
||||
|
||||
You can install yt-dlp using one of the following methods:
|
||||
* Download the binary from the [latest release](https://github.com/yt-dlp/yt-dlp/releases/latest) (recommended method)
|
||||
* Use [PyPI package](https://pypi.org/project/yt-dlp): `python -m pip install --upgrade yt-dlp`
|
||||
* Use pip+git: `python -m pip install --upgrade git+https://github.com/yt-dlp/yt-dlp.git@release`
|
||||
* Install master branch: `python -m pip install --upgrade git+https://github.com/yt-dlp/yt-dlp`
|
||||
* Use [PyPI package](https://pypi.org/project/yt-dlp): `python3 -m pip install --upgrade yt-dlp`
|
||||
* Use pip+git: `python3 -m pip install --upgrade git+https://github.com/yt-dlp/yt-dlp.git@release`
|
||||
* Install master branch: `python3 -m pip install --upgrade git+https://github.com/yt-dlp/yt-dlp`
|
||||
|
||||
Note that on some systems, you may need to use `py` or `python` instead of `python3`
|
||||
|
||||
UNIX users (Linux, macOS, BSD) can also install the [latest release](https://github.com/yt-dlp/yt-dlp/releases/latest) one of the following ways:
|
||||
|
||||
```
|
||||
sudo curl -L https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o /usr/local/bin/yt-dlp
|
||||
sudo chmod a+rx /usr/local/bin/yt-dlp
|
||||
```
|
||||
|
||||
```
|
||||
sudo wget https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -O /usr/local/bin/yt-dlp
|
||||
sudo chmod a+rx /usr/local/bin/yt-dlp
|
||||
```
|
||||
|
||||
```
|
||||
sudo aria2c https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o /usr/local/bin/yt-dlp
|
||||
sudo chmod a+rx /usr/local/bin/yt-dlp
|
||||
```
|
||||
|
||||
### DEPENDENCIES
|
||||
Python versions 3.6+ (CPython and PyPy) are officially supported. Other versions and implementations may or may not work correctly.
|
||||
|
||||
On windows, [Microsoft Visual C++ 2010 Redistributable Package (x86)](https://www.microsoft.com/en-us/download/details.aspx?id=26999) is also necessary to run yt-dlp. You probably already have this, but if the executable throws an error due to missing `MSVCR100.dll` you need to install it.
|
||||
|
||||
Although there are no other required dependencies, `ffmpeg` and `ffprobe` are highly recommended. Other optional dependencies are `sponskrub`, `AtomicParsley`, `mutagen`, `pycryptodome`, `phantomjs` and any of the supported external downloaders. Note that the windows releases are already built with the python interpreter, mutagen and pycryptodome included.
|
||||
|
||||
### UPDATE
|
||||
Starting from version `2021.02.09`, you can use `yt-dlp -U` to update if you are using the provided release.
|
||||
You can use `yt-dlp -U` to update if you are using the provided release.
|
||||
If you are using `pip`, simply re-run the same command that was used to install the program.
|
||||
|
||||
### COMPILE
|
||||
@@ -112,13 +182,15 @@ If you are using `pip`, simply re-run the same command that was used to install
|
||||
**For Windows**:
|
||||
To build the Windows executable, you must have pyinstaller (and optionally mutagen and pycryptodome)
|
||||
|
||||
python -m pip install --upgrade pyinstaller mutagen pycryptodome
|
||||
python3 -m pip install --upgrade pyinstaller mutagen pycryptodome
|
||||
|
||||
Once you have all the necessary dependancies installed, just run `py pyinst.py`. The executable will be built for the same architecture (32/64 bit) as the python used to build it. It is strongly reccomended to use python3 although python2.6+ is supported.
|
||||
Once you have all the necessary dependencies installed, just run `py pyinst.py`. The executable will be built for the same architecture (32/64 bit) as the python used to build it.
|
||||
|
||||
You can also build the executable without any version info or metadata by using:
|
||||
|
||||
pyinstaller.exe yt_dlp\__main__.py --onefile --name yt-dlp
|
||||
|
||||
Note that pyinstaller [does not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment
|
||||
|
||||
**For Unix**:
|
||||
You will need the required build tools: `python`, `make` (GNU), `pandoc`, `zip`, `nosetests`
|
||||
@@ -126,15 +198,12 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
|
||||
**Note**: In either platform, `devscripts\update-version.py` can be used to automatically update the version number
|
||||
|
||||
# DESCRIPTION
|
||||
**yt-dlp** is a command-line program to download videos from youtube.com many other [video platforms](supportedsites.md). It requires the Python interpreter, version 2.6, 2.7, or 3.2+, and it is not platform specific. It should work on your Unix box, on Windows or on macOS. It is released to the public domain, which means you can modify it, redistribute it or use it however you like.
|
||||
# USAGE AND OPTIONS
|
||||
|
||||
yt-dlp [OPTIONS] [--] URL [URL...]
|
||||
|
||||
|
||||
# OPTIONS
|
||||
`Ctrl+F` is your friend :D
|
||||
<!-- Autogenerated -->
|
||||
<!-- Auto generated -->
|
||||
|
||||
## General Options:
|
||||
-h, --help Print this help text and exit
|
||||
@@ -177,8 +246,13 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
only list them
|
||||
--no-flat-playlist Extract the videos of a playlist
|
||||
--mark-watched Mark videos watched (YouTube only)
|
||||
--no-mark-watched Do not mark videos watched
|
||||
--no-mark-watched Do not mark videos watched (default)
|
||||
--no-colors Do not emit color codes in output
|
||||
--compat-options OPTS Options that can help keep compatibility
|
||||
with youtube-dl and youtube-dlc
|
||||
configurations by reverting some of the
|
||||
changes made in yt-dlp. See "Differences in
|
||||
default behavior" for details
|
||||
|
||||
## Network Options:
|
||||
--proxy URL Use the specified HTTP/HTTPS/SOCKS proxy.
|
||||
@@ -191,7 +265,7 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
-4, --force-ipv4 Make all connections via IPv4
|
||||
-6, --force-ipv6 Make all connections via IPv6
|
||||
|
||||
## Geo Restriction:
|
||||
## Geo-restriction:
|
||||
--geo-verification-proxy URL Use this proxy to verify the IP address for
|
||||
some geo-restricted sites. The default
|
||||
proxy specified by --proxy (or none, if the
|
||||
@@ -274,12 +348,14 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
a file that is in the archive
|
||||
--break-on-reject Stop the download process when encountering
|
||||
a file that has been filtered out
|
||||
--skip-playlist-after-errors N Number of allowed failures until the rest
|
||||
of the playlist is skipped
|
||||
--no-download-archive Do not use archive file (default)
|
||||
--include-ads Download advertisements as well
|
||||
(experimental)
|
||||
--no-include-ads Do not download advertisements (default)
|
||||
|
||||
## Download Options:
|
||||
-N, --concurrent-fragments N Number of fragments of a dash/hlsnative
|
||||
video that should be download concurrently
|
||||
(default is 1)
|
||||
-r, --limit-rate RATE Maximum download rate in bytes per second
|
||||
(e.g. 50K or 4.2M)
|
||||
-R, --retries RETRIES Number of retries (default is 10), or
|
||||
@@ -313,10 +389,6 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--playlist-random Download playlist videos in random order
|
||||
--xattr-set-filesize Set file xattribute ytdl.filesize with
|
||||
expected file size
|
||||
--hls-prefer-native Use the native HLS downloader instead of
|
||||
ffmpeg
|
||||
--hls-prefer-ffmpeg Use ffmpeg instead of the native HLS
|
||||
downloader
|
||||
--hls-use-mpegts Use the mpegts container for HLS videos;
|
||||
allowing some players to play the video
|
||||
while downloading, and reducing the chance
|
||||
@@ -326,10 +398,19 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--no-hls-use-mpegts Do not use the mpegts container for HLS
|
||||
videos. This is default when not
|
||||
downloading live streams
|
||||
--external-downloader NAME Name or path of the external downloader to
|
||||
use. Currently supports aria2c, avconv,
|
||||
axel, curl, ffmpeg, httpie, wget
|
||||
(Recommended: aria2c)
|
||||
--downloader [PROTO:]NAME Name or path of the external downloader to
|
||||
use (optionally) prefixed by the protocols
|
||||
(http, ftp, m3u8, dash, rstp, rtmp, mms) to
|
||||
use it for. Currently supports native,
|
||||
aria2c, avconv, axel, curl, ffmpeg, httpie,
|
||||
wget (Recommended: aria2c). You can use
|
||||
this option multiple times to set different
|
||||
downloaders for different protocols. For
|
||||
example, --downloader aria2c --downloader
|
||||
"dash,m3u8:native" will use aria2c for
|
||||
http/ftp downloads, and the native
|
||||
downloader for dash/m3u8 downloads
|
||||
(Alias: --external-downloader)
|
||||
--downloader-args NAME:ARGS Give these arguments to the external
|
||||
downloader. Specify the downloader name and
|
||||
the arguments separated by a colon ":". You
|
||||
@@ -341,7 +422,7 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
stdin), one URL per line. Lines starting
|
||||
with '#', ';' or ']' are considered as
|
||||
comments and ignored
|
||||
-P, --paths TYPE:PATH The paths where the files should be
|
||||
-P, --paths TYPES:PATH The paths where the files should be
|
||||
downloaded. Specify the type of file and
|
||||
the path separated by a colon ":". All the
|
||||
same types as --output are supported.
|
||||
@@ -352,13 +433,11 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
home path after download is finished. This
|
||||
option is ignored if --output is an
|
||||
absolute path
|
||||
-o, --output [TYPE:]TEMPLATE Output filename template, see "OUTPUT
|
||||
-o, --output [TYPES:]TEMPLATE Output filename template; see "OUTPUT
|
||||
TEMPLATE" for details
|
||||
--output-na-placeholder TEXT Placeholder value for unavailable meta
|
||||
fields in output filename template
|
||||
(default: "NA")
|
||||
--autonumber-start NUMBER Specify the start value for %(autonumber)s
|
||||
(default is 1)
|
||||
--restrict-filenames Restrict filenames to only ASCII
|
||||
characters, and avoid "&" and spaces in
|
||||
filenames
|
||||
@@ -378,7 +457,7 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
-c, --continue Resume partially downloaded files/fragments
|
||||
(default)
|
||||
--no-continue Do not resume partially downloaded
|
||||
fragments. If the file is unfragmented,
|
||||
fragments. If the file is not fragmented,
|
||||
restart download of the entire file
|
||||
--part Use .part files instead of writing directly
|
||||
into output file (default)
|
||||
@@ -402,10 +481,18 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--write-description etc. (default)
|
||||
--no-write-playlist-metafiles Do not write playlist metadata when using
|
||||
--write-info-json, --write-description etc.
|
||||
--get-comments Retrieve video comments to be placed in the
|
||||
.info.json file. The comments are fetched
|
||||
even without this option if the extraction
|
||||
is known to be quick
|
||||
--clean-infojson Remove some private fields such as
|
||||
filenames from the infojson. Note that it
|
||||
could still contain some personal
|
||||
information (default)
|
||||
--no-clean-infojson Write all fields to the infojson
|
||||
--write-comments Retrieve video comments to be placed in the
|
||||
infojson. The comments are fetched even
|
||||
without this option if the extraction is
|
||||
known to be quick (Alias: --get-comments)
|
||||
--no-write-comments Do not retrieve video comments unless the
|
||||
extraction is known to be quick
|
||||
(Alias: --no-get-comments)
|
||||
--load-info-json FILE JSON file containing the video information
|
||||
(created with the "--write-info-json"
|
||||
option)
|
||||
@@ -423,7 +510,7 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--no-cache-dir Disable filesystem caching
|
||||
--rm-cache-dir Delete all filesystem cache files
|
||||
|
||||
## Thumbnail Images:
|
||||
## Thumbnail Options:
|
||||
--write-thumbnail Write thumbnail image to disk
|
||||
--no-write-thumbnail Do not write thumbnail image to disk
|
||||
(default)
|
||||
@@ -445,15 +532,17 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--no-warnings Ignore warnings
|
||||
-s, --simulate Do not download the video and do not write
|
||||
anything to disk
|
||||
--skip-download Do not download the video
|
||||
-g, --get-url Simulate, quiet but print URL
|
||||
-e, --get-title Simulate, quiet but print title
|
||||
--get-id Simulate, quiet but print id
|
||||
--get-thumbnail Simulate, quiet but print thumbnail URL
|
||||
--get-description Simulate, quiet but print video description
|
||||
--get-duration Simulate, quiet but print video length
|
||||
--get-filename Simulate, quiet but print output filename
|
||||
--get-format Simulate, quiet but print output format
|
||||
--ignore-no-formats-error Ignore "No video formats" error. Usefull
|
||||
for extracting metadata even if the video
|
||||
is not actually available for download
|
||||
(experimental)
|
||||
--no-ignore-no-formats-error Throw error when no downloadable video
|
||||
formats are found (default)
|
||||
--skip-download Do not download the video but write all
|
||||
related files (Alias: --no-download)
|
||||
-O, --print TEMPLATE Simulate, quiet but print the given fields.
|
||||
Either a field name or similar formatting
|
||||
as the output template can be used
|
||||
-j, --dump-json Simulate, quiet but print JSON information.
|
||||
See "OUTPUT TEMPLATE" for a description of
|
||||
available keys
|
||||
@@ -464,8 +553,8 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--print-json Be quiet and print the video information as
|
||||
JSON (video is still being downloaded)
|
||||
--force-write-archive Force download archive entries to be
|
||||
written as far as no errors occur,even if
|
||||
-s or another simulation switch is used
|
||||
written as far as no errors occur, even if
|
||||
-s or another simulation option is used
|
||||
(Alias: --force-download-archive)
|
||||
--newline Output progress bar as new lines
|
||||
--no-progress Do not print progress bar
|
||||
@@ -482,7 +571,7 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--encoding ENCODING Force the specified encoding (experimental)
|
||||
--no-check-certificate Suppress HTTPS certificate validation
|
||||
--prefer-insecure Use an unencrypted connection to retrieve
|
||||
information about the video. (Currently
|
||||
information about the video (Currently
|
||||
supported only for YouTube)
|
||||
--user-agent UA Specify a custom user agent
|
||||
--referer URL Specify a custom referer, use if the video
|
||||
@@ -496,15 +585,11 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--sleep-requests SECONDS Number of seconds to sleep between requests
|
||||
during data extraction
|
||||
--sleep-interval SECONDS Number of seconds to sleep before each
|
||||
download when used alone or a lower bound
|
||||
of a range for randomized sleep before each
|
||||
download (minimum possible number of
|
||||
seconds to sleep) when used along with
|
||||
--max-sleep-interval
|
||||
--max-sleep-interval SECONDS Upper bound of a range for randomized sleep
|
||||
before each download (maximum possible
|
||||
number of seconds to sleep). Must only be
|
||||
used along with --min-sleep-interval
|
||||
download. This is the minimum time to sleep
|
||||
when used along with --max-sleep-interval
|
||||
(Alias: --min-sleep-interval)
|
||||
--max-sleep-interval SECONDS Maximum number of seconds to sleep. Can
|
||||
only be used along with --min-sleep-interval
|
||||
--sleep-subtitles SECONDS Number of seconds to sleep before each
|
||||
subtitle download
|
||||
|
||||
@@ -527,25 +612,22 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
into a single file
|
||||
--no-audio-multistreams Only one audio stream is downloaded for
|
||||
each output file (default)
|
||||
--all-formats Download all available video formats
|
||||
--prefer-free-formats Prefer video formats with free containers
|
||||
over non-free ones of same quality. Use
|
||||
with "-S ext" to strictly prefer free
|
||||
containers irrespective of quality
|
||||
--no-prefer-free-formats Don't give any special preference to free
|
||||
containers (default)
|
||||
--check-formats Check that the formats selected are
|
||||
actually downloadable (Experimental)
|
||||
-F, --list-formats List all available formats of requested
|
||||
videos
|
||||
--list-formats-as-table Present the output of -F in tabular form
|
||||
(default)
|
||||
--list-formats-old Present the output of -F in the old form
|
||||
(Alias: --no-list-formats-as-table)
|
||||
--merge-output-format FORMAT If a merge is required (e.g.
|
||||
bestvideo+bestaudio), output to given
|
||||
container format. One of mkv, mp4, ogg,
|
||||
webm, flv. Ignored if no merge is required
|
||||
--allow-unplayable-formats Allow unplayable formats to be listed and
|
||||
downloaded. All video postprocessing will
|
||||
downloaded. All video post-processing will
|
||||
also be turned off
|
||||
--no-allow-unplayable-formats Do not allow unplayable formats to be
|
||||
listed or downloaded (default)
|
||||
@@ -554,18 +636,20 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--write-subs Write subtitle file
|
||||
--no-write-subs Do not write subtitle file (default)
|
||||
--write-auto-subs Write automatically generated subtitle file
|
||||
(YouTube only)
|
||||
--no-write-auto-subs Do not write automatically generated
|
||||
subtitle file (default)
|
||||
--all-subs Download all the available subtitles of the
|
||||
video
|
||||
(Alias: --write-automatic-subs)
|
||||
--no-write-auto-subs Do not write auto-generated subtitles
|
||||
(default) (Alias: --no-write-automatic-subs)
|
||||
--list-subs List all available subtitles for the video
|
||||
--sub-format FORMAT Subtitle format, accepts formats
|
||||
preference, for example: "srt" or
|
||||
"ass/srt/best"
|
||||
--sub-lang LANGS Languages of the subtitles to download
|
||||
(optional) separated by commas, use --list-
|
||||
subs for available language tags
|
||||
--sub-langs LANGS Languages of the subtitles to download (can
|
||||
be regex) or "all" separated by commas.
|
||||
(Eg: --sub-langs en.*,ja) You can prefix
|
||||
the language code with a "-" to exempt it
|
||||
from the requested languages. (Eg: --sub-
|
||||
langs all,-live_chat) Use --list-subs for a
|
||||
list of available language tags
|
||||
|
||||
## Authentication Options:
|
||||
-u, --username USERNAME Login with this account ID
|
||||
@@ -574,8 +658,6 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
-2, --twofactor TWOFACTOR Two-factor authentication code
|
||||
-n, --netrc Use .netrc authentication data
|
||||
--video-password PASSWORD Video password (vimeo, youku)
|
||||
|
||||
## Adobe Pass Options:
|
||||
--ap-mso MSO Adobe Pass multiple-system operator (TV
|
||||
provider) identifier, use --ap-list-mso for
|
||||
a list of available MSOs
|
||||
@@ -589,10 +671,10 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
## Post-Processing Options:
|
||||
-x, --extract-audio Convert video files to audio-only files
|
||||
(requires ffmpeg and ffprobe)
|
||||
--audio-format FORMAT Specify audio format: "best", "aac",
|
||||
"flac", "mp3", "m4a", "opus", "vorbis", or
|
||||
"wav"; "best" by default; No effect without
|
||||
-x
|
||||
--audio-format FORMAT Specify audio format to convert the audio
|
||||
to when -x is used. Currently supported
|
||||
formats are: best (default) or one of
|
||||
aac|flac|mp3|m4a|opus|vorbis|wav
|
||||
--audio-quality QUALITY Specify ffmpeg audio quality, insert a
|
||||
value between 0 (better) and 9 (worse) for
|
||||
VBR or a specific bitrate like 128K
|
||||
@@ -602,33 +684,34 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
|webm|mov|avi|mp3|mka|m4a|ogg|opus). If
|
||||
target container does not support the
|
||||
video/audio codec, remuxing will fail. You
|
||||
can specify multiple rules; eg.
|
||||
can specify multiple rules; Eg.
|
||||
"aac>m4a/mov>mp4/mkv" will remux aac to
|
||||
m4a, mov to mp4 and anything else to mkv.
|
||||
--recode-video FORMAT Re-encode the video into another format if
|
||||
re-encoding is necessary. The supported
|
||||
formats are the same as --remux-video
|
||||
re-encoding is necessary. The syntax and
|
||||
supported formats are the same as --remux-video
|
||||
--postprocessor-args NAME:ARGS Give these arguments to the postprocessors.
|
||||
Specify the postprocessor/executable name
|
||||
and the arguments separated by a colon ":"
|
||||
to give the argument to the specified
|
||||
postprocessor/executable. Supported
|
||||
postprocessors are: SponSkrub,
|
||||
ExtractAudio, VideoRemuxer, VideoConvertor,
|
||||
EmbedSubtitle, Metadata, Merger,
|
||||
FixupStretched, FixupM4a, FixupM3u8,
|
||||
SubtitlesConvertor and EmbedThumbnail. The
|
||||
supported executables are: SponSkrub,
|
||||
FFmpeg, FFprobe, and AtomicParsley. You can
|
||||
postprocessor/executable. Supported PP are:
|
||||
Merger, ExtractAudio, SplitChapters,
|
||||
Metadata, EmbedSubtitle, EmbedThumbnail,
|
||||
SubtitlesConvertor, ThumbnailsConvertor,
|
||||
VideoRemuxer, VideoConvertor, SponSkrub,
|
||||
FixupStretched, FixupM4a and FixupM3u8. The
|
||||
supported executables are: AtomicParsley,
|
||||
FFmpeg, FFprobe, and SponSkrub. You can
|
||||
also specify "PP+EXE:ARGS" to give the
|
||||
arguments to the specified executable only
|
||||
when being used by the specified
|
||||
postprocessor. Additionally, for
|
||||
ffmpeg/ffprobe, a number can be appended to
|
||||
the exe name seperated by "_i" to pass the
|
||||
argument before the specified input file.
|
||||
Eg: --ppa "Merger+ffmpeg_i1:-v quiet". You
|
||||
can use this option multiple times to give
|
||||
ffmpeg/ffprobe, "_i"/"_o" can be appended
|
||||
to the prefix optionally followed by a
|
||||
number to pass the argument before the
|
||||
specified input/output file. Eg: --ppa
|
||||
"Merger+ffmpeg_i1:-v quiet". You can use
|
||||
this option multiple times to give
|
||||
different arguments to different
|
||||
postprocessors. (Alias: --ppa)
|
||||
-k, --keep-video Keep the intermediate video file on disk
|
||||
@@ -644,20 +727,9 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
--no-embed-thumbnail Do not embed thumbnail (default)
|
||||
--add-metadata Write metadata to the video file
|
||||
--no-add-metadata Do not write metadata (default)
|
||||
--parse-metadata FIELD:FORMAT Parse additional metadata like title/artist
|
||||
from other fields. Give field name to
|
||||
extract data from, and format of the field
|
||||
seperated by a ":". Either regular
|
||||
expression with named capture groups or a
|
||||
similar syntax to the output template can
|
||||
also be used. The parsed parameters replace
|
||||
any existing values and can be use in
|
||||
output template. This option can be used
|
||||
multiple times. Example: --parse-metadata
|
||||
"title:%(artist)s - %(title)s" matches a
|
||||
title like "Coldplay - Paradise". Example
|
||||
(regex): --parse-metadata
|
||||
"description:Artist - (?P<artist>.+?)"
|
||||
--parse-metadata FROM:TO Parse additional metadata like title/artist
|
||||
from other fields; see "MODIFYING METADATA"
|
||||
for details
|
||||
--xattrs Write metadata to the video file's xattrs
|
||||
(using dublin core and xdg standards)
|
||||
--fixup POLICY Automatically correct known faults of the
|
||||
@@ -669,11 +741,26 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
path to the binary or its containing
|
||||
directory
|
||||
--exec CMD Execute a command on the file after
|
||||
downloading and post-processing, similar to
|
||||
find's -exec syntax. Example: --exec 'adb
|
||||
push {} /sdcard/Music/ && rm {}'
|
||||
--convert-subs FORMAT Convert the subtitles to other format
|
||||
(currently supported: srt|ass|vtt|lrc)
|
||||
downloading and post-processing. Similar
|
||||
syntax to the output template can be used
|
||||
to pass any field as arguments to the
|
||||
command. An additional field "filepath"
|
||||
that contains the final path of the
|
||||
downloaded file is also available. If no
|
||||
fields are passed, "%(filepath)s" is
|
||||
appended to the end of the command
|
||||
--convert-subs FORMAT Convert the subtitles to another format
|
||||
(currently supported: srt|vtt|ass|lrc)
|
||||
(Alias: --convert-subtitles)
|
||||
--convert-thumbnails FORMAT Convert the thumbnails to another format
|
||||
(currently supported: jpg|png)
|
||||
--split-chapters Split video into multiple files based on
|
||||
internal chapters. The "chapter:" prefix
|
||||
can be used with "--paths" and "--output"
|
||||
to set the output filename for the split
|
||||
files. See "OUTPUT TEMPLATE" for details
|
||||
--no-split-chapters Do not split video based on chapters
|
||||
(default)
|
||||
|
||||
## SponSkrub (SponsorBlock) Options:
|
||||
[SponSkrub](https://github.com/yt-dlp/SponSkrub) is a utility to
|
||||
@@ -737,7 +824,7 @@ You can configure yt-dlp by placing any supported command line option to a confi
|
||||
* `~/yt-dlp.conf.txt`
|
||||
|
||||
Note that `~` points to `C:\Users\<user name>` on windows. Also, `%XDG_CONFIG_HOME%` defaults to `~/.config` if undefined
|
||||
1. **System Configuration**: `/etc/yt-dlp.conf` or `/etc/yt-dlp.conf`
|
||||
1. **System Configuration**: `/etc/yt-dlp.conf`
|
||||
|
||||
For example, with the following configuration file yt-dlp will always extract the audio, not copy the mtime, use a proxy and save all videos under `YouTube` directory in your home directory:
|
||||
```
|
||||
@@ -789,9 +876,22 @@ The `-o` option is used to indicate a template for the output file names while `
|
||||
|
||||
**tl;dr:** [navigate me to examples](#output-template-examples).
|
||||
|
||||
The basic usage of `-o` is not to set any template arguments when downloading a single file, like in `yt-dlp -o funny_video.flv "https://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [python string formatting operations](https://docs.python.org/2/library/stdtypes.html#string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by formatting operations. Date/time fields can also be formatted according to [strftime formatting](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) by specifying it inside the parantheses seperated from the field name using a `>`. For example, `%(duration>%H-%M-%S)s`.
|
||||
The simplest usage of `-o` is not to set any template arguments when downloading a single file, like in `yt-dlp -o funny_video.flv "https://some/video"` (hard-coding file extension like this is _not_ recommended and could break some post-processing).
|
||||
|
||||
Additionally, you can set different output templates for the various metadata files seperately from the general output template by specifying the type of file followed by the template seperated by a colon ":". The different filetypes supported are `subtitle|thumbnail|description|annotation|infojson|pl_description|pl_infojson`. For example, `-o '%(title)s.%(ext)s' -o 'thumbnail:%(title)s\%(title)s.%(ext)s'` will put the thumbnails in a folder with the same name as the video.
|
||||
It may however also contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [python string formatting operations](https://docs.python.org/2/library/stdtypes.html#string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by formatting operations.
|
||||
|
||||
The field names themselves (the part inside the parenthesis) can also have some special formatting:
|
||||
1. **Object traversal**: The dictionaries and lists available in metadata can be traversed by using a `.` (dot) separator. You can also do python slicing using `:`. Eg: `%(tags.0)s`, `%(subtitles.en.-1.ext)`, `%(id.3:7:-1)s`. Note that the fields that become available using this method are not listed below. Use `-j` to see such fields
|
||||
1. **Addition**: Addition and subtraction of numeric fields can be done using `+` and `-` respectively. Eg: `%(playlist_index+10)03d`, `%(n_entries+1-playlist_index)d`
|
||||
1. **Date/time Formatting**: Date/time fields can be formatted according to [strftime formatting](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) by specifying it separated from the field name using a `>`. Eg: `%(duration>%H-%M-%S)s`, `%(upload_date>%Y-%m-%d)s`, `%(epoch-3600>%H-%M-%S)s`
|
||||
1. **Default**: A default value can be specified for when the field is empty using a `|` seperator. This overrides `--output-na-template`. Eg: `%(uploader|Unknown)s`
|
||||
|
||||
To summarize, the general syntax for a field is:
|
||||
```
|
||||
%(name[.keys][addition][>strf][|default])[flags][width][.precision][length]type
|
||||
```
|
||||
|
||||
Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation`, `infojson`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`. For example, `-o '%(title)s.%(ext)s' -o 'thumbnail:%(title)s\%(title)s.%(ext)s'` will put the thumbnails in a folder with the same name as the video.
|
||||
|
||||
The available fields are:
|
||||
|
||||
@@ -800,6 +900,7 @@ The available fields are:
|
||||
- `url` (string): Video URL
|
||||
- `ext` (string): Video filename extension
|
||||
- `alt_title` (string): A secondary title of the video
|
||||
- `description` (string): The description of the video
|
||||
- `display_id` (string): An alternative identifier for the video
|
||||
- `uploader` (string): Full name of the video uploader
|
||||
- `license` (string): License name the video is licensed under
|
||||
@@ -812,7 +913,7 @@ The available fields are:
|
||||
- `channel_id` (string): Id of the channel
|
||||
- `location` (string): Physical location where the video was filmed
|
||||
- `duration` (numeric): Length of the video in seconds
|
||||
- `duration_string` (string): Length of the video (HH-mm-ss)
|
||||
- `duration_string` (string): Length of the video (HH:mm:ss)
|
||||
- `view_count` (numeric): How many users have watched the video on the platform
|
||||
- `like_count` (numeric): Number of positive ratings of the video
|
||||
- `dislike_count` (numeric): Number of negative ratings of the video
|
||||
@@ -823,6 +924,7 @@ The available fields are:
|
||||
- `is_live` (boolean): Whether this video is a live stream or a fixed-length video
|
||||
- `was_live` (boolean): Whether this video was originally a live stream
|
||||
- `playable_in_embed` (string): Whether this video is allowed to play in embedded players on other sites
|
||||
- `availability` (string): Whether the video is 'private', 'premium_only', 'subscriber_only', 'needs_auth', 'unlisted' or 'public'
|
||||
- `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL
|
||||
- `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL
|
||||
- `format` (string): A human-readable description of the format
|
||||
@@ -882,6 +984,18 @@ Available for the media that is a track or a part of a music album:
|
||||
- `disc_number` (numeric): Number of the disc or other physical medium the track belongs to
|
||||
- `release_year` (numeric): Year (YYYY) when the album was released
|
||||
|
||||
Available for `chapter:` prefix when using `--split-chapters` for videos with internal chapters:
|
||||
|
||||
- `section_title` (string): Title of the chapter
|
||||
- `section_number` (numeric): Number of the chapter within the file
|
||||
- `section_start` (numeric): Start time of the chapter in seconds
|
||||
- `section_end` (numeric): End time of the chapter in seconds
|
||||
|
||||
Available only when used in `--print`:
|
||||
|
||||
- `urls` (string): The URLs of all requested formats, one in each line
|
||||
- `filename` (string): Name of the video file. Note that the actual filename may be different due to post-processing. Use `--exec echo` to get the name after all postprocessing is complete
|
||||
|
||||
Each aforementioned sequence when referenced in an output template will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with placeholder value provided with `--output-na-placeholder` (`NA` by default).
|
||||
|
||||
For example for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `yt-dlp test video` and id `BaW_jenozKcj`, this will result in a `yt-dlp test video-BaW_jenozKcj.mp4` file created in the current directory.
|
||||
@@ -914,7 +1028,7 @@ youtube-dl_test_video_.mp4 # A simple file name
|
||||
# Download YouTube playlist videos in separate directory indexed by video order in a playlist
|
||||
$ yt-dlp -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
|
||||
|
||||
# Download YouTube playlist videos in seperate directories according to their uploaded year
|
||||
# Download YouTube playlist videos in separate directories according to their uploaded year
|
||||
$ yt-dlp -o '%(upload_date>%Y)s/%(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
|
||||
|
||||
# Download all playlists of YouTube channel/user keeping each playlist in separate directory:
|
||||
@@ -935,19 +1049,20 @@ $ yt-dlp -o - BaW_jenozKc
|
||||
By default, yt-dlp tries to download the best available quality if you **don't** pass any options.
|
||||
This is generally equivalent to using `-f bestvideo*+bestaudio/best`. However, if multiple audiostreams is enabled (`--audio-multistreams`), the default format changes to `-f bestvideo+bestaudio/best`. Similarly, if ffmpeg is unavailable, or if you use yt-dlp to stream to `stdout` (`-o -`), the default becomes `-f best/bestvideo+bestaudio`.
|
||||
|
||||
The general syntax for format selection is `--f FORMAT` (or `--format FORMAT`) where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.
|
||||
The general syntax for format selection is `-f FORMAT` (or `--format FORMAT`) where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.
|
||||
|
||||
**tl;dr:** [navigate me to examples](#format-selection-examples).
|
||||
|
||||
The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
|
||||
The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
|
||||
|
||||
You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format of a particular file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file.
|
||||
|
||||
You can also use special names to select particular edge case formats:
|
||||
|
||||
- `all`: Select all formats
|
||||
- `b*`, `best*`: Select the best quality format irrespective of whether it contains video or audio.
|
||||
- `w*`, `worst*`: Select the worst quality format irrespective of whether it contains video or audio.
|
||||
- `mergeall`: Select and merge all formats (Must be used with `--audio-multistreams`, `--video-multistreams` or both)
|
||||
- `b*`, `best*`: Select the best quality format irrespective of whether it contains video or audio
|
||||
- `w*`, `worst*`: Select the worst quality format irrespective of whether it contains video or audio
|
||||
- `b`, `best`: Select the best quality format that contains both video and audio. Equivalent to `best*[vcodec!=none][acodec!=none]`
|
||||
- `w`, `worst`: Select the worst quality format that contains both video and audio. Equivalent to `worst*[vcodec!=none][acodec!=none]`
|
||||
- `bv`, `bestvideo`: Select the best quality video-only format. Equivalent to `best*[acodec=none]`
|
||||
@@ -959,7 +1074,9 @@ You can also use special names to select particular edge case formats:
|
||||
- `ba*`, `bestaudio*`: Select the best quality format that contains audio. It may also contain video. Equivalent to `best*[acodec!=none]`
|
||||
- `wa*`, `worstaudio*`: Select the worst quality format that contains audio. It may also contain video. Equivalent to `worst*[acodec!=none]`
|
||||
|
||||
For example, to download the worst quality video-only format you can use `-f worstvideo`. It is however recomended to never actually use `worst` and related options. When your format selector is `worst`, the format which is worst in all respects is selected. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps` instead of `-f worst`. See [sorting formats](#sorting-formats) for more details.
|
||||
For example, to download the worst quality video-only format you can use `-f worstvideo`. It is however recommended not to use `worst` and related options. When your format selector is `worst`, the format which is worst in all respects is selected. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps` instead of `-f worst`. See [sorting formats](#sorting-formats) for more details.
|
||||
|
||||
You can select the n'th best format of a type by using `best<type>.<n>`. For example, `best.2` will select the 2nd best combined format. Similarly, `bv*.3` will select the 3rd best format that contains a video stream.
|
||||
|
||||
If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes. Note that formats on the left hand side are preferred, for example `-f 22/17/18` will download format 22 if it's available, otherwise it will download format 17 if it's available, otherwise it will download format 18 if it's available, otherwise it will complain that no suitable formats are available for download.
|
||||
|
||||
@@ -994,7 +1111,7 @@ Also filtering work for comparisons `=` (equals), `^=` (starts with), `$=` (ends
|
||||
|
||||
Any string comparison may be prefixed with negation `!` in order to produce an opposite comparison, e.g. `!*=` (does not contain).
|
||||
|
||||
Note that none of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the video hoster. Any other field made available by the extractor can also be used for filtering.
|
||||
Note that none of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering.
|
||||
|
||||
Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. You can also use the filters with `all` to download all formats that satisfy the filter. For example, `-f "all[vcodec=none]"` selects all audio-only formats.
|
||||
|
||||
@@ -1010,7 +1127,7 @@ You can change the criteria for being considered the `best` by using `-S` (`--fo
|
||||
- `lang`: Language preference as given by the extractor
|
||||
- `quality`: The quality of the format as given by the extractor
|
||||
- `source`: Preference of the source as given by the extractor
|
||||
- `proto`: Protocol used for download (`https`/`ftps` > `http`/`ftp` > `m3u8-native` > `m3u8` > `http-dash-segments` > other > `mms`/`rtsp` > unknown > `f4f`/`f4m`)
|
||||
- `proto`: Protocol used for download (`https`/`ftps` > `http`/`ftp` > `m3u8_native` > `m3u8` > `http_dash_segments` > other > `mms`/`rtsp` > unknown > `f4f`/`f4m`)
|
||||
- `vcodec`: Video Codec (`av01` > `vp9.2` > `vp9` > `h265` > `h264` > `vp8` > `h263` > `theora` > other > unknown)
|
||||
- `acodec`: Audio Codec (`opus` > `vorbis` > `aac` > `mp4a` > `mp3` > `ac3` > `dts` > other > unknown)
|
||||
- `codec`: Equivalent to `vcodec,acodec`
|
||||
@@ -1030,11 +1147,11 @@ You can change the criteria for being considered the `best` by using `-S` (`--fo
|
||||
- `br`: Equivalent to using `tbr,vbr,abr`
|
||||
- `asr`: Audio sample rate in Hz
|
||||
|
||||
Note that any other **numerical** field made available by the extractor can also be used. All fields, unless specified otherwise, are sorted in decending order. To reverse this, prefix the field with a `+`. Eg: `+res` prefers format with the smallest resolution. Additionally, you can suffix a prefered value for the fields, seperated by a `:`. Eg: `res:720` prefers larger videos, but no larger than 720p and the smallest video if there are no videos less than 720p. For `codec` and `ext`, you can provide two prefered values, the first for video and the second for audio. Eg: `+codec:avc:m4a` (equivalent to `+vcodec:avc,+acodec:m4a`) sets the video codec preference to `h264` > `h265` > `vp9` > `vp9.2` > `av01` > `vp8` > `h263` > `theora` and audio codec preference to `mp4a` > `aac` > `vorbis` > `opus` > `mp3` > `ac3` > `dts`. You can also make the sorting prefer the nearest values to the provided by using `~` as the delimiter. Eg: `filesize~1G` prefers the format with filesize closest to 1 GiB.
|
||||
Note that any other **numerical** field made available by the extractor can also be used. All fields, unless specified otherwise, are sorted in descending order. To reverse this, prefix the field with a `+`. Eg: `+res` prefers format with the smallest resolution. Additionally, you can suffix a preferred value for the fields, separated by a `:`. Eg: `res:720` prefers larger videos, but no larger than 720p and the smallest video if there are no videos less than 720p. For `codec` and `ext`, you can provide two preferred values, the first for video and the second for audio. Eg: `+codec:avc:m4a` (equivalent to `+vcodec:avc,+acodec:m4a`) sets the video codec preference to `h264` > `h265` > `vp9` > `vp9.2` > `av01` > `vp8` > `h263` > `theora` and audio codec preference to `mp4a` > `aac` > `vorbis` > `opus` > `mp3` > `ac3` > `dts`. You can also make the sorting prefer the nearest values to the provided by using `~` as the delimiter. Eg: `filesize~1G` prefers the format with filesize closest to 1 GiB.
|
||||
|
||||
The fields `hasvid`, `ie_pref`, `lang` are always given highest priority in sorting, irrespective of the user-defined order. This behaviour can be changed by using `--force-format-sort`. Apart from these, the default order used is: `quality,res,fps,codec:vp9.2,size,br,asr,proto,ext,hasaud,source,id`. Note that the extractors may override this default order, but they cannot override the user-provided order.
|
||||
|
||||
If your format selector is `worst`, the last item is selected after sorting. This means it will select the format that is worst in all repects. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps`.
|
||||
If your format selector is `worst`, the last item is selected after sorting. This means it will select the format that is worst in all respects. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps`.
|
||||
|
||||
**Tip**: You can use the `-v -F` to see how the formats have been sorted (worst to best).
|
||||
|
||||
@@ -1043,7 +1160,7 @@ If your format selector is `worst`, the last item is selected after sorting. Thi
|
||||
Note that on Windows you may need to use double quotes instead of single.
|
||||
|
||||
```bash
|
||||
# Download and merge the best best video-only format and the best audio-only format,
|
||||
# Download and merge the best video-only format and the best audio-only format,
|
||||
# or download the best combined format if video-only format is not available
|
||||
$ yt-dlp -f 'bv+ba/b'
|
||||
|
||||
@@ -1059,10 +1176,17 @@ $ yt-dlp
|
||||
# by default, bestvideo and bestaudio will have the same file name.
|
||||
$ yt-dlp -f 'bv,ba' -o '%(title)s.f%(format_id)s.%(ext)s'
|
||||
|
||||
# Download and merge the best format that has a video stream,
|
||||
# and all audio-only formats into one file
|
||||
$ yt-dlp -f 'bv*+mergeall[vcodec=none]' --audio-multistreams
|
||||
|
||||
# Download and merge the best format that has a video stream,
|
||||
# and the best 2 audio-only formats into one file
|
||||
$ yt-dlp -f 'bv*+ba+ba.2' --audio-multistreams
|
||||
|
||||
|
||||
# The following examples show the old method (without -S) of format selection
|
||||
# and how to use -S to achieve a similar but better result
|
||||
# and how to use -S to achieve a similar but (generally) better result
|
||||
|
||||
# Download the worst video available (old method)
|
||||
$ yt-dlp -f 'wv*+wa/w'
|
||||
@@ -1138,28 +1262,124 @@ $ yt-dlp -S '+codec:h264'
|
||||
|
||||
# More complex examples
|
||||
|
||||
# Download the best video no better than 720p prefering framerate greater than 30,
|
||||
# or the worst video (still prefering framerate greater than 30) if there is no such video
|
||||
# Download the best video no better than 720p preferring framerate greater than 30,
|
||||
# or the worst video (still preferring framerate greater than 30) if there is no such video
|
||||
$ yt-dlp -f '((bv*[fps>30]/bv*)[height<=720]/(wv*[fps>30]/wv*)) + ba / (b[fps>30]/b)[height<=720]/(w[fps>30]/w)'
|
||||
|
||||
# Download the video with the largest resolution no better than 720p,
|
||||
# or the video with the smallest resolution available if there is no such video,
|
||||
# prefering larger framerate for formats with the same resolution
|
||||
# or the video with the smallest resolution available if there is no such video,
|
||||
# preferring larger framerate for formats with the same resolution
|
||||
$ yt-dlp -S 'res:720,fps'
|
||||
|
||||
|
||||
|
||||
# Download the video with smallest resolution no worse than 480p,
|
||||
# or the video with the largest resolution available if there is no such video,
|
||||
# prefering better codec and then larger total bitrate for the same resolution
|
||||
# preferring better codec and then larger total bitrate for the same resolution
|
||||
$ yt-dlp -S '+res:480,codec,br'
|
||||
```
|
||||
|
||||
# MODIFYING METADATA
|
||||
|
||||
The metadata obtained by the extractors can be modified by using `--parse-metadata FROM:TO`. The general syntax is to give the name of a field or a template (with similar syntax to [output template](#output-template)) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
|
||||
|
||||
Note that any field created by this can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--add-metadata`.
|
||||
|
||||
This option also has a few special uses:
|
||||
* You can use this to change the metadata that is embedded in the media file. To do this, set the value of the corresponding field with a `meta_` prefix. For example, any value you set to `meta_description` field will be added to the `description` field in the file. You can use this to set a different "description" and "synopsis", for example
|
||||
* You can download an additional URL based on the metadata of the currently downloaded video. To do this, set the field `additional_urls` to the URL that you want to download. Eg: `--parse-metadata "description:(?P<additional_urls>https?://www\.vimeo\.com/\d+)"` will download the first vimeo video found in the description
|
||||
|
||||
## Modifying metadata examples
|
||||
|
||||
Note that on Windows you may need to use double quotes instead of single.
|
||||
|
||||
```bash
|
||||
# Interpret the title as "Artist - Title"
|
||||
$ yt-dlp --parse-metadata 'title:%(artist)s - %(title)s'
|
||||
|
||||
# Regex example
|
||||
$ yt-dlp --parse-metadata 'description:Artist - (?P<artist>.+)'
|
||||
|
||||
# Set title as "Series name S01E05"
|
||||
$ yt-dlp --parse-metadata '%(series)s S%(season_number)02dE%(episode_number)02d:%(title)s'
|
||||
|
||||
# Set "comment" field in video metadata using description instead of webpage_url
|
||||
$ yt-dlp --parse-metadata 'description:(?s)(?P<meta_comment>.+)' --add-metadata
|
||||
|
||||
```
|
||||
|
||||
# PLUGINS
|
||||
|
||||
Plugins are loaded from `<root-dir>/ytdlp_plugins/<type>/__init__.py`. Currently only `extractor` plugins are supported. Support for `downloader` and `postprocessor` plugins may be added in the future. See [ytdlp_plugins](ytdlp_plugins) for example.
|
||||
|
||||
**Note**: `<root-dir>` is the directory of the binary (`<root-dir>/yt-dlp`), or the root directory of the module if you are running directly from source-code (`<root dir>/yt_dlp/__main__.py`)
|
||||
|
||||
# DEPRECATED OPTIONS
|
||||
|
||||
These are all the deprecated options and the current alternative to achieve the same effect
|
||||
|
||||
#### Not recommended
|
||||
While these options still work, their use is not recommended since there are other alternatives to achieve the same
|
||||
|
||||
--get-description --print description
|
||||
--get-duration --print duration_string
|
||||
--get-filename --print filename
|
||||
--get-format --print format
|
||||
--get-id --print id
|
||||
--get-thumbnail --print thumbnail
|
||||
-e, --get-title --print title
|
||||
-g, --get-url --print urls
|
||||
--all-formats -f all
|
||||
--all-subs --sub-langs all --write-subs
|
||||
--autonumber-size NUMBER Use string formatting. Eg: %(autonumber)03d
|
||||
--autonumber-start NUMBER Use internal field formatting like %(autonumber+NUMBER)s
|
||||
--metadata-from-title FORMAT --parse-metadata "%(title)s:FORMAT"
|
||||
--hls-prefer-native --downloader "m3u8:native"
|
||||
--hls-prefer-ffmpeg --downloader "m3u8:ffmpeg"
|
||||
--list-formats-old --compat-options list-formats (Alias: --no-list-formats-as-table)
|
||||
--list-formats-as-table --compat-options -list-formats [Default] (Alias: --no-list-formats-old)
|
||||
--sponskrub-args ARGS --ppa "sponskrub:ARGS"
|
||||
--test Used by developers for testing extractors. Not intended for the end user
|
||||
--youtube-print-sig-code Used for testing youtube signatures
|
||||
|
||||
|
||||
#### Old aliases
|
||||
These are aliases that are no longer documented for various reasons
|
||||
|
||||
--avconv-location --ffmpeg-location
|
||||
--cn-verification-proxy URL --geo-verification-proxy URL
|
||||
--dump-headers --print-traffic
|
||||
--dump-intermediate-pages --dump-pages
|
||||
--force-write-download-archive --force-write-archive
|
||||
--load-info --load-info-json
|
||||
--no-split-tracks --no-split-chapters
|
||||
--no-write-srt --no-write-subs
|
||||
--prefer-unsecure --prefer-insecure
|
||||
--rate-limit RATE --limit-rate RATE
|
||||
--split-tracks --split-chapters
|
||||
--srt-lang LANGS --sub-langs LANGS
|
||||
--trim-file-names LENGTH --trim-filenames LENGTH
|
||||
--write-srt --write-subs
|
||||
--yes-overwrites --force-overwrites
|
||||
|
||||
#### No longer supported
|
||||
These options may no longer work as intended
|
||||
|
||||
--prefer-avconv avconv is not officially supported by yt-dlp (Alias: --no-prefer-ffmpeg)
|
||||
--prefer-ffmpeg Default (Alias: --no-prefer-avconv)
|
||||
-C, --call-home Not implemented
|
||||
--no-call-home Default
|
||||
--include-ads No longer supported
|
||||
--no-include-ads Default
|
||||
|
||||
#### Removed
|
||||
These options were deprecated since 2014 and have now been entirely removed
|
||||
|
||||
--id -o "%(id)s.%(ext)s"
|
||||
-A, --auto-number -o "%(autonumber)s-%(id)s.%(ext)s"
|
||||
-t, --title -o "%(title)s-%(id)s.%(ext)s"
|
||||
-l, --literal -o accepts literal names
|
||||
|
||||
|
||||
# MORE
|
||||
For FAQ, Developer Instructions etc., see the [original README](https://github.com/ytdl-org/youtube-dl#faq)
|
||||
|
||||
@@ -14,9 +14,14 @@ lazy_extractors_filename = sys.argv[1]
|
||||
if os.path.exists(lazy_extractors_filename):
|
||||
os.remove(lazy_extractors_filename)
|
||||
|
||||
# Block plugins from loading
|
||||
os.rename('ytdlp_plugins', 'ytdlp_plugins_blocked')
|
||||
|
||||
from yt_dlp.extractor import _ALL_CLASSES
|
||||
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
|
||||
|
||||
os.rename('ytdlp_plugins_blocked', 'ytdlp_plugins')
|
||||
|
||||
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
||||
module_template = f.read()
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ assert arch in ('32', '64')
|
||||
print('Building %sbit version' % arch)
|
||||
_x86 = '_x86' if arch == '32' else ''
|
||||
|
||||
FILE_DESCRIPTION = 'Media Downloader%s' % (' (32 Bit)' if _x86 else '')
|
||||
FILE_DESCRIPTION = 'yt-dlp%s' % (' (32 Bit)' if _x86 else '')
|
||||
|
||||
# root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
|
||||
# print('Changing working directory to %s' % root_dir)
|
||||
@@ -69,6 +69,7 @@ PyInstaller.__main__.run([
|
||||
'--onefile',
|
||||
'--icon=devscripts/cloud.ico',
|
||||
'--exclude-module=youtube_dl',
|
||||
'--exclude-module=youtube_dlc',
|
||||
'--exclude-module=test',
|
||||
'--exclude-module=ytdlp_plugins',
|
||||
'--hidden-import=mutagen',
|
||||
|
||||
117
setup.py
117
setup.py
@@ -9,45 +9,44 @@ from distutils.spawn import spawn
|
||||
|
||||
|
||||
# Get the version from yt_dlp/version.py without importing the package
|
||||
exec(compile(open('yt_dlp/version.py').read(),
|
||||
'yt_dlp/version.py', 'exec'))
|
||||
exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
|
||||
|
||||
|
||||
DESCRIPTION = 'Command-line program to download videos from YouTube.com and many other video platforms.'
|
||||
|
||||
LONG_DESCRIPTION = '\n\n'.join((
|
||||
'Official repository: <https://github.com/yt-dlp/yt-dlp>',
|
||||
'**PS**: Many links in this document will not work since this is a copy of the README.md from Github',
|
||||
open("README.md", "r", encoding="utf-8").read()))
|
||||
'**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
|
||||
open('README.md', 'r', encoding='utf-8').read()))
|
||||
|
||||
REQUIREMENTS = ['mutagen', 'pycryptodome']
|
||||
|
||||
if sys.argv[1:2] == ['py2exe']:
|
||||
raise NotImplementedError('py2exe is not currently supported; instead, use "pyinst.py" to build with pyinstaller')
|
||||
|
||||
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
|
||||
print("inv")
|
||||
else:
|
||||
files_spec = [
|
||||
('share/bash-completion/completions', ['completions/bash/yt-dlp']),
|
||||
('share/zsh/site-functions', ['completions/zsh/_yt-dlp']),
|
||||
('share/fish/vendor_completions.d', ['completions/fish/yt-dlp.fish']),
|
||||
('share/doc/yt_dlp', ['README.txt']),
|
||||
('share/man/man1', ['yt-dlp.1'])
|
||||
]
|
||||
root = os.path.dirname(os.path.abspath(__file__))
|
||||
data_files = []
|
||||
for dirname, files in files_spec:
|
||||
resfiles = []
|
||||
for fn in files:
|
||||
if not os.path.exists(fn):
|
||||
warnings.warn('Skipping file %s since it is not present. Try running `make pypi-files` first.' % fn)
|
||||
else:
|
||||
resfiles.append(fn)
|
||||
data_files.append((dirname, resfiles))
|
||||
|
||||
params = {
|
||||
'data_files': data_files,
|
||||
}
|
||||
params['entry_points'] = {'console_scripts': ['yt-dlp = yt_dlp:main']}
|
||||
files_spec = [
|
||||
('share/bash-completion/completions', ['completions/bash/yt-dlp']),
|
||||
('share/zsh/site-functions', ['completions/zsh/_yt-dlp']),
|
||||
('share/fish/vendor_completions.d', ['completions/fish/yt-dlp.fish']),
|
||||
('share/doc/yt_dlp', ['README.txt']),
|
||||
('share/man/man1', ['yt-dlp.1'])
|
||||
]
|
||||
root = os.path.dirname(os.path.abspath(__file__))
|
||||
data_files = []
|
||||
for dirname, files in files_spec:
|
||||
resfiles = []
|
||||
for fn in files:
|
||||
if not os.path.exists(fn):
|
||||
warnings.warn('Skipping file %s since it is not present. Try running `make pypi-files` first' % fn)
|
||||
else:
|
||||
resfiles.append(fn)
|
||||
data_files.append((dirname, resfiles))
|
||||
|
||||
params = {
|
||||
'data_files': data_files,
|
||||
}
|
||||
params['entry_points'] = {'console_scripts': ['yt-dlp = yt_dlp:main']}
|
||||
|
||||
|
||||
class build_lazy_extractors(Command):
|
||||
@@ -61,23 +60,21 @@ class build_lazy_extractors(Command):
|
||||
pass
|
||||
|
||||
def run(self):
|
||||
spawn(
|
||||
[sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'],
|
||||
dry_run=self.dry_run,
|
||||
)
|
||||
spawn([sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'],
|
||||
dry_run=self.dry_run)
|
||||
|
||||
|
||||
packages = find_packages(exclude=("youtube_dl", "test", "ytdlp_plugins"))
|
||||
packages = find_packages(exclude=('youtube_dl', 'test', 'ytdlp_plugins'))
|
||||
|
||||
setup(
|
||||
name="yt-dlp",
|
||||
name='yt-dlp',
|
||||
version=__version__,
|
||||
maintainer="pukkandan",
|
||||
maintainer_email="pukkandan.ytdlp@gmail.com",
|
||||
maintainer='pukkandan',
|
||||
maintainer_email='pukkandan.ytdlp@gmail.com',
|
||||
description=DESCRIPTION,
|
||||
long_description=LONG_DESCRIPTION,
|
||||
long_description_content_type="text/markdown",
|
||||
url="https://github.com/yt-dlp/yt-dlp",
|
||||
long_description_content_type='text/markdown',
|
||||
url='https://github.com/yt-dlp/yt-dlp',
|
||||
packages=packages,
|
||||
install_requires=REQUIREMENTS,
|
||||
project_urls={
|
||||
@@ -87,28 +84,28 @@ setup(
|
||||
#'Funding': 'https://donate.pypi.org',
|
||||
},
|
||||
classifiers=[
|
||||
"Topic :: Multimedia :: Video",
|
||||
"Development Status :: 5 - Production/Stable",
|
||||
"Environment :: Console",
|
||||
"Programming Language :: Python",
|
||||
"Programming Language :: Python :: 2",
|
||||
"Programming Language :: Python :: 2.6",
|
||||
"Programming Language :: Python :: 2.7",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.2",
|
||||
"Programming Language :: Python :: 3.3",
|
||||
"Programming Language :: Python :: 3.4",
|
||||
"Programming Language :: Python :: 3.5",
|
||||
"Programming Language :: Python :: 3.6",
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: Implementation",
|
||||
"Programming Language :: Python :: Implementation :: CPython",
|
||||
"Programming Language :: Python :: Implementation :: IronPython",
|
||||
"Programming Language :: Python :: Implementation :: Jython",
|
||||
"Programming Language :: Python :: Implementation :: PyPy",
|
||||
"License :: Public Domain",
|
||||
"Operating System :: OS Independent",
|
||||
'Topic :: Multimedia :: Video',
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'Environment :: Console',
|
||||
'Programming Language :: Python',
|
||||
'Programming Language :: Python :: 2',
|
||||
'Programming Language :: Python :: 2.6',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
'Programming Language :: Python :: 3.2',
|
||||
'Programming Language :: Python :: 3.3',
|
||||
'Programming Language :: Python :: 3.4',
|
||||
'Programming Language :: Python :: 3.5',
|
||||
'Programming Language :: Python :: 3.6',
|
||||
'Programming Language :: Python :: 3.7',
|
||||
'Programming Language :: Python :: 3.8',
|
||||
'Programming Language :: Python :: Implementation',
|
||||
'Programming Language :: Python :: Implementation :: CPython',
|
||||
'Programming Language :: Python :: Implementation :: IronPython',
|
||||
'Programming Language :: Python :: Implementation :: Jython',
|
||||
'Programming Language :: Python :: Implementation :: PyPy',
|
||||
'License :: Public Domain',
|
||||
'Operating System :: OS Independent',
|
||||
],
|
||||
python_requires='>=2.6',
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
- **20min**
|
||||
- **220.ro**
|
||||
- **23video**
|
||||
- **247sports**
|
||||
- **24video**
|
||||
- **3qsdn**: 3Q SDN
|
||||
- **3sat**
|
||||
@@ -81,6 +82,7 @@
|
||||
- **audiomack**
|
||||
- **audiomack:album**
|
||||
- **Audius**: Audius.co
|
||||
- **audius:artist**: Audius.co profile/artist pages
|
||||
- **audius:playlist**: Audius.co playlists
|
||||
- **audius:track**: Audius track ID or API link. Prepend with "audius:"
|
||||
- **AWAAN**
|
||||
@@ -97,7 +99,8 @@
|
||||
- **bbc**: BBC
|
||||
- **bbc.co.uk**: BBC iPlayer
|
||||
- **bbc.co.uk:article**: BBC articles
|
||||
- **bbc.co.uk:iplayer:playlist**
|
||||
- **bbc.co.uk:iplayer:episodes**
|
||||
- **bbc.co.uk:iplayer:group**
|
||||
- **bbc.co.uk:playlist**
|
||||
- **BBVTV**
|
||||
- **Beatport**
|
||||
@@ -128,7 +131,6 @@
|
||||
- **bitwave:stream**
|
||||
- **BleacherReport**
|
||||
- **BleacherReportCMS**
|
||||
- **blinkx**
|
||||
- **Bloomberg**
|
||||
- **BokeCC**
|
||||
- **BongaCams**
|
||||
@@ -170,7 +172,8 @@
|
||||
- **cbsnews**: CBS News
|
||||
- **cbsnews:embed**
|
||||
- **cbsnews:livevideo**: CBS News Live Videos
|
||||
- **CBSSports**
|
||||
- **cbssports**
|
||||
- **cbssports:embed**
|
||||
- **CCMA**
|
||||
- **CCTV**: 央视网
|
||||
- **CDA**
|
||||
@@ -222,7 +225,8 @@
|
||||
- **Culturebox**
|
||||
- **CultureUnplugged**
|
||||
- **curiositystream**
|
||||
- **curiositystream:collection**
|
||||
- **curiositystream:collections**
|
||||
- **curiositystream:series**
|
||||
- **CWTV**
|
||||
- **DagelijkseKost**: dagelijksekost.een.be
|
||||
- **DailyMail**
|
||||
@@ -248,6 +252,8 @@
|
||||
- **DiscoveryGoPlaylist**
|
||||
- **DiscoveryNetworksDe**
|
||||
- **DiscoveryPlus**
|
||||
- **DiscoveryPlusIndia**
|
||||
- **DiscoveryPlusIndiaShow**
|
||||
- **DiscoveryVR**
|
||||
- **Disney**
|
||||
- **dlive:stream**
|
||||
@@ -301,6 +307,7 @@
|
||||
- **EyedoTV**
|
||||
- **facebook**
|
||||
- **FacebookPluginsVideo**
|
||||
- **fancode:vod**
|
||||
- **faz.net**
|
||||
- **fc2**
|
||||
- **fc2:embed**
|
||||
@@ -347,8 +354,7 @@
|
||||
- **Gaskrank**
|
||||
- **Gazeta**
|
||||
- **GDCVault**
|
||||
- **Gedi**
|
||||
- **GediEmbeds**
|
||||
- **GediDigital**
|
||||
- **generic**: Generic downloader that works on some sites
|
||||
- **Gfycat**
|
||||
- **GiantBomb**
|
||||
@@ -387,6 +393,7 @@
|
||||
- **HotNewHipHop**
|
||||
- **hotstar**
|
||||
- **hotstar:playlist**
|
||||
- **hotstar:series**
|
||||
- **Howcast**
|
||||
- **HowStuffWorks**
|
||||
- **hrfernsehen**
|
||||
@@ -457,6 +464,8 @@
|
||||
- **kuwo:singer**: 酷我音乐 - 歌手
|
||||
- **kuwo:song**: 酷我音乐
|
||||
- **la7.it**
|
||||
- **la7.it:pod:episode**
|
||||
- **la7.it:podcast**
|
||||
- **laola1tv**
|
||||
- **laola1tv:embed**
|
||||
- **lbry**
|
||||
@@ -480,6 +489,8 @@
|
||||
- **limelight**
|
||||
- **limelight:channel**
|
||||
- **limelight:channel_list**
|
||||
- **LineLive**
|
||||
- **LineLiveChannel**
|
||||
- **LineTV**
|
||||
- **linkedin:learning**
|
||||
- **linkedin:learning:course**
|
||||
@@ -506,6 +517,7 @@
|
||||
- **mangomolo:live**
|
||||
- **mangomolo:video**
|
||||
- **ManyVids**
|
||||
- **MaoriTV**
|
||||
- **Markiza**
|
||||
- **MarkizaPage**
|
||||
- **massengeschmack.tv**
|
||||
@@ -544,6 +556,7 @@
|
||||
- **mixcloud:playlist**
|
||||
- **mixcloud:user**
|
||||
- **MLB**
|
||||
- **MLBVideo**
|
||||
- **Mnet**
|
||||
- **MNetTV**
|
||||
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
||||
@@ -562,6 +575,8 @@
|
||||
- **mtg**: MTG services
|
||||
- **mtv**
|
||||
- **mtv.de**
|
||||
- **mtv.it**
|
||||
- **mtv.it:programma**
|
||||
- **mtv:video**
|
||||
- **mtvjapan**
|
||||
- **mtvservices:embedded**
|
||||
@@ -572,6 +587,7 @@
|
||||
- **Mwave**
|
||||
- **MwaveMeetGreet**
|
||||
- **Mxplayer**
|
||||
- **MxplayerShow**
|
||||
- **MyChannels**
|
||||
- **MySpace**
|
||||
- **MySpace:album**
|
||||
@@ -603,6 +619,7 @@
|
||||
- **ndr:embed**
|
||||
- **ndr:embed:base**
|
||||
- **NDTV**
|
||||
- **Nebula**
|
||||
- **NerdCubedFeed**
|
||||
- **netease:album**: 网易云音乐 - 专辑
|
||||
- **netease:djradio**: 网易云音乐 - 电台
|
||||
@@ -621,6 +638,7 @@
|
||||
- **NextTV**: 壹電視
|
||||
- **Nexx**
|
||||
- **NexxEmbed**
|
||||
- **NFHSNetwork**
|
||||
- **nfl.com** (Currently broken)
|
||||
- **nfl.com:article** (Currently broken)
|
||||
- **NhkVod**
|
||||
@@ -633,6 +651,7 @@
|
||||
- **nicknight**
|
||||
- **niconico**: ニコニコ動画
|
||||
- **NiconicoPlaylist**
|
||||
- **NiconicoUser**
|
||||
- **Nintendo**
|
||||
- **Nitter**
|
||||
- **njoy**: N-JOY
|
||||
@@ -703,9 +722,13 @@
|
||||
- **OutsideTV**
|
||||
- **PacktPub**
|
||||
- **PacktPubCourse**
|
||||
- **PalcoMP3:artist**
|
||||
- **PalcoMP3:song**
|
||||
- **PalcoMP3:video**
|
||||
- **pandora.tv**: 판도라TV
|
||||
- **ParamountNetwork**
|
||||
- **parliamentlive.tv**: UK parliament videos
|
||||
- **Parlview**
|
||||
- **Patreon**
|
||||
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! 
(WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
||||
- **PearVideo**
|
||||
@@ -729,12 +752,14 @@
|
||||
- **play.fm**
|
||||
- **player.sky.it**
|
||||
- **PlayPlusTV**
|
||||
- **PlayStuff**
|
||||
- **PlaysTV**
|
||||
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
|
||||
- **Playvid**
|
||||
- **Playwire**
|
||||
- **pluralsight**
|
||||
- **pluralsight:course**
|
||||
- **PlutoTV**
|
||||
- **podomatic**
|
||||
- **Pokemon**
|
||||
- **PokemonWatch**
|
||||
@@ -836,6 +861,7 @@
|
||||
- **safari**: safaribooksonline.com online video
|
||||
- **safari:api**
|
||||
- **safari:course**: safaribooksonline.com online courses
|
||||
- **Saitosan**
|
||||
- **SAKTV**
|
||||
- **SaltTV**
|
||||
- **SampleFocus**
|
||||
@@ -860,6 +886,7 @@
|
||||
- **Shahid**
|
||||
- **ShahidShow**
|
||||
- **Shared**: shared.sx
|
||||
- **ShemarooMe**
|
||||
- **ShowRoomLive**
|
||||
- **simplecast**
|
||||
- **simplecast:episode**
|
||||
@@ -879,6 +906,7 @@
|
||||
- **Snotr**
|
||||
- **Sohu**
|
||||
- **SonyLIV**
|
||||
- **SonyLIVSeries**
|
||||
- **soundcloud**
|
||||
- **soundcloud:playlist**
|
||||
- **soundcloud:search**: Soundcloud search
|
||||
@@ -915,6 +943,7 @@
|
||||
- **stanfordoc**: Stanford Open ClassRoom
|
||||
- **Steam**
|
||||
- **Stitcher**
|
||||
- **StitcherShow**
|
||||
- **StoryFire**
|
||||
- **StoryFireSeries**
|
||||
- **StoryFireUser**
|
||||
@@ -956,6 +985,7 @@
|
||||
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
||||
- **Telegraaf**
|
||||
- **TeleMB**
|
||||
- **Telemundo**
|
||||
- **TeleQuebec**
|
||||
- **TeleQuebecEmission**
|
||||
- **TeleQuebecLive**
|
||||
@@ -996,6 +1026,7 @@
|
||||
- **TruTV**
|
||||
- **Tube8**
|
||||
- **TubiTv**
|
||||
- **TubiTvShow**
|
||||
- **Tumblr**
|
||||
- **tunein:clip**
|
||||
- **tunein:program**
|
||||
@@ -1055,6 +1086,7 @@
|
||||
- **UDNEmbed**: 聯合影音
|
||||
- **UFCArabia**
|
||||
- **UFCTV**
|
||||
- **ukcolumn**
|
||||
- **UKTVPlay**
|
||||
- **umg:de**: Universal Music Deutschland
|
||||
- **Unistra**
|
||||
@@ -1087,6 +1119,7 @@
|
||||
- **Vidbit**
|
||||
- **Viddler**
|
||||
- **Videa**
|
||||
- **video.arnes.si**: Arnes Video
|
||||
- **video.google:search**: Google Video search
|
||||
- **video.sky.it**
|
||||
- **video.sky.it:live**
|
||||
@@ -1136,6 +1169,7 @@
|
||||
- **VODPlatform**
|
||||
- **VoiceRepublic**
|
||||
- **Voot**
|
||||
- **VootSeries**
|
||||
- **VoxMedia**
|
||||
- **VoxMediaVolume**
|
||||
- **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||
@@ -1165,6 +1199,7 @@
|
||||
- **wdr:mobile**
|
||||
- **WDRElefant**
|
||||
- **WDRPage**
|
||||
- **web.archive:youtube**: web.archive.org saved youtube videos
|
||||
- **Webcaster**
|
||||
- **WebcasterFeed**
|
||||
- **WebOfStories**
|
||||
@@ -1172,6 +1207,8 @@
|
||||
- **Weibo**
|
||||
- **WeiboMobile**
|
||||
- **WeiqiTV**: WQTV
|
||||
- **whowatch**
|
||||
- **WimTV**
|
||||
- **Wistia**
|
||||
- **WistiaPlaylist**
|
||||
- **wnl**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||
@@ -1181,7 +1218,7 @@
|
||||
- **WWE**
|
||||
- **XBef**
|
||||
- **XboxClips**
|
||||
- **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, XVideoSharing
|
||||
- **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, WolfStream, XVideoSharing
|
||||
- **XHamster**
|
||||
- **XHamsterEmbed**
|
||||
- **XHamsterUser**
|
||||
@@ -1242,7 +1279,9 @@
|
||||
- **ZDF**
|
||||
- **ZDFChannel**
|
||||
- **Zee5**
|
||||
- **zee5:series**
|
||||
- **Zhihu**
|
||||
- **zingmp3**: mp3.zing.vn
|
||||
- **zingmp3:album**
|
||||
- **zoom**
|
||||
- **Zype**
|
||||
|
||||
@@ -1,41 +1,40 @@
|
||||
{
|
||||
"consoletitle": false,
|
||||
"continuedl": true,
|
||||
"forcedescription": false,
|
||||
"forcefilename": false,
|
||||
"forceformat": false,
|
||||
"forcethumbnail": false,
|
||||
"forcetitle": false,
|
||||
"forceurl": false,
|
||||
"consoletitle": false,
|
||||
"continuedl": true,
|
||||
"forcedescription": false,
|
||||
"forcefilename": false,
|
||||
"forceformat": false,
|
||||
"forcethumbnail": false,
|
||||
"forcetitle": false,
|
||||
"forceurl": false,
|
||||
"force_write_download_archive": false,
|
||||
"format": "best",
|
||||
"ignoreerrors": false,
|
||||
"listformats": null,
|
||||
"logtostderr": false,
|
||||
"matchtitle": null,
|
||||
"max_downloads": null,
|
||||
"overwrites": null,
|
||||
"nopart": false,
|
||||
"noprogress": false,
|
||||
"outtmpl": "%(id)s.%(ext)s",
|
||||
"password": null,
|
||||
"playlistend": -1,
|
||||
"playliststart": 1,
|
||||
"prefer_free_formats": false,
|
||||
"quiet": false,
|
||||
"ratelimit": null,
|
||||
"rejecttitle": null,
|
||||
"retries": 10,
|
||||
"simulate": false,
|
||||
"subtitleslang": null,
|
||||
"ignoreerrors": false,
|
||||
"listformats": null,
|
||||
"logtostderr": false,
|
||||
"matchtitle": null,
|
||||
"max_downloads": null,
|
||||
"overwrites": null,
|
||||
"nopart": false,
|
||||
"noprogress": false,
|
||||
"outtmpl": "%(id)s.%(ext)s",
|
||||
"password": null,
|
||||
"playliststart": 1,
|
||||
"prefer_free_formats": false,
|
||||
"quiet": false,
|
||||
"ratelimit": null,
|
||||
"rejecttitle": null,
|
||||
"retries": 10,
|
||||
"simulate": false,
|
||||
"subtitleslang": null,
|
||||
"subtitlesformat": "best",
|
||||
"test": true,
|
||||
"updatetime": true,
|
||||
"usenetrc": false,
|
||||
"username": null,
|
||||
"verbose": true,
|
||||
"writedescription": false,
|
||||
"writeinfojson": true,
|
||||
"test": true,
|
||||
"updatetime": true,
|
||||
"usenetrc": false,
|
||||
"username": null,
|
||||
"verbose": true,
|
||||
"writedescription": false,
|
||||
"writeinfojson": true,
|
||||
"writeannotations": false,
|
||||
"writelink": false,
|
||||
"writeurllink": false,
|
||||
|
||||
@@ -450,7 +450,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'language': 'en',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'audio_ext': 'mp4',
|
||||
}, {
|
||||
'format_id': 'aud2-English',
|
||||
@@ -458,7 +458,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'language': 'en',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'audio_ext': 'mp4',
|
||||
}, {
|
||||
'format_id': 'aud3-English',
|
||||
@@ -466,14 +466,14 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'language': 'en',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'audio_ext': 'mp4',
|
||||
}, {
|
||||
'format_id': '530',
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v2/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 480,
|
||||
'height': 270,
|
||||
'vcodec': 'avc1.640015',
|
||||
@@ -482,7 +482,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v2/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 480,
|
||||
'height': 270,
|
||||
'vcodec': 'avc1.640015',
|
||||
@@ -491,7 +491,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v2/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 480,
|
||||
'height': 270,
|
||||
'vcodec': 'avc1.640015',
|
||||
@@ -500,7 +500,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v3/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 640,
|
||||
'height': 360,
|
||||
'vcodec': 'avc1.64001e',
|
||||
@@ -509,7 +509,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v3/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 640,
|
||||
'height': 360,
|
||||
'vcodec': 'avc1.64001e',
|
||||
@@ -518,7 +518,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v3/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 640,
|
||||
'height': 360,
|
||||
'vcodec': 'avc1.64001e',
|
||||
@@ -527,7 +527,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v4/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 768,
|
||||
'height': 432,
|
||||
'vcodec': 'avc1.64001e',
|
||||
@@ -536,7 +536,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v4/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 768,
|
||||
'height': 432,
|
||||
'vcodec': 'avc1.64001e',
|
||||
@@ -545,7 +545,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v4/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 768,
|
||||
'height': 432,
|
||||
'vcodec': 'avc1.64001e',
|
||||
@@ -554,7 +554,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v5/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 960,
|
||||
'height': 540,
|
||||
'vcodec': 'avc1.640020',
|
||||
@@ -563,7 +563,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v5/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 960,
|
||||
'height': 540,
|
||||
'vcodec': 'avc1.640020',
|
||||
@@ -572,7 +572,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v5/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 960,
|
||||
'height': 540,
|
||||
'vcodec': 'avc1.640020',
|
||||
@@ -581,7 +581,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v6/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1280,
|
||||
'height': 720,
|
||||
'vcodec': 'avc1.640020',
|
||||
@@ -590,7 +590,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v6/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1280,
|
||||
'height': 720,
|
||||
'vcodec': 'avc1.640020',
|
||||
@@ -599,7 +599,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v6/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1280,
|
||||
'height': 720,
|
||||
'vcodec': 'avc1.640020',
|
||||
@@ -608,7 +608,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v7/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -617,7 +617,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v7/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -626,7 +626,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v7/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -635,7 +635,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v8/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -644,7 +644,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v8/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -653,7 +653,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v8/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -662,7 +662,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v9/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -671,7 +671,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v9/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
@@ -680,21 +680,190 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v9/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
'protocol': 'm3u8_native',
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.64002a',
|
||||
}]
|
||||
}],
|
||||
{}
|
||||
),
|
||||
(
|
||||
'bipbop_16x9',
|
||||
'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
[{
|
||||
'format_id': 'bipbop_audio-BipBop Audio 2',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/alternate_audio_aac/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'language': 'eng',
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'vcodec': 'none',
|
||||
'audio_ext': 'mp4',
|
||||
'video_ext': 'none',
|
||||
}, {
|
||||
'format_id': '41',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear0/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'tbr': 41.457,
|
||||
'ext': 'mp4',
|
||||
'fps': None,
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'vcodec': 'none',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'audio_ext': 'mp4',
|
||||
'video_ext': 'none',
|
||||
'abr': 41.457,
|
||||
}, {
|
||||
'format_id': '263',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear1/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'tbr': 263.851,
|
||||
'ext': 'mp4',
|
||||
'fps': None,
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'width': 416,
|
||||
'height': 234,
|
||||
'vcodec': 'avc1.4d400d',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 263.851,
|
||||
'abr': 0,
|
||||
}, {
|
||||
'format_id': '577',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear2/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'tbr': 577.61,
|
||||
'ext': 'mp4',
|
||||
'fps': None,
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'width': 640,
|
||||
'height': 360,
|
||||
'vcodec': 'avc1.4d401e',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 577.61,
|
||||
'abr': 0,
|
||||
}, {
|
||||
'format_id': '915',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear3/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'tbr': 915.905,
|
||||
'ext': 'mp4',
|
||||
'fps': None,
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'width': 960,
|
||||
'height': 540,
|
||||
'vcodec': 'avc1.4d401f',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 915.905,
|
||||
'abr': 0,
|
||||
}, {
|
||||
'format_id': '1030',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear4/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'tbr': 1030.138,
|
||||
'ext': 'mp4',
|
||||
'fps': None,
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'width': 1280,
|
||||
'height': 720,
|
||||
'vcodec': 'avc1.4d401f',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 1030.138,
|
||||
'abr': 0,
|
||||
}, {
|
||||
'format_id': '1924',
|
||||
'format_index': None,
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear5/prog_index.m3u8',
|
||||
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
|
||||
'tbr': 1924.009,
|
||||
'ext': 'mp4',
|
||||
'fps': None,
|
||||
'protocol': 'm3u8_native',
|
||||
'preference': None,
|
||||
'quality': None,
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
'vcodec': 'avc1.4d401f',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 1924.009,
|
||||
'abr': 0,
|
||||
}],
|
||||
{
|
||||
'en': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}],
|
||||
'fr': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}],
|
||||
'es': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}],
|
||||
'ja': [{
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}, {
|
||||
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn_forced/prog_index.m3u8',
|
||||
'ext': 'vtt',
|
||||
'protocol': 'm3u8_native'
|
||||
}],
|
||||
}
|
||||
),
|
||||
]
|
||||
|
||||
for m3u8_file, m3u8_url, expected_formats in _TEST_CASES:
|
||||
for m3u8_file, m3u8_url, expected_formats, expected_subs in _TEST_CASES:
|
||||
with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
|
||||
mode='r', encoding='utf-8') as f:
|
||||
formats = self.ie._parse_m3u8_formats(
|
||||
formats, subs = self.ie._parse_m3u8_formats_and_subtitles(
|
||||
f.read(), m3u8_url, ext='mp4')
|
||||
self.ie._sort_formats(formats)
|
||||
expect_value(self, formats, expected_formats, None)
|
||||
expect_value(self, subs, expected_subs, None)
|
||||
|
||||
def test_parse_mpd_formats(self):
|
||||
_TEST_CASES = [
|
||||
@@ -780,7 +949,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'tbr': 5997.485,
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
}]
|
||||
}],
|
||||
{},
|
||||
), (
|
||||
# https://github.com/ytdl-org/youtube-dl/pull/14844
|
||||
'urls_only',
|
||||
@@ -863,7 +1033,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'tbr': 4400,
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
}]
|
||||
}],
|
||||
{},
|
||||
), (
|
||||
# https://github.com/ytdl-org/youtube-dl/issues/20346
|
||||
# Media considered unfragmented even though it contains
|
||||
@@ -909,18 +1080,328 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
|
||||
'width': 360,
|
||||
'height': 360,
|
||||
'fps': 30,
|
||||
}]
|
||||
}],
|
||||
{},
|
||||
), (
|
||||
'subtitles',
|
||||
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/',
|
||||
[{
|
||||
'format_id': 'audio=128001',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'ext': 'm4a',
|
||||
'tbr': 128.001,
|
||||
'asr': 48000,
|
||||
'format_note': 'DASH audio',
|
||||
'container': 'm4a_dash',
|
||||
'vcodec': 'none',
|
||||
'acodec': 'mp4a.40.2',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
'audio_ext': 'm4a',
|
||||
'video_ext': 'none',
|
||||
'abr': 128.001,
|
||||
}, {
|
||||
'format_id': 'video=100000',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'ext': 'mp4',
|
||||
'width': 336,
|
||||
'height': 144,
|
||||
'tbr': 100,
|
||||
'format_note': 'DASH video',
|
||||
'container': 'mp4_dash',
|
||||
'vcodec': 'avc1.4D401F',
|
||||
'acodec': 'none',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 100,
|
||||
}, {
|
||||
'format_id': 'video=326000',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'ext': 'mp4',
|
||||
'width': 562,
|
||||
'height': 240,
|
||||
'tbr': 326,
|
||||
'format_note': 'DASH video',
|
||||
'container': 'mp4_dash',
|
||||
'vcodec': 'avc1.4D401F',
|
||||
'acodec': 'none',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 326,
|
||||
}, {
|
||||
'format_id': 'video=698000',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'ext': 'mp4',
|
||||
'width': 844,
|
||||
'height': 360,
|
||||
'tbr': 698,
|
||||
'format_note': 'DASH video',
|
||||
'container': 'mp4_dash',
|
||||
'vcodec': 'avc1.4D401F',
|
||||
'acodec': 'none',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 698,
|
||||
}, {
|
||||
'format_id': 'video=1493000',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'ext': 'mp4',
|
||||
'width': 1126,
|
||||
'height': 480,
|
||||
'tbr': 1493,
|
||||
'format_note': 'DASH video',
|
||||
'container': 'mp4_dash',
|
||||
'vcodec': 'avc1.4D401F',
|
||||
'acodec': 'none',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 1493,
|
||||
}, {
|
||||
'format_id': 'video=4482000',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'ext': 'mp4',
|
||||
'width': 1688,
|
||||
'height': 720,
|
||||
'tbr': 4482,
|
||||
'format_note': 'DASH video',
|
||||
'container': 'mp4_dash',
|
||||
'vcodec': 'avc1.4D401F',
|
||||
'acodec': 'none',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
'video_ext': 'mp4',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 4482,
|
||||
}],
|
||||
{
|
||||
'en': [
|
||||
{
|
||||
'ext': 'mp4',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
|
||||
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
|
||||
'protocol': 'http_dash_segments',
|
||||
}
|
||||
]
|
||||
},
|
||||
)
|
||||
]
|
||||
|
||||
for mpd_file, mpd_url, mpd_base_url, expected_formats in _TEST_CASES:
|
||||
for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
|
||||
with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
|
||||
mode='r', encoding='utf-8') as f:
|
||||
formats = self.ie._parse_mpd_formats(
|
||||
formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
|
||||
compat_etree_fromstring(f.read().encode('utf-8')),
|
||||
mpd_base_url=mpd_base_url, mpd_url=mpd_url)
|
||||
self.ie._sort_formats(formats)
|
||||
expect_value(self, formats, expected_formats, None)
|
||||
expect_value(self, subtitles, expected_subtitles, None)
|
||||
|
||||
def test_parse_ism_formats(self):
|
||||
_TEST_CASES = [
|
||||
(
|
||||
'sintel',
|
||||
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
[{
|
||||
'format_id': 'audio-128',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'ext': 'isma',
|
||||
'tbr': 128,
|
||||
'asr': 48000,
|
||||
'vcodec': 'none',
|
||||
'acodec': 'AACL',
|
||||
'protocol': 'ism',
|
||||
'_download_params': {
|
||||
'stream_type': 'audio',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'width': 0,
|
||||
'height': 0,
|
||||
'fourcc': 'AACL',
|
||||
'codec_private_data': '1190',
|
||||
'sampling_rate': 48000,
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
},
|
||||
'audio_ext': 'isma',
|
||||
'video_ext': 'none',
|
||||
'abr': 128,
|
||||
}, {
|
||||
'format_id': 'video-100',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'ext': 'ismv',
|
||||
'width': 336,
|
||||
'height': 144,
|
||||
'tbr': 100,
|
||||
'vcodec': 'AVC1',
|
||||
'acodec': 'none',
|
||||
'protocol': 'ism',
|
||||
'_download_params': {
|
||||
'stream_type': 'video',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'width': 336,
|
||||
'height': 144,
|
||||
'fourcc': 'AVC1',
|
||||
'codec_private_data': '00000001674D401FDA0544EFFC2D002CBC40000003004000000C03C60CA80000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
},
|
||||
'video_ext': 'ismv',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 100,
|
||||
}, {
|
||||
'format_id': 'video-326',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'ext': 'ismv',
|
||||
'width': 562,
|
||||
'height': 240,
|
||||
'tbr': 326,
|
||||
'vcodec': 'AVC1',
|
||||
'acodec': 'none',
|
||||
'protocol': 'ism',
|
||||
'_download_params': {
|
||||
'stream_type': 'video',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'width': 562,
|
||||
'height': 240,
|
||||
'fourcc': 'AVC1',
|
||||
'codec_private_data': '00000001674D401FDA0241FE23FFC3BC83BA44000003000400000300C03C60CA800000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
},
|
||||
'video_ext': 'ismv',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 326,
|
||||
}, {
|
||||
'format_id': 'video-698',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'ext': 'ismv',
|
||||
'width': 844,
|
||||
'height': 360,
|
||||
'tbr': 698,
|
||||
'vcodec': 'AVC1',
|
||||
'acodec': 'none',
|
||||
'protocol': 'ism',
|
||||
'_download_params': {
|
||||
'stream_type': 'video',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'width': 844,
|
||||
'height': 360,
|
||||
'fourcc': 'AVC1',
|
||||
'codec_private_data': '00000001674D401FDA0350BFB97FF06AF06AD1000003000100000300300F1832A00000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
},
|
||||
'video_ext': 'ismv',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 698,
|
||||
}, {
|
||||
'format_id': 'video-1493',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'ext': 'ismv',
|
||||
'width': 1126,
|
||||
'height': 480,
|
||||
'tbr': 1493,
|
||||
'vcodec': 'AVC1',
|
||||
'acodec': 'none',
|
||||
'protocol': 'ism',
|
||||
'_download_params': {
|
||||
'stream_type': 'video',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'width': 1126,
|
||||
'height': 480,
|
||||
'fourcc': 'AVC1',
|
||||
'codec_private_data': '00000001674D401FDA011C3DE6FFF0D890D871000003000100000300300F1832A00000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
},
|
||||
'video_ext': 'ismv',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 1493,
|
||||
}, {
|
||||
'format_id': 'video-4482',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'ext': 'ismv',
|
||||
'width': 1688,
|
||||
'height': 720,
|
||||
'tbr': 4482,
|
||||
'vcodec': 'AVC1',
|
||||
'acodec': 'none',
|
||||
'protocol': 'ism',
|
||||
'_download_params': {
|
||||
'stream_type': 'video',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'width': 1688,
|
||||
'height': 720,
|
||||
'fourcc': 'AVC1',
|
||||
'codec_private_data': '00000001674D401FDA01A816F97FFC1ABC1AB440000003004000000C03C60CA80000000168EF32C8',
|
||||
'channels': 2,
|
||||
'bits_per_sample': 16,
|
||||
'nal_unit_length_field': 4
|
||||
},
|
||||
'video_ext': 'ismv',
|
||||
'audio_ext': 'none',
|
||||
'vbr': 4482,
|
||||
}],
|
||||
{
|
||||
'eng': [
|
||||
{
|
||||
'ext': 'ismt',
|
||||
'protocol': 'ism',
|
||||
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
|
||||
'_download_params': {
|
||||
'stream_type': 'text',
|
||||
'duration': 8880746666,
|
||||
'timescale': 10000000,
|
||||
'fourcc': 'TTML',
|
||||
'codec_private_data': ''
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:
|
||||
with io.open('./test/testdata/ism/%s.Manifest' % ism_file,
|
||||
mode='r', encoding='utf-8') as f:
|
||||
formats, subtitles = self.ie._parse_ism_formats_and_subtitles(
|
||||
compat_etree_fromstring(f.read().encode('utf-8')), ism_url=ism_url)
|
||||
self.ie._sort_formats(formats)
|
||||
expect_value(self, formats, expected_formats, None)
|
||||
expect_value(self, subtitles, expected_subtitles, None)
|
||||
|
||||
def test_parse_f4m_formats(self):
|
||||
_TEST_CASES = [
|
||||
|
||||
@@ -29,6 +29,7 @@ class YDL(FakeYDL):
|
||||
self.msgs = []
|
||||
|
||||
def process_info(self, info_dict):
|
||||
info_dict.pop('__original_infodict', None)
|
||||
self.downloaded_info_dicts.append(info_dict)
|
||||
|
||||
def to_screen(self, msg):
|
||||
@@ -311,8 +312,8 @@ class TestFormatSelection(unittest.TestCase):
|
||||
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
|
||||
|
||||
def test_youtube_format_selection(self):
|
||||
# FIXME: Rewrite in accordance with the new format sorting options
|
||||
return
|
||||
# disabled for now - this needs some changes
|
||||
|
||||
order = [
|
||||
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13',
|
||||
@@ -601,6 +602,26 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
self.assertTrue(subs)
|
||||
self.assertEqual(set(subs.keys()), set(['es', 'fr']))
|
||||
|
||||
result = get_info({'writesubtitles': True, 'subtitleslangs': ['all', '-en']})
|
||||
subs = result['requested_subtitles']
|
||||
self.assertTrue(subs)
|
||||
self.assertEqual(set(subs.keys()), set(['es', 'fr']))
|
||||
|
||||
result = get_info({'writesubtitles': True, 'subtitleslangs': ['en', 'fr', '-en']})
|
||||
subs = result['requested_subtitles']
|
||||
self.assertTrue(subs)
|
||||
self.assertEqual(set(subs.keys()), set(['fr']))
|
||||
|
||||
result = get_info({'writesubtitles': True, 'subtitleslangs': ['-en', 'en']})
|
||||
subs = result['requested_subtitles']
|
||||
self.assertTrue(subs)
|
||||
self.assertEqual(set(subs.keys()), set(['en']))
|
||||
|
||||
result = get_info({'writesubtitles': True, 'subtitleslangs': ['e.+']})
|
||||
subs = result['requested_subtitles']
|
||||
self.assertTrue(subs)
|
||||
self.assertEqual(set(subs.keys()), set(['es', 'en']))
|
||||
|
||||
result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
|
||||
subs = result['requested_subtitles']
|
||||
self.assertTrue(subs)
|
||||
@@ -635,6 +656,8 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
'height': 1080,
|
||||
'title1': '$PATH',
|
||||
'title2': '%PATH%',
|
||||
'timestamp': 1618488000,
|
||||
'formats': [{'id': 'id1'}, {'id': 'id2'}]
|
||||
}
|
||||
|
||||
def fname(templ, na_placeholder='NA'):
|
||||
@@ -651,6 +674,7 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
# Or by provided placeholder
|
||||
self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder='none'), 'none-none-1234.mp4')
|
||||
self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder=''), '--1234.mp4')
|
||||
self.assertEqual(fname('%(height)s.%(ext)s'), '1080.mp4')
|
||||
self.assertEqual(fname('%(height)d.%(ext)s'), '1080.mp4')
|
||||
self.assertEqual(fname('%(height)6d.%(ext)s'), ' 1080.mp4')
|
||||
self.assertEqual(fname('%(height)-6d.%(ext)s'), '1080 .mp4')
|
||||
@@ -668,6 +692,12 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
self.assertEqual(fname('%%(width)06d.%(ext)s'), '%(width)06d.mp4')
|
||||
self.assertEqual(fname('Hello %(title1)s'), 'Hello $PATH')
|
||||
self.assertEqual(fname('Hello %(title2)s'), 'Hello %PATH%')
|
||||
self.assertEqual(fname('%(timestamp+-1000>%H-%M-%S)s'), '11-43-20')
|
||||
self.assertEqual(fname('%(id+1)05d'), '01235')
|
||||
self.assertEqual(fname('%(width+100)05d'), 'NA')
|
||||
self.assertEqual(fname('%(formats.0)s').replace("u", ""), "{'id' - 'id1'}")
|
||||
self.assertEqual(fname('%(formats.-1.id)s'), 'id2')
|
||||
self.assertEqual(fname('%(formats.2)s'), 'NA')
|
||||
|
||||
def test_format_note(self):
|
||||
ydl = YoutubeDL()
|
||||
@@ -726,7 +756,7 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
def process_info(self, info_dict):
|
||||
super(YDL, self).process_info(info_dict)
|
||||
|
||||
def _match_entry(self, info_dict, incomplete):
|
||||
def _match_entry(self, info_dict, incomplete=False):
|
||||
res = super(FilterYDL, self)._match_entry(info_dict, incomplete)
|
||||
if res is None:
|
||||
self.downloaded_info_dicts.append(info_dict)
|
||||
|
||||
@@ -37,7 +37,6 @@ class TestAllURLsMatching(unittest.TestCase):
|
||||
assertPlaylist('PL63F0C78739B09958')
|
||||
assertTab('https://www.youtube.com/AsapSCIENCE')
|
||||
assertTab('https://www.youtube.com/embedded')
|
||||
assertTab('https://www.youtube.com/feed') # Own channel's home page
|
||||
assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
|
||||
assertTab('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
|
||||
assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
|
||||
@@ -73,15 +72,6 @@ class TestAllURLsMatching(unittest.TestCase):
|
||||
self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
|
||||
self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
|
||||
|
||||
def test_youtube_extract(self):
|
||||
assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
|
||||
assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
|
||||
assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
|
||||
|
||||
def test_facebook_matching(self):
|
||||
self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
|
||||
self.assertTrue(FacebookIE.suitable('https://www.facebook.com/cindyweather?fref=ts#!/photo.php?v=10152183998945793'))
|
||||
|
||||
@@ -121,6 +121,7 @@ def generator(test_case, tname):
|
||||
params['outtmpl'] = tname + '_' + params['outtmpl']
|
||||
if is_playlist and 'playlist' not in test_case:
|
||||
params.setdefault('extract_flat', 'in_playlist')
|
||||
params.setdefault('playlistend', test_case.get('playlist_mincount'))
|
||||
params.setdefault('skip_download', True)
|
||||
|
||||
ydl = YoutubeDL(params, auto_init=False)
|
||||
|
||||
@@ -39,6 +39,16 @@ class TestExecution(unittest.TestCase):
|
||||
_, stderr = p.communicate()
|
||||
self.assertFalse(stderr)
|
||||
|
||||
def test_lazy_extractors(self):
|
||||
try:
|
||||
subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=_DEV_NULL)
|
||||
finally:
|
||||
try:
|
||||
os.remove('yt_dlp/extractor/lazy_extractors.py')
|
||||
except (IOError, OSError):
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
||||
@@ -8,16 +8,50 @@ import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from yt_dlp.postprocessor import MetadataFromFieldPP, MetadataFromTitlePP
|
||||
from yt_dlp.postprocessor import (
|
||||
FFmpegThumbnailsConvertorPP,
|
||||
MetadataFromFieldPP,
|
||||
MetadataFromTitlePP,
|
||||
)
|
||||
|
||||
|
||||
class TestMetadataFromField(unittest.TestCase):
|
||||
def test_format_to_regex(self):
|
||||
pp = MetadataFromFieldPP(None, ['title:%(title)s - %(artist)s'])
|
||||
self.assertEqual(pp._data[0]['regex'], r'(?P<title>[^\r\n]+)\ \-\ (?P<artist>[^\r\n]+)')
|
||||
self.assertEqual(pp._data[0]['regex'], r'(?P<title>.+)\ \-\ (?P<artist>.+)')
|
||||
|
||||
def test_field_to_outtmpl(self):
|
||||
pp = MetadataFromFieldPP(None, ['title:%(title)s : %(artist)s'])
|
||||
self.assertEqual(pp._data[0]['tmpl'], '%(title)s')
|
||||
|
||||
def test_in_out_seperation(self):
|
||||
pp = MetadataFromFieldPP(None, ['%(title)s \\: %(artist)s:%(title)s : %(artist)s'])
|
||||
self.assertEqual(pp._data[0]['in'], '%(title)s : %(artist)s')
|
||||
self.assertEqual(pp._data[0]['out'], '%(title)s : %(artist)s')
|
||||
|
||||
|
||||
class TestMetadataFromTitle(unittest.TestCase):
|
||||
def test_format_to_regex(self):
|
||||
pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
|
||||
self.assertEqual(pp._titleregex, r'(?P<title>[^\r\n]+)\ \-\ (?P<artist>[^\r\n]+)')
|
||||
self.assertEqual(pp._titleregex, r'(?P<title>.+)\ \-\ (?P<artist>.+)')
|
||||
|
||||
|
||||
class TestConvertThumbnail(unittest.TestCase):
|
||||
def test_escaping(self):
|
||||
pp = FFmpegThumbnailsConvertorPP()
|
||||
if not pp.available:
|
||||
print('Skipping: ffmpeg not found')
|
||||
return
|
||||
|
||||
file = 'test/testdata/thumbnails/foo %d bar/foo_%d.{}'
|
||||
tests = (('webp', 'png'), ('png', 'jpg'))
|
||||
|
||||
for inp, out in tests:
|
||||
out_file = file.format(out)
|
||||
if os.path.exists(out_file):
|
||||
os.remove(out_file)
|
||||
pp.convert_thumbnail(file.format(inp), out)
|
||||
assert os.path.exists(out_file)
|
||||
|
||||
for _, out in tests:
|
||||
os.remove(file.format(out))
|
||||
|
||||
@@ -23,6 +23,7 @@ from yt_dlp.utils import (
|
||||
clean_html,
|
||||
clean_podcast_url,
|
||||
date_from_str,
|
||||
datetime_from_str,
|
||||
DateRange,
|
||||
detect_exe_version,
|
||||
determine_ext,
|
||||
@@ -65,6 +66,7 @@ from yt_dlp.utils import (
|
||||
sanitize_filename,
|
||||
sanitize_path,
|
||||
sanitize_url,
|
||||
sanitized_Request,
|
||||
expand_path,
|
||||
prepend_extension,
|
||||
replace_extension,
|
||||
@@ -237,6 +239,16 @@ class TestUtil(unittest.TestCase):
|
||||
self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
|
||||
self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
|
||||
self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
|
||||
self.assertEqual(sanitize_url('foo bar'), 'foo bar')
|
||||
|
||||
def test_extract_basic_auth(self):
|
||||
auth_header = lambda url: sanitized_Request(url).get_header('Authorization')
|
||||
self.assertFalse(auth_header('http://foo.bar'))
|
||||
self.assertFalse(auth_header('http://:foo.bar'))
|
||||
self.assertEqual(auth_header('http://@foo.bar'), 'Basic Og==')
|
||||
self.assertEqual(auth_header('http://:pass@foo.bar'), 'Basic OnBhc3M=')
|
||||
self.assertEqual(auth_header('http://user:@foo.bar'), 'Basic dXNlcjo=')
|
||||
self.assertEqual(auth_header('http://user:pass@foo.bar'), 'Basic dXNlcjpwYXNz')
|
||||
|
||||
def test_expand_path(self):
|
||||
def env(var):
|
||||
@@ -311,8 +323,18 @@ class TestUtil(unittest.TestCase):
|
||||
self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
|
||||
self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
|
||||
self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
|
||||
self.assertEqual(date_from_str('now+365day'), date_from_str('now+1year'))
|
||||
self.assertEqual(date_from_str('now+30day'), date_from_str('now+1month'))
|
||||
self.assertEqual(date_from_str('20200229+365day'), date_from_str('20200229+1year'))
|
||||
self.assertEqual(date_from_str('20210131+28day'), date_from_str('20210131+1month'))
|
||||
|
||||
def test_datetime_from_str(self):
|
||||
self.assertEqual(datetime_from_str('yesterday', precision='day'), datetime_from_str('now-1day', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('now+7day', precision='day'), datetime_from_str('now+1week', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('now+14day', precision='day'), datetime_from_str('now+2week', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('20200229+365day', precision='day'), datetime_from_str('20200229+1year', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('20210131+28day', precision='day'), datetime_from_str('20210131+1month', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('20210131+59day', precision='day'), datetime_from_str('20210131+2month', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
|
||||
self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))
|
||||
|
||||
def test_daterange(self):
|
||||
_20century = DateRange("19000101", "20000101")
|
||||
|
||||
26
test/test_youtube_misc.py
Normal file
26
test/test_youtube_misc.py
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
|
||||
from yt_dlp.extractor import YoutubeIE
|
||||
|
||||
|
||||
class TestYoutubeMisc(unittest.TestCase):
|
||||
def test_youtube_extract(self):
|
||||
assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
|
||||
assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc')
|
||||
assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
|
||||
assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
988
test/testdata/ism/sintel.Manifest
vendored
Normal file
988
test/testdata/ism/sintel.Manifest
vendored
Normal file
@@ -0,0 +1,988 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Created with Unified Streaming Platform (version=1.10.18-20255) -->
|
||||
<SmoothStreamingMedia
|
||||
MajorVersion="2"
|
||||
MinorVersion="0"
|
||||
TimeScale="10000000"
|
||||
Duration="8880746666">
|
||||
<StreamIndex
|
||||
Type="audio"
|
||||
QualityLevels="1"
|
||||
TimeScale="10000000"
|
||||
Name="audio"
|
||||
Chunks="445"
|
||||
Url="QualityLevels({bitrate})/Fragments(audio={start time})">
|
||||
<QualityLevel
|
||||
Index="0"
|
||||
Bitrate="128001"
|
||||
CodecPrivateData="1190"
|
||||
SamplingRate="48000"
|
||||
Channels="2"
|
||||
BitsPerSample="16"
|
||||
PacketSize="4"
|
||||
AudioTag="255"
|
||||
FourCC="AACL" />
|
||||
<c t="0" d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="20053333" />
|
||||
<c d="20053333" />
|
||||
<c d="20053334" />
|
||||
<c d="19840000" />
|
||||
<c d="746666" />
|
||||
</StreamIndex>
|
||||
<StreamIndex
|
||||
Type="text"
|
||||
QualityLevels="1"
|
||||
TimeScale="10000000"
|
||||
Language="eng"
|
||||
Subtype="CAPT"
|
||||
Name="textstream_eng"
|
||||
Chunks="11"
|
||||
Url="QualityLevels({bitrate})/Fragments(textstream_eng={start time})">
|
||||
<QualityLevel
|
||||
Index="0"
|
||||
Bitrate="1000"
|
||||
CodecPrivateData=""
|
||||
FourCC="TTML" />
|
||||
<c t="0" d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="600000000" />
|
||||
<c d="240000000" />
|
||||
</StreamIndex>
|
||||
<StreamIndex
|
||||
Type="video"
|
||||
QualityLevels="5"
|
||||
TimeScale="10000000"
|
||||
Name="video"
|
||||
Chunks="444"
|
||||
Url="QualityLevels({bitrate})/Fragments(video={start time})"
|
||||
MaxWidth="1688"
|
||||
MaxHeight="720"
|
||||
DisplayWidth="1689"
|
||||
DisplayHeight="720">
|
||||
<QualityLevel
|
||||
Index="0"
|
||||
Bitrate="100000"
|
||||
CodecPrivateData="00000001674D401FDA0544EFFC2D002CBC40000003004000000C03C60CA80000000168EF32C8"
|
||||
MaxWidth="336"
|
||||
MaxHeight="144"
|
||||
FourCC="AVC1" />
|
||||
<QualityLevel
|
||||
Index="1"
|
||||
Bitrate="326000"
|
||||
CodecPrivateData="00000001674D401FDA0241FE23FFC3BC83BA44000003000400000300C03C60CA800000000168EF32C8"
|
||||
MaxWidth="562"
|
||||
MaxHeight="240"
|
||||
FourCC="AVC1" />
|
||||
<QualityLevel
|
||||
Index="2"
|
||||
Bitrate="698000"
|
||||
CodecPrivateData="00000001674D401FDA0350BFB97FF06AF06AD1000003000100000300300F1832A00000000168EF32C8"
|
||||
MaxWidth="844"
|
||||
MaxHeight="360"
|
||||
FourCC="AVC1" />
|
||||
<QualityLevel
|
||||
Index="3"
|
||||
Bitrate="1493000"
|
||||
CodecPrivateData="00000001674D401FDA011C3DE6FFF0D890D871000003000100000300300F1832A00000000168EF32C8"
|
||||
MaxWidth="1126"
|
||||
MaxHeight="480"
|
||||
FourCC="AVC1" />
|
||||
<QualityLevel
|
||||
Index="4"
|
||||
Bitrate="4482000"
|
||||
CodecPrivateData="00000001674D401FDA01A816F97FFC1ABC1AB440000003004000000C03C60CA80000000168EF32C8"
|
||||
MaxWidth="1688"
|
||||
MaxHeight="720"
|
||||
FourCC="AVC1" />
|
||||
<c t="0" d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
<c d="20000000" />
|
||||
</StreamIndex>
|
||||
</SmoothStreamingMedia>
|
||||
38
test/testdata/m3u8/bipbop_16x9.m3u8
vendored
Normal file
38
test/testdata/m3u8/bipbop_16x9.m3u8
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
#EXTM3U
|
||||
|
||||
#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="bipbop_audio",LANGUAGE="eng",NAME="BipBop Audio 1",AUTOSELECT=YES,DEFAULT=YES
|
||||
#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="bipbop_audio",LANGUAGE="eng",NAME="BipBop Audio 2",AUTOSELECT=NO,DEFAULT=NO,URI="alternate_audio_aac/prog_index.m3u8"
|
||||
|
||||
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="English",DEFAULT=YES,AUTOSELECT=YES,FORCED=NO,LANGUAGE="en",CHARACTERISTICS="public.accessibility.transcribes-spoken-dialog, public.accessibility.describes-music-and-sound",URI="subtitles/eng/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="English (Forced)",DEFAULT=NO,AUTOSELECT=NO,FORCED=YES,LANGUAGE="en",URI="subtitles/eng_forced/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="Français",DEFAULT=NO,AUTOSELECT=YES,FORCED=NO,LANGUAGE="fr",CHARACTERISTICS="public.accessibility.transcribes-spoken-dialog, public.accessibility.describes-music-and-sound",URI="subtitles/fra/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="Français (Forced)",DEFAULT=NO,AUTOSELECT=NO,FORCED=YES,LANGUAGE="fr",URI="subtitles/fra_forced/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="Español",DEFAULT=NO,AUTOSELECT=YES,FORCED=NO,LANGUAGE="es",CHARACTERISTICS="public.accessibility.transcribes-spoken-dialog, public.accessibility.describes-music-and-sound",URI="subtitles/spa/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="Español (Forced)",DEFAULT=NO,AUTOSELECT=NO,FORCED=YES,LANGUAGE="es",URI="subtitles/spa_forced/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="日本語",DEFAULT=NO,AUTOSELECT=YES,FORCED=NO,LANGUAGE="ja",CHARACTERISTICS="public.accessibility.transcribes-spoken-dialog, public.accessibility.describes-music-and-sound",URI="subtitles/jpn/prog_index.m3u8"
|
||||
#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="subs",NAME="日本語 (Forced)",DEFAULT=NO,AUTOSELECT=NO,FORCED=YES,LANGUAGE="ja",URI="subtitles/jpn_forced/prog_index.m3u8"
|
||||
|
||||
|
||||
#EXT-X-STREAM-INF:BANDWIDTH=263851,CODECS="mp4a.40.2, avc1.4d400d",RESOLUTION=416x234,AUDIO="bipbop_audio",SUBTITLES="subs"
|
||||
gear1/prog_index.m3u8
|
||||
#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=28451,CODECS="avc1.4d400d",URI="gear1/iframe_index.m3u8"
|
||||
|
||||
#EXT-X-STREAM-INF:BANDWIDTH=577610,CODECS="mp4a.40.2, avc1.4d401e",RESOLUTION=640x360,AUDIO="bipbop_audio",SUBTITLES="subs"
|
||||
gear2/prog_index.m3u8
|
||||
#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=181534,CODECS="avc1.4d401e",URI="gear2/iframe_index.m3u8"
|
||||
|
||||
#EXT-X-STREAM-INF:BANDWIDTH=915905,CODECS="mp4a.40.2, avc1.4d401f",RESOLUTION=960x540,AUDIO="bipbop_audio",SUBTITLES="subs"
|
||||
gear3/prog_index.m3u8
|
||||
#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=297056,CODECS="avc1.4d401f",URI="gear3/iframe_index.m3u8"
|
||||
|
||||
#EXT-X-STREAM-INF:BANDWIDTH=1030138,CODECS="mp4a.40.2, avc1.4d401f",RESOLUTION=1280x720,AUDIO="bipbop_audio",SUBTITLES="subs"
|
||||
gear4/prog_index.m3u8
|
||||
#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=339492,CODECS="avc1.4d401f",URI="gear4/iframe_index.m3u8"
|
||||
|
||||
#EXT-X-STREAM-INF:BANDWIDTH=1924009,CODECS="mp4a.40.2, avc1.4d401f",RESOLUTION=1920x1080,AUDIO="bipbop_audio",SUBTITLES="subs"
|
||||
gear5/prog_index.m3u8
|
||||
#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=669554,CODECS="avc1.4d401f",URI="gear5/iframe_index.m3u8"
|
||||
|
||||
#EXT-X-STREAM-INF:BANDWIDTH=41457,CODECS="mp4a.40.2",AUDIO="bipbop_audio",SUBTITLES="subs"
|
||||
gear0/prog_index.m3u8
|
||||
351
test/testdata/mpd/subtitles.mpd
vendored
Normal file
351
test/testdata/mpd/subtitles.mpd
vendored
Normal file
@@ -0,0 +1,351 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Created with Unified Streaming Platform (version=1.10.18-20255) -->
|
||||
<MPD
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns="urn:mpeg:dash:schema:mpd:2011"
|
||||
xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd"
|
||||
type="static"
|
||||
mediaPresentationDuration="PT14M48S"
|
||||
maxSegmentDuration="PT1M"
|
||||
minBufferTime="PT10S"
|
||||
profiles="urn:mpeg:dash:profile:isoff-live:2011">
|
||||
<Period
|
||||
id="1"
|
||||
duration="PT14M48S">
|
||||
<BaseURL>dash/</BaseURL>
|
||||
<AdaptationSet
|
||||
id="1"
|
||||
group="1"
|
||||
contentType="audio"
|
||||
segmentAlignment="true"
|
||||
audioSamplingRate="48000"
|
||||
mimeType="audio/mp4"
|
||||
codecs="mp4a.40.2"
|
||||
startWithSAP="1">
|
||||
<AudioChannelConfiguration
|
||||
schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011"
|
||||
value="2" />
|
||||
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="main" />
|
||||
<SegmentTemplate
|
||||
timescale="48000"
|
||||
initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
|
||||
media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
|
||||
<SegmentTimeline>
|
||||
<S t="0" d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="96256" r="2" />
|
||||
<S d="95232" />
|
||||
<S d="3584" />
|
||||
</SegmentTimeline>
|
||||
</SegmentTemplate>
|
||||
<Representation
|
||||
id="audio=128001"
|
||||
bandwidth="128001">
|
||||
</Representation>
|
||||
</AdaptationSet>
|
||||
<AdaptationSet
|
||||
id="2"
|
||||
group="3"
|
||||
contentType="text"
|
||||
lang="en"
|
||||
mimeType="application/mp4"
|
||||
codecs="stpp"
|
||||
startWithSAP="1">
|
||||
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="subtitle" />
|
||||
<SegmentTemplate
|
||||
timescale="1000"
|
||||
initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
|
||||
media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
|
||||
<SegmentTimeline>
|
||||
<S t="0" d="60000" r="9" />
|
||||
<S d="24000" />
|
||||
</SegmentTimeline>
|
||||
</SegmentTemplate>
|
||||
<Representation
|
||||
id="textstream_eng=1000"
|
||||
bandwidth="1000">
|
||||
</Representation>
|
||||
</AdaptationSet>
|
||||
<AdaptationSet
|
||||
id="3"
|
||||
group="2"
|
||||
contentType="video"
|
||||
par="960:409"
|
||||
minBandwidth="100000"
|
||||
maxBandwidth="4482000"
|
||||
maxWidth="1689"
|
||||
maxHeight="720"
|
||||
segmentAlignment="true"
|
||||
mimeType="video/mp4"
|
||||
codecs="avc1.4D401F"
|
||||
startWithSAP="1">
|
||||
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="main" />
|
||||
<SegmentTemplate
|
||||
timescale="12288"
|
||||
initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
|
||||
media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
|
||||
<SegmentTimeline>
|
||||
<S t="0" d="24576" r="443" />
|
||||
</SegmentTimeline>
|
||||
</SegmentTemplate>
|
||||
<Representation
|
||||
id="video=100000"
|
||||
bandwidth="100000"
|
||||
width="336"
|
||||
height="144"
|
||||
sar="2880:2863"
|
||||
scanType="progressive">
|
||||
</Representation>
|
||||
<Representation
|
||||
id="video=326000"
|
||||
bandwidth="326000"
|
||||
width="562"
|
||||
height="240"
|
||||
sar="115200:114929"
|
||||
scanType="progressive">
|
||||
</Representation>
|
||||
<Representation
|
||||
id="video=698000"
|
||||
bandwidth="698000"
|
||||
width="844"
|
||||
height="360"
|
||||
sar="86400:86299"
|
||||
scanType="progressive">
|
||||
</Representation>
|
||||
<Representation
|
||||
id="video=1493000"
|
||||
bandwidth="1493000"
|
||||
width="1126"
|
||||
height="480"
|
||||
sar="230400:230267"
|
||||
scanType="progressive">
|
||||
</Representation>
|
||||
<Representation
|
||||
id="video=4482000"
|
||||
bandwidth="4482000"
|
||||
width="1688"
|
||||
height="720"
|
||||
sar="86400:86299"
|
||||
scanType="progressive">
|
||||
</Representation>
|
||||
</AdaptationSet>
|
||||
</Period>
|
||||
</MPD>
|
||||
BIN
test/testdata/thumbnails/foo %d bar/foo_%d.webp
vendored
Normal file
BIN
test/testdata/thumbnails/foo %d bar/foo_%d.webp
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 3.8 KiB |
4
yt-dlp.sh
Normal file → Executable file
4
yt-dlp.sh
Normal file → Executable file
@@ -1,2 +1,2 @@
|
||||
#!/bin/bash
|
||||
python3 "$(dirname $(realpath $0))/yt_dlp/__main__.py" "$@"
|
||||
#!/bin/sh
|
||||
exec python3 "$(dirname "$(realpath "$0")")/yt_dlp/__main__.py" "$@"
|
||||
|
||||
1410
yt_dlp/YoutubeDL.py
1410
yt_dlp/YoutubeDL.py
File diff suppressed because it is too large
Load Diff
@@ -31,20 +31,26 @@ from .utils import (
|
||||
preferredencoding,
|
||||
read_batch_urls,
|
||||
RejectedVideoReached,
|
||||
REMUX_EXTENSIONS,
|
||||
render_table,
|
||||
SameFileError,
|
||||
setproctitle,
|
||||
std_headers,
|
||||
write_string,
|
||||
)
|
||||
from .update import update_self
|
||||
from .update import run_update
|
||||
from .downloader import (
|
||||
FileDownloader,
|
||||
)
|
||||
from .extractor import gen_extractors, list_extractors
|
||||
from .extractor.common import InfoExtractor
|
||||
from .extractor.adobepass import MSO_INFO
|
||||
from .postprocessor.ffmpeg import (
|
||||
FFmpegExtractAudioPP,
|
||||
FFmpegSubtitlesConvertorPP,
|
||||
FFmpegThumbnailsConvertorPP,
|
||||
FFmpegVideoConvertorPP,
|
||||
FFmpegVideoRemuxerPP,
|
||||
)
|
||||
from .postprocessor.metadatafromfield import MetadataFromFieldPP
|
||||
from .YoutubeDL import YoutubeDL
|
||||
|
||||
@@ -60,6 +66,7 @@ def _real_main(argv=None):
|
||||
setproctitle('yt-dlp')
|
||||
|
||||
parser, opts, args = parseOpts(argv)
|
||||
warnings = []
|
||||
|
||||
# Set user agent
|
||||
if opts.user_agent is not None:
|
||||
@@ -128,16 +135,12 @@ def _real_main(argv=None):
|
||||
parser.error('account username missing\n')
|
||||
if opts.ap_password is not None and opts.ap_username is None:
|
||||
parser.error('TV Provider account username missing\n')
|
||||
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
|
||||
parser.error('using output template conflicts with using title, video ID or auto number')
|
||||
if opts.autonumber_size is not None:
|
||||
if opts.autonumber_size <= 0:
|
||||
parser.error('auto number size must be positive')
|
||||
if opts.autonumber_start is not None:
|
||||
if opts.autonumber_start < 0:
|
||||
parser.error('auto number start must be positive or 0')
|
||||
if opts.usetitle and opts.useid:
|
||||
parser.error('using title conflicts with using video ID')
|
||||
if opts.username is not None and opts.password is None:
|
||||
opts.password = compat_getpass('Type account password and press [Return]: ')
|
||||
if opts.ap_username is not None and opts.ap_password is None:
|
||||
@@ -177,9 +180,10 @@ def _real_main(argv=None):
|
||||
parser.error('requests sleep interval must be positive or 0')
|
||||
if opts.ap_mso and opts.ap_mso not in MSO_INFO:
|
||||
parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers')
|
||||
if opts.overwrites:
|
||||
# --yes-overwrites implies --no-continue
|
||||
if opts.overwrites: # --yes-overwrites implies --no-continue
|
||||
opts.continue_dl = False
|
||||
if opts.concurrent_fragment_downloads <= 0:
|
||||
raise ValueError('Concurrent fragments must be positive')
|
||||
|
||||
def parse_retries(retries, name=''):
|
||||
if retries in ('inf', 'infinite'):
|
||||
@@ -211,44 +215,101 @@ def _real_main(argv=None):
|
||||
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
|
||||
raise ValueError('Playlist end must be greater than playlist start')
|
||||
if opts.extractaudio:
|
||||
if opts.audioformat not in ['best', 'aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
|
||||
if opts.audioformat not in ['best'] + list(FFmpegExtractAudioPP.SUPPORTED_EXTS):
|
||||
parser.error('invalid audio format specified')
|
||||
if opts.audioquality:
|
||||
opts.audioquality = opts.audioquality.strip('k').strip('K')
|
||||
if not opts.audioquality.isdigit():
|
||||
parser.error('invalid audio quality specified')
|
||||
if opts.recodevideo is not None:
|
||||
if opts.recodevideo not in REMUX_EXTENSIONS:
|
||||
parser.error('invalid video recode format specified')
|
||||
opts.recodevideo = opts.recodevideo.replace(' ', '')
|
||||
if not re.match(FFmpegVideoConvertorPP.FORMAT_RE, opts.recodevideo):
|
||||
parser.error('invalid video remux format specified')
|
||||
if opts.remuxvideo is not None:
|
||||
opts.remuxvideo = opts.remuxvideo.replace(' ', '')
|
||||
remux_regex = r'{0}(?:/{0})*$'.format(r'(?:\w+>)?(?:%s)' % '|'.join(REMUX_EXTENSIONS))
|
||||
if not re.match(remux_regex, opts.remuxvideo):
|
||||
if not re.match(FFmpegVideoRemuxerPP.FORMAT_RE, opts.remuxvideo):
|
||||
parser.error('invalid video remux format specified')
|
||||
if opts.convertsubtitles is not None:
|
||||
if opts.convertsubtitles not in ['srt', 'vtt', 'ass', 'lrc']:
|
||||
if opts.convertsubtitles not in FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS:
|
||||
parser.error('invalid subtitle format specified')
|
||||
if opts.convertthumbnails is not None:
|
||||
if opts.convertthumbnails not in FFmpegThumbnailsConvertorPP.SUPPORTED_EXTS:
|
||||
parser.error('invalid thumbnail format specified')
|
||||
|
||||
if opts.date is not None:
|
||||
date = DateRange.day(opts.date)
|
||||
else:
|
||||
date = DateRange(opts.dateafter, opts.datebefore)
|
||||
|
||||
# Do not download videos when there are audio-only formats
|
||||
def parse_compat_opts():
|
||||
parsed_compat_opts, compat_opts = set(), opts.compat_opts[::-1]
|
||||
while compat_opts:
|
||||
actual_opt = opt = compat_opts.pop().lower()
|
||||
if opt == 'youtube-dl':
|
||||
compat_opts.extend(['-multistreams', 'all'])
|
||||
elif opt == 'youtube-dlc':
|
||||
compat_opts.extend(['-no-youtube-channel-redirect', '-no-live-chat', 'all'])
|
||||
elif opt == 'all':
|
||||
parsed_compat_opts.update(all_compat_opts)
|
||||
elif opt == '-all':
|
||||
parsed_compat_opts = set()
|
||||
else:
|
||||
if opt[0] == '-':
|
||||
opt = opt[1:]
|
||||
parsed_compat_opts.discard(opt)
|
||||
else:
|
||||
parsed_compat_opts.update([opt])
|
||||
if opt not in all_compat_opts:
|
||||
parser.error('Invalid compatibility option %s' % actual_opt)
|
||||
return parsed_compat_opts
|
||||
|
||||
all_compat_opts = [
|
||||
'filename', 'format-sort', 'abort-on-error', 'format-spec', 'no-playlist-metafiles',
|
||||
'multistreams', 'no-live-chat', 'playlist-index', 'list-formats', 'no-direct-merge',
|
||||
'no-youtube-channel-redirect', 'no-youtube-unavailable-videos', 'no-attach-info-json',
|
||||
]
|
||||
compat_opts = parse_compat_opts()
|
||||
|
||||
def _unused_compat_opt(name):
|
||||
if name not in compat_opts:
|
||||
return False
|
||||
compat_opts.discard(name)
|
||||
compat_opts.update(['*%s' % name])
|
||||
return True
|
||||
|
||||
def set_default_compat(compat_name, opt_name, default=True, remove_compat=False):
|
||||
attr = getattr(opts, opt_name)
|
||||
if compat_name in compat_opts:
|
||||
if attr is None:
|
||||
setattr(opts, opt_name, not default)
|
||||
return True
|
||||
else:
|
||||
if remove_compat:
|
||||
_unused_compat_opt(compat_name)
|
||||
return False
|
||||
elif attr is None:
|
||||
setattr(opts, opt_name, default)
|
||||
return None
|
||||
|
||||
set_default_compat('abort-on-error', 'ignoreerrors')
|
||||
set_default_compat('no-playlist-metafiles', 'allow_playlist_files')
|
||||
if 'format-sort' in compat_opts:
|
||||
opts.format_sort.extend(InfoExtractor.FormatSort.ytdl_default)
|
||||
_video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)
|
||||
_audio_multistreams_set = set_default_compat('multistreams', 'allow_multiple_audio_streams', False, remove_compat=False)
|
||||
if _video_multistreams_set is False and _audio_multistreams_set is False:
|
||||
_unused_compat_opt('multistreams')
|
||||
outtmpl_default = opts.outtmpl.get('default')
|
||||
if 'filename' in compat_opts:
|
||||
if outtmpl_default is None:
|
||||
outtmpl_default = '%(title)s.%(id)s.%(ext)s'
|
||||
opts.outtmpl.update({'default': outtmpl_default})
|
||||
else:
|
||||
_unused_compat_opt('filename')
|
||||
|
||||
if opts.extractaudio and not opts.keepvideo and opts.format is None:
|
||||
opts.format = 'bestaudio/best'
|
||||
|
||||
outtmpl = opts.outtmpl
|
||||
if not outtmpl:
|
||||
outtmpl = {'default': (
|
||||
'%(title)s-%(id)s-%(format)s.%(ext)s' if opts.format == '-1' and opts.usetitle
|
||||
else '%(id)s-%(format)s.%(ext)s' if opts.format == '-1'
|
||||
else '%(autonumber)s-%(title)s-%(id)s.%(ext)s' if opts.usetitle and opts.autonumber
|
||||
else '%(title)s-%(id)s.%(ext)s' if opts.usetitle
|
||||
else '%(id)s.%(ext)s' if opts.useid
|
||||
else '%(autonumber)s-%(id)s.%(ext)s' if opts.autonumber
|
||||
else None)}
|
||||
outtmpl_default = outtmpl.get('default')
|
||||
if outtmpl_default is not None and not os.path.splitext(outtmpl_default)[1] and opts.extractaudio:
|
||||
parser.error('Cannot download a video and extract audio into the same'
|
||||
' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
|
||||
@@ -266,7 +327,7 @@ def _real_main(argv=None):
|
||||
if re.match(MetadataFromFieldPP.regex, f) is None:
|
||||
parser.error('invalid format string "%s" specified for --parse-metadata' % f)
|
||||
|
||||
any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
|
||||
any_getting = opts.forceprint or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
|
||||
any_printing = opts.print_json
|
||||
download_archive_fn = expand_path(opts.download_archive) if opts.download_archive is not None else opts.download_archive
|
||||
|
||||
@@ -276,10 +337,15 @@ def _real_main(argv=None):
|
||||
opts.writeinfojson = True
|
||||
|
||||
def report_conflict(arg1, arg2):
|
||||
write_string('WARNING: %s is ignored since %s was given\n' % (arg2, arg1), out=sys.stderr)
|
||||
warnings.append('%s is ignored since %s was given' % (arg2, arg1))
|
||||
|
||||
if opts.remuxvideo and opts.recodevideo:
|
||||
report_conflict('--recode-video', '--remux-video')
|
||||
opts.remuxvideo = False
|
||||
if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
|
||||
report_conflict('--split-chapter', '--sponskrub-cut')
|
||||
opts.sponskrub_cut = False
|
||||
|
||||
if opts.allow_unplayable_formats:
|
||||
if opts.extractaudio:
|
||||
report_conflict('--allow-unplayable-formats', '--extract-audio')
|
||||
@@ -315,7 +381,22 @@ def _real_main(argv=None):
|
||||
postprocessors.append({
|
||||
'key': 'MetadataFromField',
|
||||
'formats': opts.metafromfield,
|
||||
'when': 'beforedl'
|
||||
# Run this immediately after extraction is complete
|
||||
'when': 'pre_process'
|
||||
})
|
||||
if opts.convertsubtitles:
|
||||
postprocessors.append({
|
||||
'key': 'FFmpegSubtitlesConvertor',
|
||||
'format': opts.convertsubtitles,
|
||||
# Run this before the actual video download
|
||||
'when': 'before_dl'
|
||||
})
|
||||
if opts.convertthumbnails:
|
||||
postprocessors.append({
|
||||
'key': 'FFmpegThumbnailsConvertor',
|
||||
'format': opts.convertthumbnails,
|
||||
# Run this before the actual video download
|
||||
'when': 'before_dl'
|
||||
})
|
||||
if opts.extractaudio:
|
||||
postprocessors.append({
|
||||
@@ -344,15 +425,11 @@ def _real_main(argv=None):
|
||||
# so metadata can be added here.
|
||||
if opts.addmetadata:
|
||||
postprocessors.append({'key': 'FFmpegMetadata'})
|
||||
if opts.convertsubtitles:
|
||||
postprocessors.append({
|
||||
'key': 'FFmpegSubtitlesConvertor',
|
||||
'format': opts.convertsubtitles,
|
||||
})
|
||||
if opts.embedsubtitles:
|
||||
already_have_subtitle = opts.writesubtitles
|
||||
postprocessors.append({
|
||||
'key': 'FFmpegEmbedSubtitle',
|
||||
# already_have_subtitle = True prevents the file from being deleted after embedding
|
||||
'already_have_subtitle': already_have_subtitle
|
||||
})
|
||||
if not already_have_subtitle:
|
||||
@@ -361,19 +438,9 @@ def _real_main(argv=None):
|
||||
# this was the old behaviour if only --all-sub was given.
|
||||
if opts.allsubtitles and not opts.writeautomaticsub:
|
||||
opts.writesubtitles = True
|
||||
if opts.embedthumbnail:
|
||||
already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
|
||||
postprocessors.append({
|
||||
'key': 'EmbedThumbnail',
|
||||
'already_have_thumbnail': already_have_thumbnail
|
||||
})
|
||||
if not already_have_thumbnail:
|
||||
opts.writethumbnail = True
|
||||
# XAttrMetadataPP should be run after post-processors that may change file
|
||||
# contents
|
||||
if opts.xattrs:
|
||||
postprocessors.append({'key': 'XAttrMetadata'})
|
||||
# This should be below all ffmpeg PP because it may cut parts out from the video
|
||||
# This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
|
||||
# but must be below EmbedSubtitle and FFmpegMetadata
|
||||
# See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
|
||||
# If opts.sponskrub is None, sponskrub is used, but it silently fails if the executable can't be found
|
||||
if opts.sponskrub is not False:
|
||||
postprocessors.append({
|
||||
@@ -384,20 +451,34 @@ def _real_main(argv=None):
|
||||
'force': opts.sponskrub_force,
|
||||
'ignoreerror': opts.sponskrub is None,
|
||||
})
|
||||
if opts.embedthumbnail:
|
||||
already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
|
||||
postprocessors.append({
|
||||
'key': 'EmbedThumbnail',
|
||||
# already_have_thumbnail = True prevents the file from being deleted after embedding
|
||||
'already_have_thumbnail': already_have_thumbnail
|
||||
})
|
||||
if not already_have_thumbnail:
|
||||
opts.writethumbnail = True
|
||||
if opts.split_chapters:
|
||||
postprocessors.append({'key': 'FFmpegSplitChapters'})
|
||||
# XAttrMetadataPP should be run after post-processors that may change file contents
|
||||
if opts.xattrs:
|
||||
postprocessors.append({'key': 'XAttrMetadata'})
|
||||
# ExecAfterDownload must be the last PP
|
||||
if opts.exec_cmd:
|
||||
postprocessors.append({
|
||||
'key': 'ExecAfterDownload',
|
||||
'exec_cmd': opts.exec_cmd,
|
||||
'when': 'aftermove'
|
||||
# Run this only after the files have been moved to their final locations
|
||||
'when': 'after_move'
|
||||
})
|
||||
|
||||
def report_args_compat(arg, name):
|
||||
write_string(
|
||||
'WARNING: %s given without specifying name. The arguments will be given to all %s\n' % (arg, name),
|
||||
out=sys.stderr)
|
||||
warnings.append('%s given without specifying name. The arguments will be given to all %s' % (arg, name))
|
||||
|
||||
if 'default' in opts.external_downloader_args:
|
||||
report_args_compat('--external-downloader-args', 'external downloaders')
|
||||
report_args_compat('--downloader-args', 'external downloaders')
|
||||
|
||||
if 'default-compat' in opts.postprocessor_args and 'default' not in opts.postprocessor_args:
|
||||
report_args_compat('--post-processor-args', 'post-processors')
|
||||
@@ -405,17 +486,16 @@ def _real_main(argv=None):
|
||||
opts.postprocessor_args['default'] = opts.postprocessor_args['default-compat']
|
||||
|
||||
final_ext = (
|
||||
opts.recodevideo
|
||||
or (opts.remuxvideo in REMUX_EXTENSIONS) and opts.remuxvideo
|
||||
or (opts.extractaudio and opts.audioformat != 'best') and opts.audioformat
|
||||
or None)
|
||||
opts.recodevideo if opts.recodevideo in FFmpegVideoConvertorPP.SUPPORTED_EXTS
|
||||
else opts.remuxvideo if opts.remuxvideo in FFmpegVideoRemuxerPP.SUPPORTED_EXTS
|
||||
else opts.audioformat if (opts.extractaudio and opts.audioformat != 'best')
|
||||
else None)
|
||||
|
||||
match_filter = (
|
||||
None if opts.match_filter is None
|
||||
else match_filter_func(opts.match_filter))
|
||||
|
||||
ydl_opts = {
|
||||
'convertsubtitles': opts.convertsubtitles,
|
||||
'usenetrc': opts.usenetrc,
|
||||
'username': opts.username,
|
||||
'password': opts.password,
|
||||
@@ -434,6 +514,7 @@ def _real_main(argv=None):
|
||||
'forceduration': opts.getduration,
|
||||
'forcefilename': opts.getfilename,
|
||||
'forceformat': opts.getformat,
|
||||
'forceprint': opts.forceprint,
|
||||
'forcejson': opts.dumpjson or opts.print_json,
|
||||
'dump_single_json': opts.dump_single_json,
|
||||
'force_write_download_archive': opts.force_write_download_archive,
|
||||
@@ -441,13 +522,15 @@ def _real_main(argv=None):
|
||||
'skip_download': opts.skip_download,
|
||||
'format': opts.format,
|
||||
'allow_unplayable_formats': opts.allow_unplayable_formats,
|
||||
'ignore_no_formats_error': opts.ignore_no_formats_error,
|
||||
'format_sort': opts.format_sort,
|
||||
'format_sort_force': opts.format_sort_force,
|
||||
'allow_multiple_video_streams': opts.allow_multiple_video_streams,
|
||||
'allow_multiple_audio_streams': opts.allow_multiple_audio_streams,
|
||||
'check_formats': opts.check_formats,
|
||||
'listformats': opts.listformats,
|
||||
'listformats_table': opts.listformats_table,
|
||||
'outtmpl': outtmpl,
|
||||
'outtmpl': opts.outtmpl,
|
||||
'outtmpl_na_placeholder': opts.outtmpl_na_placeholder,
|
||||
'paths': opts.paths,
|
||||
'autonumber_size': opts.autonumber_size,
|
||||
@@ -463,6 +546,7 @@ def _real_main(argv=None):
|
||||
'extractor_retries': opts.extractor_retries,
|
||||
'skip_unavailable_fragments': opts.skip_unavailable_fragments,
|
||||
'keep_fragments': opts.keep_fragments,
|
||||
'concurrent_fragment_downloads': opts.concurrent_fragment_downloads,
|
||||
'buffersize': opts.buffersize,
|
||||
'noresizebuffer': opts.noresizebuffer,
|
||||
'http_chunk_size': opts.http_chunk_size,
|
||||
@@ -482,6 +566,7 @@ def _real_main(argv=None):
|
||||
'writeannotations': opts.writeannotations,
|
||||
'writeinfojson': opts.writeinfojson,
|
||||
'allow_playlist_files': opts.allow_playlist_files,
|
||||
'clean_infojson': opts.clean_infojson,
|
||||
'getcomments': opts.getcomments,
|
||||
'writethumbnail': opts.writethumbnail,
|
||||
'write_all_thumbnails': opts.write_all_thumbnails,
|
||||
@@ -516,6 +601,7 @@ def _real_main(argv=None):
|
||||
'download_archive': download_archive_fn,
|
||||
'break_on_existing': opts.break_on_existing,
|
||||
'break_on_reject': opts.break_on_reject,
|
||||
'skip_playlist_after_errors': opts.skip_playlist_after_errors,
|
||||
'cookiefile': opts.cookiefile,
|
||||
'nocheckcertificate': opts.no_check_certificate,
|
||||
'prefer_insecure': opts.prefer_insecure,
|
||||
@@ -559,9 +645,12 @@ def _real_main(argv=None):
|
||||
'geo_bypass': opts.geo_bypass,
|
||||
'geo_bypass_country': opts.geo_bypass_country,
|
||||
'geo_bypass_ip_block': opts.geo_bypass_ip_block,
|
||||
'warnings': warnings,
|
||||
'compat_opts': compat_opts,
|
||||
# just for deprecation check
|
||||
'autonumber': opts.autonumber if opts.autonumber is True else None,
|
||||
'usetitle': opts.usetitle if opts.usetitle is True else None,
|
||||
'autonumber': opts.autonumber or None,
|
||||
'usetitle': opts.usetitle or None,
|
||||
'useid': opts.useid or None,
|
||||
}
|
||||
|
||||
with YoutubeDL(ydl_opts) as ydl:
|
||||
@@ -574,7 +663,7 @@ def _real_main(argv=None):
|
||||
# Update version
|
||||
if opts.update_self:
|
||||
# If updater returns True, exit. Required for windows
|
||||
if update_self(ydl.to_screen, opts.verbose, ydl._opener):
|
||||
if run_update(ydl):
|
||||
if actual_use:
|
||||
sys.exit('ERROR: The program must exit for the update to complete')
|
||||
sys.exit()
|
||||
|
||||
@@ -78,6 +78,15 @@ try:
|
||||
except ImportError: # Python 2
|
||||
import Cookie as compat_cookies
|
||||
|
||||
if sys.version_info[0] == 2:
|
||||
class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
|
||||
def load(self, rawdata):
|
||||
if isinstance(rawdata, compat_str):
|
||||
rawdata = str(rawdata)
|
||||
return super(compat_cookies_SimpleCookie, self).load(rawdata)
|
||||
else:
|
||||
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
|
||||
|
||||
try:
|
||||
import html.entities as compat_html_entities
|
||||
except ImportError: # Python 2
|
||||
@@ -3009,10 +3018,24 @@ else:
|
||||
return ctypes.WINFUNCTYPE(*args, **kwargs)
|
||||
|
||||
|
||||
try:
|
||||
compat_Pattern = re.Pattern
|
||||
except AttributeError:
|
||||
compat_Pattern = type(re.compile(''))
|
||||
|
||||
|
||||
try:
|
||||
compat_Match = re.Match
|
||||
except AttributeError:
|
||||
compat_Match = type(re.compile('').match(''))
|
||||
|
||||
|
||||
__all__ = [
|
||||
'compat_HTMLParseError',
|
||||
'compat_HTMLParser',
|
||||
'compat_HTTPError',
|
||||
'compat_Match',
|
||||
'compat_Pattern',
|
||||
'compat_Struct',
|
||||
'compat_b64decode',
|
||||
'compat_basestring',
|
||||
@@ -3020,6 +3043,7 @@ __all__ = [
|
||||
'compat_cookiejar',
|
||||
'compat_cookiejar_Cookie',
|
||||
'compat_cookies',
|
||||
'compat_cookies_SimpleCookie',
|
||||
'compat_ctypes_WINFUNCTYPE',
|
||||
'compat_etree_Element',
|
||||
'compat_etree_fromstring',
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
determine_protocol,
|
||||
)
|
||||
@@ -30,6 +31,7 @@ from .external import (
|
||||
|
||||
PROTOCOL_MAP = {
|
||||
'rtmp': RtmpFD,
|
||||
'rtmp_ffmpeg': FFmpegFD,
|
||||
'm3u8_native': HlsFD,
|
||||
'm3u8': FFmpegFD,
|
||||
'mms': RtspFD,
|
||||
@@ -42,6 +44,25 @@ PROTOCOL_MAP = {
|
||||
}
|
||||
|
||||
|
||||
def shorten_protocol_name(proto, simplify=False):
|
||||
short_protocol_names = {
|
||||
'm3u8_native': 'm3u8_n',
|
||||
'rtmp_ffmpeg': 'rtmp_f',
|
||||
'http_dash_segments': 'dash',
|
||||
'niconico_dmc': 'dmc',
|
||||
}
|
||||
if simplify:
|
||||
short_protocol_names.update({
|
||||
'https': 'http',
|
||||
'ftps': 'ftp',
|
||||
'm3u8_native': 'm3u8',
|
||||
'rtmp_ffmpeg': 'rtmp',
|
||||
'm3u8_frag_urls': 'm3u8',
|
||||
'dash_frag_urls': 'dash',
|
||||
})
|
||||
return short_protocol_names.get(proto, proto)
|
||||
|
||||
|
||||
def get_suitable_downloader(info_dict, params={}, default=HttpFD):
|
||||
"""Get the downloader class that can handle the info dict."""
|
||||
protocol = determine_protocol(info_dict)
|
||||
@@ -50,16 +71,24 @@ def get_suitable_downloader(info_dict, params={}, default=HttpFD):
|
||||
# if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
|
||||
# return FFmpegFD
|
||||
|
||||
external_downloader = params.get('external_downloader')
|
||||
if external_downloader is not None:
|
||||
downloaders = params.get('external_downloader')
|
||||
external_downloader = (
|
||||
downloaders if isinstance(downloaders, compat_str) or downloaders is None
|
||||
else downloaders.get(shorten_protocol_name(protocol, True), downloaders.get('default')))
|
||||
if external_downloader and external_downloader.lower() == 'native':
|
||||
external_downloader = 'native'
|
||||
|
||||
if external_downloader not in (None, 'native'):
|
||||
ed = get_external_downloader(external_downloader)
|
||||
if ed.can_download(info_dict, external_downloader):
|
||||
return ed
|
||||
|
||||
if protocol.startswith('m3u8'):
|
||||
if protocol in ('m3u8', 'm3u8_native'):
|
||||
if info_dict.get('is_live'):
|
||||
return FFmpegFD
|
||||
elif _get_real_downloader(info_dict, 'frag_urls', params, None):
|
||||
elif external_downloader == 'native':
|
||||
return HlsFD
|
||||
elif _get_real_downloader(info_dict, 'm3u8_frag_urls', params, None):
|
||||
return HlsFD
|
||||
elif params.get('hls_prefer_native') is True:
|
||||
return HlsFD
|
||||
@@ -70,6 +99,7 @@ def get_suitable_downloader(info_dict, params={}, default=HttpFD):
|
||||
|
||||
|
||||
__all__ = [
|
||||
'get_suitable_downloader',
|
||||
'FileDownloader',
|
||||
'get_suitable_downloader',
|
||||
'shorten_protocol_name',
|
||||
]
|
||||
|
||||
@@ -147,10 +147,10 @@ class FileDownloader(object):
|
||||
return int(round(number * multiplier))
|
||||
|
||||
def to_screen(self, *args, **kargs):
|
||||
self.ydl.to_screen(*args, **kargs)
|
||||
self.ydl.to_stdout(*args, quiet=self.params.get('quiet'), **kargs)
|
||||
|
||||
def to_stderr(self, message):
|
||||
self.ydl.to_screen(message)
|
||||
self.ydl.to_stderr(message)
|
||||
|
||||
def to_console_title(self, message):
|
||||
self.ydl.to_console_title(message)
|
||||
@@ -164,6 +164,9 @@ class FileDownloader(object):
|
||||
def report_error(self, *args, **kargs):
|
||||
self.ydl.report_error(*args, **kargs)
|
||||
|
||||
def write_debug(self, *args, **kargs):
|
||||
self.ydl.write_debug(*args, **kargs)
|
||||
|
||||
def slow_down(self, start_time, now, byte_counter):
|
||||
"""Sleep if the download speed is over the rate limit."""
|
||||
rate_limit = self.params.get('ratelimit')
|
||||
@@ -326,6 +329,12 @@ class FileDownloader(object):
|
||||
"""Report it was impossible to resume download."""
|
||||
self.to_screen('[download] Unable to resume')
|
||||
|
||||
@staticmethod
|
||||
def supports_manifest(manifest):
|
||||
""" Whether the downloader can download the fragments from the manifest.
|
||||
Redefine in subclasses if needed. """
|
||||
pass
|
||||
|
||||
def download(self, filename, info_dict, subtitle=False):
|
||||
"""Download to a filename using the info from info_dict
|
||||
Return True on success and False otherwise
|
||||
@@ -396,5 +405,4 @@ class FileDownloader(object):
|
||||
if exe is None:
|
||||
exe = os.path.basename(str_args[0])
|
||||
|
||||
self.to_screen('[debug] %s command line: %s' % (
|
||||
exe, shell_quote(str_args)))
|
||||
self.write_debug('%s command line: %s' % (exe, shell_quote(str_args)))
|
||||
|
||||
@@ -1,18 +1,27 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import errno
|
||||
try:
|
||||
import concurrent.futures
|
||||
can_threaded_download = True
|
||||
except ImportError:
|
||||
can_threaded_download = False
|
||||
|
||||
from ..downloader import _get_real_downloader
|
||||
from .fragment import FragmentFD
|
||||
|
||||
from ..compat import compat_urllib_error
|
||||
from ..utils import (
|
||||
DownloadError,
|
||||
sanitize_open,
|
||||
urljoin,
|
||||
)
|
||||
|
||||
|
||||
class DashSegmentsFD(FragmentFD):
|
||||
"""
|
||||
Download segments in a DASH manifest
|
||||
Download segments in a DASH manifest. External downloaders can take over
|
||||
the fragment downloads by supporting the 'dash_frag_urls' protocol
|
||||
"""
|
||||
|
||||
FD_NAME = 'dashsegments'
|
||||
@@ -22,7 +31,7 @@ class DashSegmentsFD(FragmentFD):
|
||||
fragments = info_dict['fragments'][:1] if self.params.get(
|
||||
'test', False) else info_dict['fragments']
|
||||
|
||||
real_downloader = _get_real_downloader(info_dict, 'frag_urls', self.params, None)
|
||||
real_downloader = _get_real_downloader(info_dict, 'dash_frag_urls', self.params, None)
|
||||
|
||||
ctx = {
|
||||
'filename': filename,
|
||||
@@ -37,7 +46,7 @@ class DashSegmentsFD(FragmentFD):
|
||||
fragment_retries = self.params.get('fragment_retries', 0)
|
||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||
|
||||
fragment_urls = []
|
||||
fragments_to_download = []
|
||||
frag_index = 0
|
||||
for i, fragment in enumerate(fragments):
|
||||
frag_index += 1
|
||||
@@ -48,49 +57,17 @@ class DashSegmentsFD(FragmentFD):
|
||||
assert fragment_base_url
|
||||
fragment_url = urljoin(fragment_base_url, fragment['path'])
|
||||
|
||||
if real_downloader:
|
||||
fragment_urls.append(fragment_url)
|
||||
continue
|
||||
|
||||
# In DASH, the first segment contains necessary headers to
|
||||
# generate a valid MP4 file, so always abort for the first segment
|
||||
fatal = i == 0 or not skip_unavailable_fragments
|
||||
count = 0
|
||||
while count <= fragment_retries:
|
||||
try:
|
||||
success, frag_content = self._download_fragment(ctx, fragment_url, info_dict)
|
||||
if not success:
|
||||
return False
|
||||
self._append_fragment(ctx, frag_content)
|
||||
break
|
||||
except compat_urllib_error.HTTPError as err:
|
||||
# YouTube may often return 404 HTTP error for a fragment causing the
|
||||
# whole download to fail. However if the same fragment is immediately
|
||||
# retried with the same request data this usually succeeds (1-2 attempts
|
||||
# is usually enough) thus allowing to download the whole file successfully.
|
||||
# To be future-proof we will retry all fragments that fail with any
|
||||
# HTTP error.
|
||||
count += 1
|
||||
if count <= fragment_retries:
|
||||
self.report_retry_fragment(err, frag_index, count, fragment_retries)
|
||||
except DownloadError:
|
||||
# Don't retry fragment if error occurred during HTTP downloading
|
||||
# itself since it has own retry settings
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
break
|
||||
raise
|
||||
|
||||
if count > fragment_retries:
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
continue
|
||||
self.report_error('giving up after %s fragment retries' % fragment_retries)
|
||||
return False
|
||||
fragments_to_download.append({
|
||||
'frag_index': frag_index,
|
||||
'index': i,
|
||||
'url': fragment_url,
|
||||
})
|
||||
|
||||
if real_downloader:
|
||||
self.to_screen(
|
||||
'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
|
||||
info_copy = info_dict.copy()
|
||||
info_copy['url_list'] = fragment_urls
|
||||
info_copy['fragments'] = fragments_to_download
|
||||
fd = real_downloader(self.ydl, self.params)
|
||||
# TODO: Make progress updates work without hooking twice
|
||||
# for ph in self._progress_hooks:
|
||||
@@ -99,5 +76,116 @@ class DashSegmentsFD(FragmentFD):
|
||||
if not success:
|
||||
return False
|
||||
else:
|
||||
def download_fragment(fragment):
|
||||
i = fragment['index']
|
||||
frag_index = fragment['frag_index']
|
||||
fragment_url = fragment['url']
|
||||
|
||||
ctx['fragment_index'] = frag_index
|
||||
|
||||
# In DASH, the first segment contains necessary headers to
|
||||
# generate a valid MP4 file, so always abort for the first segment
|
||||
fatal = i == 0 or not skip_unavailable_fragments
|
||||
count = 0
|
||||
while count <= fragment_retries:
|
||||
try:
|
||||
success, frag_content = self._download_fragment(ctx, fragment_url, info_dict)
|
||||
if not success:
|
||||
return False, frag_index
|
||||
break
|
||||
except compat_urllib_error.HTTPError as err:
|
||||
# YouTube may often return 404 HTTP error for a fragment causing the
|
||||
# whole download to fail. However if the same fragment is immediately
|
||||
# retried with the same request data this usually succeeds (1-2 attempts
|
||||
# is usually enough) thus allowing to download the whole file successfully.
|
||||
# To be future-proof we will retry all fragments that fail with any
|
||||
# HTTP error.
|
||||
count += 1
|
||||
if count <= fragment_retries:
|
||||
self.report_retry_fragment(err, frag_index, count, fragment_retries)
|
||||
except DownloadError:
|
||||
# Don't retry fragment if error occurred during HTTP downloading
|
||||
# itself since it has own retry settings
|
||||
if not fatal:
|
||||
break
|
||||
raise
|
||||
|
||||
if count > fragment_retries:
|
||||
if not fatal:
|
||||
return False, frag_index
|
||||
ctx['dest_stream'].close()
|
||||
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
||||
return False, frag_index
|
||||
|
||||
return frag_content, frag_index
|
||||
|
||||
def append_fragment(frag_content, frag_index):
|
||||
fatal = frag_index == 1 or not skip_unavailable_fragments
|
||||
if frag_content:
|
||||
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], frag_index)
|
||||
try:
|
||||
file, frag_sanitized = sanitize_open(fragment_filename, 'rb')
|
||||
ctx['fragment_filename_sanitized'] = frag_sanitized
|
||||
file.close()
|
||||
self._append_fragment(ctx, frag_content)
|
||||
return True
|
||||
except EnvironmentError as ose:
|
||||
if ose.errno != errno.ENOENT:
|
||||
raise
|
||||
# FileNotFoundError
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
return True
|
||||
else:
|
||||
ctx['dest_stream'].close()
|
||||
self.report_error(
|
||||
'fragment %s not found, unable to continue' % frag_index)
|
||||
return False
|
||||
else:
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
return True
|
||||
else:
|
||||
ctx['dest_stream'].close()
|
||||
self.report_error(
|
||||
'fragment %s not found, unable to continue' % frag_index)
|
||||
return False
|
||||
|
||||
max_workers = self.params.get('concurrent_fragment_downloads', 1)
|
||||
if can_threaded_download and max_workers > 1:
|
||||
self.report_warning('The download speed shown is only of one thread. This is a known issue')
|
||||
_download_fragment = lambda f: (f, download_fragment(f)[1])
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
|
||||
futures = [pool.submit(_download_fragment, fragment) for fragment in fragments_to_download]
|
||||
# timeout must be 0 to return instantly
|
||||
done, not_done = concurrent.futures.wait(futures, timeout=0)
|
||||
try:
|
||||
while not_done:
|
||||
# Check every 1 second for KeyboardInterrupt
|
||||
freshly_done, not_done = concurrent.futures.wait(not_done, timeout=1)
|
||||
done |= freshly_done
|
||||
except KeyboardInterrupt:
|
||||
for future in not_done:
|
||||
future.cancel()
|
||||
# timeout must be none to cancel
|
||||
concurrent.futures.wait(not_done, timeout=None)
|
||||
raise KeyboardInterrupt
|
||||
|
||||
for fragment, frag_index in map(lambda x: x.result(), futures):
|
||||
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], frag_index)
|
||||
down, frag_sanitized = sanitize_open(fragment_filename, 'rb')
|
||||
fragment['fragment_filename_sanitized'] = frag_sanitized
|
||||
frag_content = down.read()
|
||||
down.close()
|
||||
result = append_fragment(frag_content, frag_index)
|
||||
if not result:
|
||||
return False
|
||||
else:
|
||||
for fragment in fragments_to_download:
|
||||
frag_content, frag_index = download_fragment(fragment)
|
||||
result = append_fragment(frag_content, frag_index)
|
||||
if not result:
|
||||
return False
|
||||
|
||||
self._finish_frag_download(ctx)
|
||||
return True
|
||||
|
||||
@@ -24,7 +24,6 @@ from ..utils import (
|
||||
cli_bool_option,
|
||||
cli_configuration_args,
|
||||
encodeFilename,
|
||||
error_to_compat_str,
|
||||
encodeArgument,
|
||||
handle_youtubedl_headers,
|
||||
check_executable,
|
||||
@@ -82,11 +81,15 @@ class ExternalFD(FileDownloader):
|
||||
|
||||
@property
|
||||
def exe(self):
|
||||
return self.params.get('external_downloader')
|
||||
return self.get_basename()
|
||||
|
||||
@classmethod
|
||||
def available(cls, path=None):
|
||||
return check_executable(path or cls.get_basename(), [cls.AVAILABLE_OPT])
|
||||
path = check_executable(path or cls.get_basename(), [cls.AVAILABLE_OPT])
|
||||
if path:
|
||||
cls.exe = path
|
||||
return path
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def supports(cls, info_dict):
|
||||
@@ -108,7 +111,8 @@ class ExternalFD(FileDownloader):
|
||||
def _configuration_args(self, *args, **kwargs):
|
||||
return cli_configuration_args(
|
||||
self.params.get('external_downloader_args'),
|
||||
self.get_basename(), *args, **kwargs)
|
||||
[self.get_basename(), 'default'],
|
||||
*args, **kwargs)
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
""" Either overwrite this or implement _make_cmd """
|
||||
@@ -116,24 +120,43 @@ class ExternalFD(FileDownloader):
|
||||
|
||||
self._debug_cmd(cmd)
|
||||
|
||||
p = subprocess.Popen(
|
||||
cmd, stderr=subprocess.PIPE)
|
||||
_, stderr = process_communicate_or_kill(p)
|
||||
if p.returncode != 0:
|
||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||
if 'fragments' in info_dict:
|
||||
fragment_retries = self.params.get('fragment_retries', 0)
|
||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||
|
||||
count = 0
|
||||
while count <= fragment_retries:
|
||||
p = subprocess.Popen(
|
||||
cmd, stderr=subprocess.PIPE)
|
||||
_, stderr = process_communicate_or_kill(p)
|
||||
if p.returncode == 0:
|
||||
break
|
||||
# TODO: Decide whether to retry based on error code
|
||||
# https://aria2.github.io/manual/en/html/aria2c.html#exit-status
|
||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||
count += 1
|
||||
if count <= fragment_retries:
|
||||
self.to_screen(
|
||||
'[%s] Got error. Retrying fragments (attempt %d of %s)...'
|
||||
% (self.get_basename(), count, self.format_retries(fragment_retries)))
|
||||
if count > fragment_retries:
|
||||
if not skip_unavailable_fragments:
|
||||
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
||||
return -1
|
||||
|
||||
if 'url_list' in info_dict:
|
||||
file_list = []
|
||||
for [i, url] in enumerate(info_dict['url_list']):
|
||||
tmpsegmentname = '%s_%s.frag' % (tmpfilename, i)
|
||||
file_list.append(tmpsegmentname)
|
||||
key_list = info_dict.get('key_list')
|
||||
decrypt_info = None
|
||||
dest, _ = sanitize_open(tmpfilename, 'wb')
|
||||
for i, file in enumerate(file_list):
|
||||
src, _ = sanitize_open(file, 'rb')
|
||||
if key_list:
|
||||
decrypt_info = next((x for x in key_list if x['INDEX'] == i), decrypt_info)
|
||||
for frag_index, fragment in enumerate(info_dict['fragments']):
|
||||
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
|
||||
try:
|
||||
src, _ = sanitize_open(fragment_filename, 'rb')
|
||||
except IOError:
|
||||
if skip_unavailable_fragments and frag_index > 1:
|
||||
self.to_screen('[%s] Skipping fragment %d ...' % (self.get_basename(), frag_index))
|
||||
continue
|
||||
self.report_error('Unable to open fragment %d' % frag_index)
|
||||
return -1
|
||||
decrypt_info = fragment.get('decrypt_info')
|
||||
if decrypt_info:
|
||||
if decrypt_info['METHOD'] == 'AES-128':
|
||||
iv = decrypt_info.get('IV')
|
||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
||||
@@ -149,19 +172,16 @@ class ExternalFD(FileDownloader):
|
||||
fragment_data = src.read()
|
||||
dest.write(fragment_data)
|
||||
src.close()
|
||||
if not self.params.get('keep_fragments', False):
|
||||
os.remove(encodeFilename(fragment_filename))
|
||||
dest.close()
|
||||
if not self.params.get('keep_fragments', False):
|
||||
for file_path in file_list:
|
||||
try:
|
||||
os.remove(file_path)
|
||||
except OSError as ose:
|
||||
self.report_error("Unable to delete file %s; %s" % (file_path, error_to_compat_str(ose)))
|
||||
try:
|
||||
file_path = '%s.frag.urls' % tmpfilename
|
||||
os.remove(file_path)
|
||||
except OSError as ose:
|
||||
self.report_error("Unable to delete file %s; %s" % (file_path, error_to_compat_str(ose)))
|
||||
|
||||
os.remove(encodeFilename('%s.frag.urls' % tmpfilename))
|
||||
else:
|
||||
p = subprocess.Popen(
|
||||
cmd, stderr=subprocess.PIPE)
|
||||
_, stderr = process_communicate_or_kill(p)
|
||||
if p.returncode != 0:
|
||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||
return p.returncode
|
||||
|
||||
def _prepare_url(self, info_dict, url):
|
||||
@@ -243,17 +263,24 @@ class WgetFD(ExternalFD):
|
||||
|
||||
class Aria2cFD(ExternalFD):
|
||||
AVAILABLE_OPT = '-v'
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'frag_urls')
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'dash_frag_urls', 'm3u8_frag_urls')
|
||||
|
||||
@staticmethod
|
||||
def supports_manifest(manifest):
|
||||
UNSUPPORTED_FEATURES = [
|
||||
r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [1]
|
||||
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
|
||||
]
|
||||
check_results = (not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES)
|
||||
return all(check_results)
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = [self.exe, '-c']
|
||||
dn = os.path.dirname(tmpfilename)
|
||||
if 'url_list' not in info_dict:
|
||||
cmd += ['--out', os.path.basename(tmpfilename)]
|
||||
verbose_level_args = ['--console-log-level=warn', '--summary-interval=0']
|
||||
cmd += self._configuration_args(['--file-allocation=none', '-x16', '-j16', '-s16'] + verbose_level_args)
|
||||
if dn:
|
||||
cmd += ['--dir', dn]
|
||||
cmd = [self.exe, '-c',
|
||||
'--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
|
||||
'--file-allocation=none', '-x16', '-j16', '-s16']
|
||||
if 'fragments' in info_dict:
|
||||
cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true']
|
||||
|
||||
if info_dict.get('http_headers') is not None:
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['--header', '%s: %s' % (key, val)]
|
||||
@@ -261,19 +288,33 @@ class Aria2cFD(ExternalFD):
|
||||
cmd += self._option('--all-proxy', 'proxy')
|
||||
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
|
||||
cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
|
||||
cmd += self._configuration_args()
|
||||
|
||||
# aria2c strips out spaces from the beginning/end of filenames and paths.
|
||||
# We work around this issue by adding a "./" to the beginning of the
|
||||
# filename and relative path, and adding a "/" at the end of the path.
|
||||
# See: https://github.com/yt-dlp/yt-dlp/issues/276
|
||||
# https://github.com/ytdl-org/youtube-dl/issues/20312
|
||||
# https://github.com/aria2/aria2/issues/1373
|
||||
dn = os.path.dirname(tmpfilename)
|
||||
if dn:
|
||||
if not os.path.isabs(dn):
|
||||
dn = '.%s%s' % (os.path.sep, dn)
|
||||
cmd += ['--dir', dn + os.path.sep]
|
||||
if 'fragments' not in info_dict:
|
||||
cmd += ['--out', '.%s%s' % (os.path.sep, os.path.basename(tmpfilename))]
|
||||
cmd += ['--auto-file-renaming=false']
|
||||
if 'url_list' in info_dict:
|
||||
cmd += verbose_level_args
|
||||
cmd += ['--uri-selector', 'inorder', '--download-result=hide']
|
||||
|
||||
if 'fragments' in info_dict:
|
||||
cmd += ['--file-allocation=none', '--uri-selector=inorder']
|
||||
url_list_file = '%s.frag.urls' % tmpfilename
|
||||
url_list = []
|
||||
for [i, url] in enumerate(info_dict['url_list']):
|
||||
tmpsegmentname = '%s_%s.frag' % (os.path.basename(tmpfilename), i)
|
||||
url_list.append('%s\n\tout=%s' % (url, tmpsegmentname))
|
||||
for frag_index, fragment in enumerate(info_dict['fragments']):
|
||||
fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
|
||||
url_list.append('%s\n\tout=%s' % (fragment['url'], fragment_filename))
|
||||
stream, _ = sanitize_open(url_list_file, 'wb')
|
||||
stream.write('\n'.join(url_list).encode('utf-8'))
|
||||
stream.close()
|
||||
|
||||
cmd += ['-i', url_list_file]
|
||||
else:
|
||||
cmd += ['--', info_dict['url']]
|
||||
@@ -281,9 +322,11 @@ class Aria2cFD(ExternalFD):
|
||||
|
||||
|
||||
class HttpieFD(ExternalFD):
|
||||
AVAILABLE_OPT = '--version'
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
return check_executable('http', ['--version'])
|
||||
def available(cls, path=None):
|
||||
return ExternalFD.available(cls, path or 'http')
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
|
||||
@@ -295,14 +338,15 @@ class HttpieFD(ExternalFD):
|
||||
|
||||
|
||||
class FFmpegFD(ExternalFD):
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms')
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms')
|
||||
|
||||
@classmethod
|
||||
def available(cls):
|
||||
def available(cls, path=None):
|
||||
# TODO: Fix path for ffmpeg
|
||||
return FFmpegPostProcessor().available
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
url = info_dict['url']
|
||||
urls = [f['url'] for f in info_dict.get('requested_formats', [])] or [info_dict['url']]
|
||||
ffpp = FFmpegPostProcessor(downloader=self)
|
||||
if not ffpp.available:
|
||||
self.report_error('m3u8 download detected but ffmpeg could not be found. Please install')
|
||||
@@ -334,7 +378,7 @@ class FFmpegFD(ExternalFD):
|
||||
# if end_time:
|
||||
# args += ['-t', compat_str(end_time - start_time)]
|
||||
|
||||
if info_dict.get('http_headers') is not None and re.match(r'^https?://', url):
|
||||
if info_dict.get('http_headers') is not None and re.match(r'^https?://', urls[0]):
|
||||
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
|
||||
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
||||
headers = handle_youtubedl_headers(info_dict['http_headers'])
|
||||
@@ -392,7 +436,15 @@ class FFmpegFD(ExternalFD):
|
||||
elif isinstance(conn, compat_str):
|
||||
args += ['-rtmp_conn', conn]
|
||||
|
||||
args += ['-i', url, '-c', 'copy']
|
||||
for url in urls:
|
||||
args += ['-i', url]
|
||||
args += ['-c', 'copy']
|
||||
if info_dict.get('requested_formats'):
|
||||
for (i, fmt) in enumerate(info_dict['requested_formats']):
|
||||
if fmt.get('acodec') != 'none':
|
||||
args.extend(['-map', '%d:a:0' % i])
|
||||
if fmt.get('vcodec') != 'none':
|
||||
args.extend(['-map', '%d:v:0' % i])
|
||||
|
||||
if self.params.get('test', False):
|
||||
args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
|
||||
@@ -455,4 +507,4 @@ def get_external_downloader(external_downloader):
|
||||
downloader . """
|
||||
# Drop .exe extension on Windows
|
||||
bn = os.path.splitext(os.path.basename(external_downloader))[0]
|
||||
return _BY_NAME[bn]
|
||||
return _BY_NAME.get(bn)
|
||||
|
||||
@@ -31,6 +31,7 @@ class FragmentFD(FileDownloader):
|
||||
Skip unavailable fragments (DASH and hlsnative only)
|
||||
keep_fragments: Keep downloaded fragments on disk after downloading is
|
||||
finished
|
||||
_no_ytdl_file: Don't use .ytdl file
|
||||
|
||||
For each incomplete fragment download yt-dlp keeps on disk a special
|
||||
bookkeeping file with download state and metadata (in future such files will
|
||||
@@ -69,15 +70,17 @@ class FragmentFD(FileDownloader):
|
||||
self._prepare_frag_download(ctx)
|
||||
self._start_frag_download(ctx)
|
||||
|
||||
@staticmethod
|
||||
def __do_ytdl_file(ctx):
|
||||
return not ctx['live'] and not ctx['tmpfilename'] == '-'
|
||||
def __do_ytdl_file(self, ctx):
|
||||
return not ctx['live'] and not ctx['tmpfilename'] == '-' and not self.params.get('_no_ytdl_file')
|
||||
|
||||
def _read_ytdl_file(self, ctx):
|
||||
assert 'ytdl_corrupt' not in ctx
|
||||
stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
|
||||
try:
|
||||
ctx['fragment_index'] = json.loads(stream.read())['downloader']['current_fragment']['index']
|
||||
ytdl_data = json.loads(stream.read())
|
||||
ctx['fragment_index'] = ytdl_data['downloader']['current_fragment']['index']
|
||||
if 'extra_state' in ytdl_data['downloader']:
|
||||
ctx['extra_state'] = ytdl_data['downloader']['extra_state']
|
||||
except Exception:
|
||||
ctx['ytdl_corrupt'] = True
|
||||
finally:
|
||||
@@ -90,6 +93,8 @@ class FragmentFD(FileDownloader):
|
||||
'index': ctx['fragment_index'],
|
||||
},
|
||||
}
|
||||
if 'extra_state' in ctx:
|
||||
downloader['extra_state'] = ctx['extra_state']
|
||||
if ctx.get('fragment_count') is not None:
|
||||
downloader['fragment_count'] = ctx['fragment_count']
|
||||
frag_index_stream.write(json.dumps({'downloader': downloader}))
|
||||
|
||||
@@ -1,12 +1,19 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import errno
|
||||
import re
|
||||
import io
|
||||
import binascii
|
||||
try:
|
||||
from Crypto.Cipher import AES
|
||||
can_decrypt_frag = True
|
||||
except ImportError:
|
||||
can_decrypt_frag = False
|
||||
try:
|
||||
import concurrent.futures
|
||||
can_threaded_download = True
|
||||
except ImportError:
|
||||
can_threaded_download = False
|
||||
|
||||
from ..downloader import _get_real_downloader
|
||||
from .fragment import FragmentFD
|
||||
@@ -19,12 +26,19 @@ from ..compat import (
|
||||
)
|
||||
from ..utils import (
|
||||
parse_m3u8_attributes,
|
||||
sanitize_open,
|
||||
update_url_query,
|
||||
bug_reports_message,
|
||||
)
|
||||
from .. import webvtt
|
||||
|
||||
|
||||
class HlsFD(FragmentFD):
|
||||
""" A limited implementation that does not require ffmpeg """
|
||||
"""
|
||||
Download segments in a m3u8 manifest. External downloaders can take over
|
||||
the fragment downloads by supporting the 'm3u8_frag_urls' protocol and
|
||||
re-defining 'supports_manifest' function
|
||||
"""
|
||||
|
||||
FD_NAME = 'hlsnative'
|
||||
|
||||
@@ -53,12 +67,15 @@ class HlsFD(FragmentFD):
|
||||
UNSUPPORTED_FEATURES += [
|
||||
r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]
|
||||
]
|
||||
check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
|
||||
is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
|
||||
check_results.append(with_crypto or not is_aes128_enc)
|
||||
check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))
|
||||
check_results.append(not info_dict.get('is_live'))
|
||||
return all(check_results)
|
||||
|
||||
def check_results():
|
||||
yield not info_dict.get('is_live')
|
||||
is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
|
||||
yield with_crypto or not is_aes128_enc
|
||||
yield not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest)
|
||||
for feature in UNSUPPORTED_FEATURES:
|
||||
yield not re.search(feature, manifest)
|
||||
return all(check_results())
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
man_url = info_dict['url']
|
||||
@@ -70,20 +87,28 @@ class HlsFD(FragmentFD):
|
||||
|
||||
if not self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')):
|
||||
if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):
|
||||
self.report_error('pycryptodome not found. Please install it.')
|
||||
self.report_error('pycryptodome not found. Please install')
|
||||
return False
|
||||
if self.can_download(s, info_dict, with_crypto=True):
|
||||
self.report_warning('pycryptodome is needed to download this file with hlsnative')
|
||||
self.report_warning(
|
||||
'hlsnative has detected features it does not support, '
|
||||
'extraction will be delegated to ffmpeg')
|
||||
self.report_warning('pycryptodome is needed to download this file natively')
|
||||
fd = FFmpegFD(self.ydl, self.params)
|
||||
self.report_warning(
|
||||
'%s detected unsupported features; extraction will be delegated to %s' % (self.FD_NAME, fd.get_basename()))
|
||||
# TODO: Make progress updates work without hooking twice
|
||||
# for ph in self._progress_hooks:
|
||||
# fd.add_progress_hook(ph)
|
||||
return fd.real_download(filename, info_dict)
|
||||
|
||||
real_downloader = _get_real_downloader(info_dict, 'frag_urls', self.params, None)
|
||||
is_webvtt = info_dict['ext'] == 'vtt'
|
||||
if is_webvtt:
|
||||
real_downloader = None # Packing the fragments is not currently supported for external downloader
|
||||
else:
|
||||
real_downloader = _get_real_downloader(info_dict, 'm3u8_frag_urls', self.params, None)
|
||||
if real_downloader and not real_downloader.supports_manifest(s):
|
||||
real_downloader = None
|
||||
if real_downloader:
|
||||
self.to_screen(
|
||||
'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
|
||||
|
||||
def is_ad_fragment_start(s):
|
||||
return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
|
||||
@@ -93,7 +118,7 @@ class HlsFD(FragmentFD):
|
||||
return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
|
||||
or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
|
||||
|
||||
fragment_urls = []
|
||||
fragments = []
|
||||
|
||||
media_frags = 0
|
||||
ad_frags = 0
|
||||
@@ -124,6 +149,8 @@ class HlsFD(FragmentFD):
|
||||
else:
|
||||
self._prepare_and_start_frag_download(ctx)
|
||||
|
||||
extra_state = ctx.setdefault('extra_state', {})
|
||||
|
||||
fragment_retries = self.params.get('fragment_retries', 0)
|
||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||
test = self.params.get('test', False)
|
||||
@@ -136,14 +163,12 @@ class HlsFD(FragmentFD):
|
||||
i = 0
|
||||
media_sequence = 0
|
||||
decrypt_info = {'METHOD': 'NONE'}
|
||||
key_list = []
|
||||
byte_range = {}
|
||||
discontinuity_count = 0
|
||||
frag_index = 0
|
||||
ad_frag_next = False
|
||||
for line in s.splitlines():
|
||||
line = line.strip()
|
||||
download_frag = False
|
||||
if line:
|
||||
if not line.startswith('#'):
|
||||
if format_index and discontinuity_count != format_index:
|
||||
@@ -160,17 +185,20 @@ class HlsFD(FragmentFD):
|
||||
if extra_query:
|
||||
frag_url = update_url_query(frag_url, extra_query)
|
||||
|
||||
if real_downloader:
|
||||
fragment_urls.append(frag_url)
|
||||
continue
|
||||
download_frag = True
|
||||
fragments.append({
|
||||
'frag_index': frag_index,
|
||||
'url': frag_url,
|
||||
'decrypt_info': decrypt_info,
|
||||
'byte_range': byte_range,
|
||||
'media_sequence': media_sequence,
|
||||
})
|
||||
|
||||
elif line.startswith('#EXT-X-MAP'):
|
||||
if format_index and discontinuity_count != format_index:
|
||||
continue
|
||||
if frag_index > 0:
|
||||
self.report_error(
|
||||
'initialization fragment found after media fragments, unable to download')
|
||||
'Initialization fragment found after media fragments, unable to download')
|
||||
return False
|
||||
frag_index += 1
|
||||
map_info = parse_m3u8_attributes(line[11:])
|
||||
@@ -180,9 +208,14 @@ class HlsFD(FragmentFD):
|
||||
else compat_urlparse.urljoin(man_url, map_info.get('URI')))
|
||||
if extra_query:
|
||||
frag_url = update_url_query(frag_url, extra_query)
|
||||
if real_downloader:
|
||||
fragment_urls.append(frag_url)
|
||||
continue
|
||||
|
||||
fragments.append({
|
||||
'frag_index': frag_index,
|
||||
'url': frag_url,
|
||||
'decrypt_info': decrypt_info,
|
||||
'byte_range': byte_range,
|
||||
'media_sequence': media_sequence
|
||||
})
|
||||
|
||||
if map_info.get('BYTERANGE'):
|
||||
splitted_byte_range = map_info.get('BYTERANGE').split('@')
|
||||
@@ -191,7 +224,6 @@ class HlsFD(FragmentFD):
|
||||
'start': sub_range_start,
|
||||
'end': sub_range_start + int(splitted_byte_range[0]),
|
||||
}
|
||||
download_frag = True
|
||||
|
||||
elif line.startswith('#EXT-X-KEY'):
|
||||
decrypt_url = decrypt_info.get('URI')
|
||||
@@ -206,9 +238,6 @@ class HlsFD(FragmentFD):
|
||||
decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
|
||||
if decrypt_url != decrypt_info['URI']:
|
||||
decrypt_info['KEY'] = None
|
||||
key_data = decrypt_info.copy()
|
||||
key_data['INDEX'] = frag_index
|
||||
key_list.append(key_data)
|
||||
|
||||
elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
|
||||
media_sequence = int(line[22:])
|
||||
@@ -225,58 +254,16 @@ class HlsFD(FragmentFD):
|
||||
ad_frag_next = False
|
||||
elif line.startswith('#EXT-X-DISCONTINUITY'):
|
||||
discontinuity_count += 1
|
||||
i += 1
|
||||
media_sequence += 1
|
||||
|
||||
if download_frag:
|
||||
count = 0
|
||||
headers = info_dict.get('http_headers', {})
|
||||
if byte_range:
|
||||
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
||||
while count <= fragment_retries:
|
||||
try:
|
||||
success, frag_content = self._download_fragment(
|
||||
ctx, frag_url, info_dict, headers)
|
||||
if not success:
|
||||
return False
|
||||
break
|
||||
except compat_urllib_error.HTTPError as err:
|
||||
# Unavailable (possibly temporary) fragments may be served.
|
||||
# First we try to retry then either skip or abort.
|
||||
# See https://github.com/ytdl-org/youtube-dl/issues/10165,
|
||||
# https://github.com/ytdl-org/youtube-dl/issues/10448).
|
||||
count += 1
|
||||
if count <= fragment_retries:
|
||||
self.report_retry_fragment(err, frag_index, count, fragment_retries)
|
||||
if count > fragment_retries:
|
||||
if skip_unavailable_fragments:
|
||||
i += 1
|
||||
media_sequence += 1
|
||||
self.report_skip_fragment(frag_index)
|
||||
continue
|
||||
self.report_error(
|
||||
'giving up after %s fragment retries' % fragment_retries)
|
||||
return False
|
||||
|
||||
if decrypt_info['METHOD'] == 'AES-128':
|
||||
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
|
||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
||||
self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
|
||||
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
|
||||
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
|
||||
# not what it decrypts to.
|
||||
if not test:
|
||||
frag_content = AES.new(
|
||||
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
||||
self._append_fragment(ctx, frag_content)
|
||||
# We only download the first fragment during the test
|
||||
if test:
|
||||
break
|
||||
i += 1
|
||||
media_sequence += 1
|
||||
# We only download the first fragment during the test
|
||||
if test:
|
||||
fragments = [fragments[0] if fragments else None]
|
||||
|
||||
if real_downloader:
|
||||
info_copy = info_dict.copy()
|
||||
info_copy['url_list'] = fragment_urls
|
||||
info_copy['key_list'] = key_list
|
||||
info_copy['fragments'] = fragments
|
||||
fd = real_downloader(self.ydl, self.params)
|
||||
# TODO: Make progress updates work without hooking twice
|
||||
# for ph in self._progress_hooks:
|
||||
@@ -285,5 +272,190 @@ class HlsFD(FragmentFD):
|
||||
if not success:
|
||||
return False
|
||||
else:
|
||||
def download_fragment(fragment):
|
||||
frag_index = fragment['frag_index']
|
||||
frag_url = fragment['url']
|
||||
decrypt_info = fragment['decrypt_info']
|
||||
byte_range = fragment['byte_range']
|
||||
media_sequence = fragment['media_sequence']
|
||||
|
||||
ctx['fragment_index'] = frag_index
|
||||
|
||||
count = 0
|
||||
headers = info_dict.get('http_headers', {})
|
||||
if byte_range:
|
||||
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
||||
while count <= fragment_retries:
|
||||
try:
|
||||
success, frag_content = self._download_fragment(
|
||||
ctx, frag_url, info_dict, headers)
|
||||
if not success:
|
||||
return False, frag_index
|
||||
break
|
||||
except compat_urllib_error.HTTPError as err:
|
||||
# Unavailable (possibly temporary) fragments may be served.
|
||||
# First we try to retry then either skip or abort.
|
||||
# See https://github.com/ytdl-org/youtube-dl/issues/10165,
|
||||
# https://github.com/ytdl-org/youtube-dl/issues/10448).
|
||||
count += 1
|
||||
if count <= fragment_retries:
|
||||
self.report_retry_fragment(err, frag_index, count, fragment_retries)
|
||||
if count > fragment_retries:
|
||||
ctx['dest_stream'].close()
|
||||
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
||||
return False, frag_index
|
||||
|
||||
if decrypt_info['METHOD'] == 'AES-128':
|
||||
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
|
||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
||||
self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
|
||||
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
|
||||
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
|
||||
# not what it decrypts to.
|
||||
if not test:
|
||||
frag_content = AES.new(
|
||||
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
||||
|
||||
return frag_content, frag_index
|
||||
|
||||
pack_fragment = lambda frag_content, _: frag_content
|
||||
|
||||
if is_webvtt:
|
||||
def pack_fragment(frag_content, frag_index):
|
||||
output = io.StringIO()
|
||||
adjust = 0
|
||||
for block in webvtt.parse_fragment(frag_content):
|
||||
if isinstance(block, webvtt.CueBlock):
|
||||
block.start += adjust
|
||||
block.end += adjust
|
||||
|
||||
dedup_window = extra_state.setdefault('webvtt_dedup_window', [])
|
||||
cue = block.as_json
|
||||
|
||||
# skip the cue if an identical one appears
|
||||
# in the window of potential duplicates
|
||||
# and prune the window of unviable candidates
|
||||
i = 0
|
||||
skip = True
|
||||
while i < len(dedup_window):
|
||||
window_cue = dedup_window[i]
|
||||
if window_cue == cue:
|
||||
break
|
||||
if window_cue['end'] >= cue['start']:
|
||||
i += 1
|
||||
continue
|
||||
del dedup_window[i]
|
||||
else:
|
||||
skip = False
|
||||
|
||||
if skip:
|
||||
continue
|
||||
|
||||
# add the cue to the window
|
||||
dedup_window.append(cue)
|
||||
elif isinstance(block, webvtt.Magic):
|
||||
# take care of MPEG PES timestamp overflow
|
||||
if block.mpegts is None:
|
||||
block.mpegts = 0
|
||||
extra_state.setdefault('webvtt_mpegts_adjust', 0)
|
||||
block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
|
||||
if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
|
||||
extra_state['webvtt_mpegts_adjust'] += 1
|
||||
block.mpegts += 1 << 33
|
||||
extra_state['webvtt_mpegts_last'] = block.mpegts
|
||||
|
||||
if frag_index == 1:
|
||||
extra_state['webvtt_mpegts'] = block.mpegts or 0
|
||||
extra_state['webvtt_local'] = block.local or 0
|
||||
# XXX: block.local = block.mpegts = None ?
|
||||
else:
|
||||
if block.mpegts is not None and block.local is not None:
|
||||
adjust = (
|
||||
(block.mpegts - extra_state.get('webvtt_mpegts', 0))
|
||||
- (block.local - extra_state.get('webvtt_local', 0))
|
||||
)
|
||||
continue
|
||||
elif isinstance(block, webvtt.HeaderBlock):
|
||||
if frag_index != 1:
|
||||
# XXX: this should probably be silent as well
|
||||
# or verify that all segments contain the same data
|
||||
self.report_warning(bug_reports_message(
|
||||
'Discarding a %s block found in the middle of the stream; '
|
||||
'if the subtitles display incorrectly,'
|
||||
% (type(block).__name__)))
|
||||
continue
|
||||
block.write_into(output)
|
||||
|
||||
return output.getvalue().encode('utf-8')
|
||||
|
||||
def append_fragment(frag_content, frag_index):
|
||||
fatal = frag_index == 1 or not skip_unavailable_fragments
|
||||
if frag_content:
|
||||
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], frag_index)
|
||||
try:
|
||||
file, frag_sanitized = sanitize_open(fragment_filename, 'rb')
|
||||
ctx['fragment_filename_sanitized'] = frag_sanitized
|
||||
file.close()
|
||||
frag_content = pack_fragment(frag_content, frag_index)
|
||||
self._append_fragment(ctx, frag_content)
|
||||
return True
|
||||
except EnvironmentError as ose:
|
||||
if ose.errno != errno.ENOENT:
|
||||
raise
|
||||
# FileNotFoundError
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
return True
|
||||
else:
|
||||
ctx['dest_stream'].close()
|
||||
self.report_error(
|
||||
'fragment %s not found, unable to continue' % frag_index)
|
||||
return False
|
||||
else:
|
||||
if not fatal:
|
||||
self.report_skip_fragment(frag_index)
|
||||
return True
|
||||
else:
|
||||
ctx['dest_stream'].close()
|
||||
self.report_error(
|
||||
'fragment %s not found, unable to continue' % frag_index)
|
||||
return False
|
||||
|
||||
max_workers = self.params.get('concurrent_fragment_downloads', 1)
|
||||
if can_threaded_download and max_workers > 1:
|
||||
self.report_warning('The download speed shown is only of one thread. This is a known issue')
|
||||
_download_fragment = lambda f: (f, download_fragment(f)[1])
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
|
||||
futures = [pool.submit(_download_fragment, fragment) for fragment in fragments]
|
||||
# timeout must be 0 to return instantly
|
||||
done, not_done = concurrent.futures.wait(futures, timeout=0)
|
||||
try:
|
||||
while not_done:
|
||||
# Check every 1 second for KeyboardInterrupt
|
||||
freshly_done, not_done = concurrent.futures.wait(not_done, timeout=1)
|
||||
done |= freshly_done
|
||||
except KeyboardInterrupt:
|
||||
for future in not_done:
|
||||
future.cancel()
|
||||
# timeout must be none to cancel
|
||||
concurrent.futures.wait(not_done, timeout=None)
|
||||
raise KeyboardInterrupt
|
||||
|
||||
for fragment, frag_index in map(lambda x: x.result(), futures):
|
||||
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], frag_index)
|
||||
down, frag_sanitized = sanitize_open(fragment_filename, 'rb')
|
||||
fragment['fragment_filename_sanitized'] = frag_sanitized
|
||||
frag_content = down.read()
|
||||
down.close()
|
||||
result = append_fragment(frag_content, frag_index)
|
||||
if not result:
|
||||
return False
|
||||
else:
|
||||
for fragment in fragments:
|
||||
frag_content, frag_index = download_fragment(fragment)
|
||||
result = append_fragment(frag_content, frag_index)
|
||||
if not result:
|
||||
return False
|
||||
|
||||
self._finish_frag_download(ctx)
|
||||
return True
|
||||
|
||||
@@ -48,7 +48,7 @@ def write_piff_header(stream, params):
|
||||
language = params.get('language', 'und')
|
||||
height = params.get('height', 0)
|
||||
width = params.get('width', 0)
|
||||
is_audio = width == 0 and height == 0
|
||||
stream_type = params['stream_type']
|
||||
creation_time = modification_time = int(time.time())
|
||||
|
||||
ftyp_payload = b'isml' # major brand
|
||||
@@ -77,7 +77,7 @@ def write_piff_header(stream, params):
|
||||
tkhd_payload += u32.pack(0) * 2 # reserved
|
||||
tkhd_payload += s16.pack(0) # layer
|
||||
tkhd_payload += s16.pack(0) # alternate group
|
||||
tkhd_payload += s88.pack(1 if is_audio else 0) # volume
|
||||
tkhd_payload += s88.pack(1 if stream_type == 'audio' else 0) # volume
|
||||
tkhd_payload += u16.pack(0) # reserved
|
||||
tkhd_payload += unity_matrix
|
||||
tkhd_payload += u1616.pack(width)
|
||||
@@ -93,19 +93,34 @@ def write_piff_header(stream, params):
|
||||
mdia_payload = full_box(b'mdhd', 1, 0, mdhd_payload) # Media Header Box
|
||||
|
||||
hdlr_payload = u32.pack(0) # pre defined
|
||||
hdlr_payload += b'soun' if is_audio else b'vide' # handler type
|
||||
hdlr_payload += u32.pack(0) * 3 # reserved
|
||||
hdlr_payload += (b'Sound' if is_audio else b'Video') + b'Handler\0' # name
|
||||
if stream_type == 'audio': # handler type
|
||||
hdlr_payload += b'soun'
|
||||
hdlr_payload += u32.pack(0) * 3 # reserved
|
||||
hdlr_payload += b'SoundHandler\0' # name
|
||||
elif stream_type == 'video':
|
||||
hdlr_payload += b'vide'
|
||||
hdlr_payload += u32.pack(0) * 3 # reserved
|
||||
hdlr_payload += b'VideoHandler\0' # name
|
||||
elif stream_type == 'text':
|
||||
hdlr_payload += b'subt'
|
||||
hdlr_payload += u32.pack(0) * 3 # reserved
|
||||
hdlr_payload += b'SubtitleHandler\0' # name
|
||||
else:
|
||||
assert False
|
||||
mdia_payload += full_box(b'hdlr', 0, 0, hdlr_payload) # Handler Reference Box
|
||||
|
||||
if is_audio:
|
||||
if stream_type == 'audio':
|
||||
smhd_payload = s88.pack(0) # balance
|
||||
smhd_payload += u16.pack(0) # reserved
|
||||
media_header_box = full_box(b'smhd', 0, 0, smhd_payload) # Sound Media Header
|
||||
else:
|
||||
elif stream_type == 'video':
|
||||
vmhd_payload = u16.pack(0) # graphics mode
|
||||
vmhd_payload += u16.pack(0) * 3 # opcolor
|
||||
media_header_box = full_box(b'vmhd', 0, 1, vmhd_payload) # Video Media Header
|
||||
elif stream_type == 'text':
|
||||
media_header_box = full_box(b'sthd', 0, 0, b'') # Subtitle Media Header
|
||||
else:
|
||||
assert False
|
||||
minf_payload = media_header_box
|
||||
|
||||
dref_payload = u32.pack(1) # entry count
|
||||
@@ -117,7 +132,7 @@ def write_piff_header(stream, params):
|
||||
|
||||
sample_entry_payload = u8.pack(0) * 6 # reserved
|
||||
sample_entry_payload += u16.pack(1) # data reference index
|
||||
if is_audio:
|
||||
if stream_type == 'audio':
|
||||
sample_entry_payload += u32.pack(0) * 2 # reserved
|
||||
sample_entry_payload += u16.pack(params.get('channels', 2))
|
||||
sample_entry_payload += u16.pack(params.get('bits_per_sample', 16))
|
||||
@@ -127,7 +142,7 @@ def write_piff_header(stream, params):
|
||||
|
||||
if fourcc == 'AACL':
|
||||
sample_entry_box = box(b'mp4a', sample_entry_payload)
|
||||
else:
|
||||
elif stream_type == 'video':
|
||||
sample_entry_payload += u16.pack(0) # pre defined
|
||||
sample_entry_payload += u16.pack(0) # reserved
|
||||
sample_entry_payload += u32.pack(0) * 3 # pre defined
|
||||
@@ -155,6 +170,18 @@ def write_piff_header(stream, params):
|
||||
avcc_payload += pps
|
||||
sample_entry_payload += box(b'avcC', avcc_payload) # AVC Decoder Configuration Record
|
||||
sample_entry_box = box(b'avc1', sample_entry_payload) # AVC Simple Entry
|
||||
else:
|
||||
assert False
|
||||
elif stream_type == 'text':
|
||||
if fourcc == 'TTML':
|
||||
sample_entry_payload += b'http://www.w3.org/ns/ttml\0' # namespace
|
||||
sample_entry_payload += b'\0' # schema location
|
||||
sample_entry_payload += b'\0' # auxilary mime types(??)
|
||||
sample_entry_box = box(b'stpp', sample_entry_payload)
|
||||
else:
|
||||
assert False
|
||||
else:
|
||||
assert False
|
||||
stsd_payload += sample_entry_box
|
||||
|
||||
stbl_payload = full_box(b'stsd', 0, 0, stsd_payload) # Sample Description Box
|
||||
@@ -221,10 +248,13 @@ class IsmFD(FragmentFD):
|
||||
|
||||
self._prepare_and_start_frag_download(ctx)
|
||||
|
||||
extra_state = ctx.setdefault('extra_state', {
|
||||
'ism_track_written': False,
|
||||
})
|
||||
|
||||
fragment_retries = self.params.get('fragment_retries', 0)
|
||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||
|
||||
track_written = False
|
||||
frag_index = 0
|
||||
for i, segment in enumerate(segments):
|
||||
frag_index += 1
|
||||
@@ -236,11 +266,11 @@ class IsmFD(FragmentFD):
|
||||
success, frag_content = self._download_fragment(ctx, segment['url'], info_dict)
|
||||
if not success:
|
||||
return False
|
||||
if not track_written:
|
||||
if not extra_state['ism_track_written']:
|
||||
tfhd_data = extract_box_data(frag_content, [b'moof', b'traf', b'tfhd'])
|
||||
info_dict['_download_params']['track_id'] = u32.unpack(tfhd_data[4:8])[0]
|
||||
write_piff_header(ctx['dest_stream'], info_dict['_download_params'])
|
||||
track_written = True
|
||||
extra_state['ism_track_written'] = True
|
||||
self._append_fragment(ctx, frag_content)
|
||||
break
|
||||
except compat_urllib_error.HTTPError as err:
|
||||
|
||||
@@ -24,16 +24,14 @@ class NiconicoDmcFD(FileDownloader):
|
||||
|
||||
success = download_complete = False
|
||||
timer = [None]
|
||||
|
||||
heartbeat_lock = threading.Lock()
|
||||
heartbeat_url = heartbeat_info_dict['url']
|
||||
heartbeat_data = heartbeat_info_dict['data']
|
||||
heartbeat_data = heartbeat_info_dict['data'].encode()
|
||||
heartbeat_interval = heartbeat_info_dict.get('interval', 30)
|
||||
self.to_screen('[%s] Heartbeat with %s second interval ...' % (self.FD_NAME, heartbeat_interval))
|
||||
|
||||
def heartbeat():
|
||||
try:
|
||||
compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data.encode())
|
||||
compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
|
||||
except Exception:
|
||||
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
|
||||
|
||||
@@ -42,13 +40,16 @@ class NiconicoDmcFD(FileDownloader):
|
||||
timer[0] = threading.Timer(heartbeat_interval, heartbeat)
|
||||
timer[0].start()
|
||||
|
||||
heartbeat_info_dict['ping']()
|
||||
self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
|
||||
try:
|
||||
heartbeat()
|
||||
if type(fd).__name__ == 'HlsFD':
|
||||
info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
|
||||
success = fd.real_download(filename, info_dict)
|
||||
finally:
|
||||
if heartbeat_lock:
|
||||
with heartbeat_lock:
|
||||
timer[0].cancel()
|
||||
download_complete = True
|
||||
|
||||
return success
|
||||
|
||||
@@ -117,7 +117,7 @@ class RtmpFD(FileDownloader):
|
||||
|
||||
# Check for rtmpdump first
|
||||
if not check_executable('rtmpdump', ['-h']):
|
||||
self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
|
||||
self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install')
|
||||
return False
|
||||
|
||||
# Download using rtmpdump. rtmpdump returns exit code 2 when
|
||||
|
||||
@@ -24,7 +24,7 @@ class RtspFD(FileDownloader):
|
||||
args = [
|
||||
'mpv', '-really-quiet', '--vo=null', '--stream-dump=' + tmpfilename, url]
|
||||
else:
|
||||
self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install any.')
|
||||
self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install one')
|
||||
return False
|
||||
|
||||
self._debug_cmd(args)
|
||||
|
||||
@@ -79,8 +79,7 @@ class YoutubeLiveChatReplayFD(FragmentFD):
|
||||
|
||||
self._prepare_and_start_frag_download(ctx)
|
||||
|
||||
success, raw_fragment = dl_fragment(
|
||||
'https://www.youtube.com/watch?v={}'.format(video_id))
|
||||
success, raw_fragment = dl_fragment(info_dict['url'])
|
||||
if not success:
|
||||
return False
|
||||
try:
|
||||
|
||||
@@ -12,9 +12,6 @@ except ImportError:
|
||||
|
||||
if not _LAZY_LOADER:
|
||||
from .extractors import *
|
||||
|
||||
_PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals())
|
||||
|
||||
_ALL_CLASSES = [
|
||||
klass
|
||||
for name, klass in globals().items()
|
||||
@@ -22,6 +19,9 @@ if not _LAZY_LOADER:
|
||||
]
|
||||
_ALL_CLASSES.append(GenericIE)
|
||||
|
||||
_PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals())
|
||||
_ALL_CLASSES = _PLUGIN_CLASSES + _ALL_CLASSES
|
||||
|
||||
|
||||
def gen_extractor_classes():
|
||||
""" Return a list of supported extractors.
|
||||
|
||||
@@ -1414,7 +1414,7 @@ class AdobePassIE(InfoExtractor):
|
||||
authn_token = None
|
||||
if not authn_token:
|
||||
# TODO add support for other TV Providers
|
||||
mso_id = self._downloader.params.get('ap_mso')
|
||||
mso_id = self.get_param('ap_mso')
|
||||
if not mso_id:
|
||||
raise_mvpd_required()
|
||||
username, password = self._get_login_info('ap_username', 'ap_password', mso_id)
|
||||
|
||||
@@ -257,7 +257,7 @@ class AfreecaTVIE(InfoExtractor):
|
||||
if flag and flag == 'SUCCEED':
|
||||
break
|
||||
if flag == 'PARTIAL_ADULT':
|
||||
self._downloader.report_warning(
|
||||
self.report_warning(
|
||||
'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
|
||||
'Only content suitable for all ages will be downloaded. '
|
||||
'Provide account credentials if you wish to download restricted content.')
|
||||
@@ -323,7 +323,7 @@ class AfreecaTVIE(InfoExtractor):
|
||||
'url': file_url,
|
||||
'format_id': 'http',
|
||||
}]
|
||||
if not formats:
|
||||
if not formats and not self.get_param('ignore_no_formats'):
|
||||
continue
|
||||
self._sort_formats(formats)
|
||||
file_info = common_entry.copy()
|
||||
|
||||
@@ -65,15 +65,35 @@ class AMCNetworksIE(ThePlatformIE):
|
||||
def _real_extract(self, url):
|
||||
site, display_id = re.match(self._VALID_URL, url).groups()
|
||||
requestor_id = self._REQUESTOR_ID_MAP[site]
|
||||
properties = self._download_json(
|
||||
'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s' % (requestor_id.lower(), display_id),
|
||||
display_id)['data']['properties']
|
||||
page_data = self._download_json(
|
||||
'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'
|
||||
% (requestor_id.lower(), display_id), display_id)['data']
|
||||
properties = page_data.get('properties') or {}
|
||||
query = {
|
||||
'mbr': 'true',
|
||||
'manifest': 'm3u',
|
||||
}
|
||||
tp_path = 'M_UwQC/media/' + properties['videoPid']
|
||||
media_url = 'https://link.theplatform.com/s/' + tp_path
|
||||
|
||||
video_player_count = 0
|
||||
try:
|
||||
for v in page_data['children']:
|
||||
if v.get('type') == 'video-player':
|
||||
releasePid = v['properties']['currentVideo']['meta']['releasePid']
|
||||
tp_path = 'M_UwQC/' + releasePid
|
||||
media_url = 'https://link.theplatform.com/s/' + tp_path
|
||||
video_player_count += 1
|
||||
except KeyError:
|
||||
pass
|
||||
if video_player_count > 1:
|
||||
self.report_warning(
|
||||
'The JSON data has %d video players. Only one will be extracted' % video_player_count)
|
||||
|
||||
# Fall back to videoPid if releasePid not found.
|
||||
# TODO: Fall back to videoPid if releasePid manifest uses DRM.
|
||||
if not video_player_count:
|
||||
tp_path = 'M_UwQC/media/' + properties['videoPid']
|
||||
media_url = 'https://link.theplatform.com/s/' + tp_path
|
||||
|
||||
theplatform_metadata = self._download_theplatform_metadata(tp_path, display_id)
|
||||
info = self._parse_theplatform_metadata(theplatform_metadata)
|
||||
video_id = theplatform_metadata['pid']
|
||||
@@ -90,30 +110,41 @@ class AMCNetworksIE(ThePlatformIE):
|
||||
formats, subtitles = self._extract_theplatform_smil(
|
||||
media_url, video_id)
|
||||
self._sort_formats(formats)
|
||||
|
||||
thumbnails = []
|
||||
thumbnail_urls = [properties.get('imageDesktop')]
|
||||
if 'thumbnail' in info:
|
||||
thumbnail_urls.append(info.pop('thumbnail'))
|
||||
for thumbnail_url in thumbnail_urls:
|
||||
if not thumbnail_url:
|
||||
continue
|
||||
mobj = re.search(r'(\d+)x(\d+)', thumbnail_url)
|
||||
thumbnails.append({
|
||||
'url': thumbnail_url,
|
||||
'width': int(mobj.group(1)) if mobj else None,
|
||||
'height': int(mobj.group(2)) if mobj else None,
|
||||
})
|
||||
|
||||
info.update({
|
||||
'age_limit': parse_age_limit(rating),
|
||||
'formats': formats,
|
||||
'id': video_id,
|
||||
'subtitles': subtitles,
|
||||
'formats': formats,
|
||||
'age_limit': parse_age_limit(parse_age_limit(rating)),
|
||||
'thumbnails': thumbnails,
|
||||
})
|
||||
ns_keys = theplatform_metadata.get('$xmlns', {}).keys()
|
||||
if ns_keys:
|
||||
ns = list(ns_keys)[0]
|
||||
series = theplatform_metadata.get(ns + '$show')
|
||||
season_number = int_or_none(
|
||||
theplatform_metadata.get(ns + '$season'))
|
||||
episode = theplatform_metadata.get(ns + '$episodeTitle')
|
||||
episode = theplatform_metadata.get(ns + '$episodeTitle') or None
|
||||
episode_number = int_or_none(
|
||||
theplatform_metadata.get(ns + '$episode'))
|
||||
if season_number:
|
||||
title = 'Season %d - %s' % (season_number, title)
|
||||
if series:
|
||||
title = '%s - %s' % (series, title)
|
||||
season_number = int_or_none(
|
||||
theplatform_metadata.get(ns + '$season'))
|
||||
series = theplatform_metadata.get(ns + '$show') or None
|
||||
info.update({
|
||||
'title': title,
|
||||
'series': series,
|
||||
'season_number': season_number,
|
||||
'episode': episode,
|
||||
'episode_number': episode_number,
|
||||
'season_number': season_number,
|
||||
'series': series,
|
||||
})
|
||||
return info
|
||||
|
||||
@@ -42,6 +42,7 @@ class ApplePodcastsIE(InfoExtractor):
|
||||
ember_data = self._parse_json(self._search_regex(
|
||||
r'id="shoebox-ember-data-store"[^>]*>\s*({.+?})\s*<',
|
||||
webpage, 'ember data'), episode_id)
|
||||
ember_data = ember_data.get(episode_id) or ember_data
|
||||
episode = ember_data['data']['attributes']
|
||||
description = episode.get('description') or {}
|
||||
|
||||
|
||||
@@ -1,22 +1,36 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urllib_parse_unquote_plus
|
||||
from .youtube import YoutubeIE
|
||||
from ..compat import (
|
||||
compat_urllib_parse_unquote,
|
||||
compat_urllib_parse_unquote_plus,
|
||||
compat_urlparse,
|
||||
compat_parse_qs,
|
||||
compat_HTTPError
|
||||
)
|
||||
from ..utils import (
|
||||
KNOWN_EXTENSIONS,
|
||||
|
||||
clean_html,
|
||||
determine_ext,
|
||||
dict_get,
|
||||
extract_attributes,
|
||||
ExtractorError,
|
||||
HEADRequest,
|
||||
int_or_none,
|
||||
KNOWN_EXTENSIONS,
|
||||
merge_dicts,
|
||||
mimetype2ext,
|
||||
parse_duration,
|
||||
RegexNotFoundError,
|
||||
str_to_int,
|
||||
str_or_none,
|
||||
try_get,
|
||||
unified_strdate,
|
||||
unified_timestamp,
|
||||
clean_html,
|
||||
dict_get,
|
||||
parse_duration,
|
||||
int_or_none,
|
||||
str_or_none,
|
||||
merge_dicts,
|
||||
)
|
||||
|
||||
|
||||
@@ -241,3 +255,165 @@ class ArchiveOrgIE(InfoExtractor):
|
||||
'parent': 'root'})
|
||||
|
||||
return info
|
||||
|
||||
|
||||
class YoutubeWebArchiveIE(InfoExtractor):
|
||||
IE_NAME = 'web.archive:youtube'
|
||||
IE_DESC = 'web.archive.org saved youtube videos'
|
||||
_VALID_URL = r"""(?x)^
|
||||
(?:https?://)?web\.archive\.org/
|
||||
(?:web/)?
|
||||
(?:[0-9A-Za-z_*]+/)? # /web and the version index is optional
|
||||
|
||||
(?:https?(?::|%3[Aa])//)?
|
||||
(?:
|
||||
(?:\w+\.)?youtube\.com/watch(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD]) # Youtube URL
|
||||
|(wayback-fakeurl\.archive\.org/yt/) # Or the internal fake url
|
||||
)
|
||||
(?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
|
||||
"""
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'https://web.archive.org/web/20150415002341/https://www.youtube.com/watch?v=aYAGB11YrSs',
|
||||
'info_dict': {
|
||||
'id': 'aYAGB11YrSs',
|
||||
'ext': 'webm',
|
||||
'title': 'Team Fortress 2 - Sandviches!'
|
||||
}
|
||||
},
|
||||
{
|
||||
# Internal link
|
||||
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
|
||||
'info_dict': {
|
||||
'id': '97t7Xj_iBv0',
|
||||
'ext': 'mp4',
|
||||
'title': 'How Flexible Machines Could Save The World'
|
||||
}
|
||||
},
|
||||
{
|
||||
# Video from 2012, webm format itag 45.
|
||||
'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
|
||||
'info_dict': {
|
||||
'id': 'AkhihxRKcrs',
|
||||
'ext': 'webm',
|
||||
'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)'
|
||||
}
|
||||
},
|
||||
{
|
||||
# Old flash-only video. Webpage title starts with "YouTube - ".
|
||||
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
|
||||
'info_dict': {
|
||||
'id': 'jNQXAC9IVRw',
|
||||
'ext': 'unknown_video',
|
||||
'title': 'Me at the zoo'
|
||||
}
|
||||
},
|
||||
{
|
||||
# Flash video with .flv extension (itag 34). Title has prefix "YouTube -"
|
||||
# Title has some weird unicode characters too.
|
||||
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
|
||||
'info_dict': {
|
||||
'id': 'lTx3G6h2xyA',
|
||||
'ext': 'flv',
|
||||
'title': 'Madeon - Pop Culture (live mashup)'
|
||||
}
|
||||
},
|
||||
{ # Some versions of Youtube have have "YouTube" as page title in html (and later rewritten by js).
|
||||
'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
|
||||
'info_dict': {
|
||||
'id': 'kH-G_aIBlFw',
|
||||
'ext': 'mp4',
|
||||
'title': 'kH-G_aIBlFw'
|
||||
},
|
||||
'expected_warnings': [
|
||||
'unable to extract title',
|
||||
]
|
||||
},
|
||||
{
|
||||
# First capture is a 302 redirect intermediary page.
|
||||
'url': 'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=0altSZ96U4M',
|
||||
'info_dict': {
|
||||
'id': '0altSZ96U4M',
|
||||
'ext': 'mp4',
|
||||
'title': '0altSZ96U4M'
|
||||
},
|
||||
'expected_warnings': [
|
||||
'unable to extract title',
|
||||
]
|
||||
},
|
||||
{
|
||||
# Video not archived, only capture is unavailable video page
|
||||
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
|
||||
'only_matching': True,
|
||||
},
|
||||
{ # Encoded url
|
||||
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
|
||||
'only_matching': True,
|
||||
},
|
||||
{
|
||||
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
|
||||
'only_matching': True,
|
||||
}
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
title = video_id # if we are not able get a title
|
||||
|
||||
def _extract_title(webpage):
|
||||
page_title = self._html_search_regex(
|
||||
r'<title>([^<]*)</title>', webpage, 'title', fatal=False) or ''
|
||||
# YouTube video pages appear to always have either 'YouTube -' as suffix or '- YouTube' as prefix.
|
||||
try:
|
||||
page_title = self._html_search_regex(
|
||||
r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
|
||||
page_title, 'title', default='')
|
||||
except RegexNotFoundError:
|
||||
page_title = None
|
||||
|
||||
if not page_title:
|
||||
self.report_warning('unable to extract title', video_id=video_id)
|
||||
return
|
||||
return page_title
|
||||
|
||||
# If the video is no longer available, the oldest capture may be one before it was removed.
|
||||
# Setting the capture date in url to early date seems to redirect to earliest capture.
|
||||
webpage = self._download_webpage(
|
||||
'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=%s' % video_id,
|
||||
video_id=video_id, fatal=False, errnote='unable to download video webpage (probably not archived).')
|
||||
if webpage:
|
||||
title = _extract_title(webpage) or title
|
||||
|
||||
# Use link translator mentioned in https://github.com/ytdl-org/youtube-dl/issues/13655
|
||||
internal_fake_url = 'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id
|
||||
try:
|
||||
video_file_webpage = self._request_webpage(
|
||||
HEADRequest(internal_fake_url), video_id,
|
||||
note='Fetching video file url', expected_status=True)
|
||||
except ExtractorError as e:
|
||||
# HTTP Error 404 is expected if the video is not saved.
|
||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
|
||||
raise ExtractorError(
|
||||
'HTTP Error %s. Most likely the video is not archived or issue with web.archive.org.' % e.cause.code,
|
||||
expected=True)
|
||||
raise
|
||||
video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
|
||||
video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)
|
||||
|
||||
# Attempt to recover any ext & format info from playback url
|
||||
format = {'url': video_file_url}
|
||||
itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
|
||||
if itag and itag in YoutubeIE._formats: # Naughty access but it works
|
||||
format.update(YoutubeIE._formats[itag])
|
||||
format.update({'format_id': itag})
|
||||
else:
|
||||
mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
|
||||
ext = mimetype2ext(mime) or determine_ext(video_file_url)
|
||||
format.update({'ext': ext})
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': [format],
|
||||
'duration': str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
|
||||
}
|
||||
|
||||
@@ -36,12 +36,12 @@ class ARDMediathekBaseIE(InfoExtractor):
|
||||
|
||||
if not formats:
|
||||
if fsk:
|
||||
raise ExtractorError(
|
||||
self.raise_no_formats(
|
||||
'This video is only available after 20:00', expected=True)
|
||||
elif media_info.get('_geoblocked'):
|
||||
self.raise_geo_restricted(
|
||||
'This video is not available due to geoblocking',
|
||||
countries=self._GEO_COUNTRIES)
|
||||
countries=self._GEO_COUNTRIES, metadata_available=True)
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
@@ -272,7 +272,8 @@ class ARDMediathekIE(ARDMediathekBaseIE):
|
||||
else: # request JSON file
|
||||
if not document_id:
|
||||
video_id = self._search_regex(
|
||||
r'/play/(?:config|media)/(\d+)', webpage, 'media id')
|
||||
(r'/play/(?:config|media|sola)/(\d+)', r'contentId["\']\s*:\s*(\d+)'),
|
||||
webpage, 'media id', default=None)
|
||||
info = self._extract_media_info(
|
||||
'http://www.ardmediathek.de/play/media/%s' % video_id,
|
||||
webpage, video_id)
|
||||
@@ -289,14 +290,14 @@ class ARDMediathekIE(ARDMediathekBaseIE):
|
||||
|
||||
|
||||
class ARDIE(InfoExtractor):
|
||||
_VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/[^?#]+/videos(?:extern)?/(?P<display_id>[^/?#]+)-(?:video-?)?(?P<id>[0-9]+))\.html'
|
||||
_VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html'
|
||||
_TESTS = [{
|
||||
# available till 7.01.2022
|
||||
'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-die-woche-video100.html',
|
||||
'md5': '867d8aa39eeaf6d76407c5ad1bb0d4c1',
|
||||
'info_dict': {
|
||||
'display_id': 'maischberger-die-woche',
|
||||
'id': '100',
|
||||
'id': 'maischberger-die-woche-video100',
|
||||
'display_id': 'maischberger-die-woche-video100',
|
||||
'ext': 'mp4',
|
||||
'duration': 3687.0,
|
||||
'title': 'maischberger. die woche vom 7. Januar 2021',
|
||||
@@ -304,16 +305,28 @@ class ARDIE(InfoExtractor):
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.daserste.de/information/reportage-dokumentation/erlebnis-erde/videosextern/woelfe-und-herdenschutzhunde-ungleiche-brueder-102.html',
|
||||
'url': 'https://www.daserste.de/information/politik-weltgeschehen/morgenmagazin/videosextern/dominik-kahun-aus-der-nhl-direkt-zur-weltmeisterschaft-100.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.daserste.de/information/nachrichten-wetter/tagesthemen/videosextern/tagesthemen-17736.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.daserste.de/unterhaltung/serie/in-aller-freundschaft-die-jungen-aerzte/videos/diversity-tag-sanam-afrashteh100.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.daserste.de/unterhaltung/serie/in-aller-freundschaft-die-jungen-aerzte/Drehpause-100.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.daserste.de/unterhaltung/film/filmmittwoch-im-ersten/videos/making-ofwendezeit-video-100.html',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
display_id = mobj.group('display_id')
|
||||
display_id = mobj.group('id')
|
||||
|
||||
player_url = mobj.group('mainurl') + '~playerXml.xml'
|
||||
doc = self._download_xml(player_url, display_id)
|
||||
@@ -364,7 +377,7 @@ class ARDIE(InfoExtractor):
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': mobj.group('id'),
|
||||
'id': xpath_text(video_node, './videoId', default=display_id),
|
||||
'formats': formats,
|
||||
'display_id': display_id,
|
||||
'title': video_node.find('./title').text,
|
||||
|
||||
101
yt_dlp/extractor/arnes.py
Normal file
101
yt_dlp/extractor/arnes.py
Normal file
@@ -0,0 +1,101 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_parse_qs,
|
||||
compat_urllib_parse_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
parse_iso8601,
|
||||
remove_start,
|
||||
)
|
||||
|
||||
|
||||
class ArnesIE(InfoExtractor):
|
||||
IE_NAME = 'video.arnes.si'
|
||||
IE_DESC = 'Arnes Video'
|
||||
_VALID_URL = r'https?://video\.arnes\.si/(?:[a-z]{2}/)?(?:watch|embed|api/(?:asset|public/video))/(?P<id>[0-9a-zA-Z]{12})'
|
||||
_TESTS = [{
|
||||
'url': 'https://video.arnes.si/watch/a1qrWTOQfVoU?t=10',
|
||||
'md5': '4d0f4d0a03571b33e1efac25fd4a065d',
|
||||
'info_dict': {
|
||||
'id': 'a1qrWTOQfVoU',
|
||||
'ext': 'mp4',
|
||||
'title': 'Linearna neodvisnost, definicija',
|
||||
'description': 'Linearna neodvisnost, definicija',
|
||||
'license': 'PRIVATE',
|
||||
'creator': 'Polona Oblak',
|
||||
'timestamp': 1585063725,
|
||||
'upload_date': '20200324',
|
||||
'channel': 'Polona Oblak',
|
||||
'channel_id': 'q6pc04hw24cj',
|
||||
'channel_url': 'https://video.arnes.si/?channel=q6pc04hw24cj',
|
||||
'duration': 596.75,
|
||||
'view_count': int,
|
||||
'tags': ['linearna_algebra'],
|
||||
'start_time': 10,
|
||||
}
|
||||
}, {
|
||||
'url': 'https://video.arnes.si/api/asset/s1YjnV7hadlC/play.mp4',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.arnes.si/embed/s1YjnV7hadlC',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.arnes.si/en/watch/s1YjnV7hadlC',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.arnes.si/embed/s1YjnV7hadlC?t=123&hideRelated=1',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.arnes.si/api/public/video/s1YjnV7hadlC',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_BASE_URL = 'https://video.arnes.si'
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
video = self._download_json(
|
||||
self._BASE_URL + '/api/public/video/' + video_id, video_id)['data']
|
||||
title = video['title']
|
||||
|
||||
formats = []
|
||||
for media in (video.get('media') or []):
|
||||
media_url = media.get('url')
|
||||
if not media_url:
|
||||
continue
|
||||
formats.append({
|
||||
'url': self._BASE_URL + media_url,
|
||||
'format_id': remove_start(media.get('format'), 'FORMAT_'),
|
||||
'format_note': media.get('formatTranslation'),
|
||||
'width': int_or_none(media.get('width')),
|
||||
'height': int_or_none(media.get('height')),
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
channel = video.get('channel') or {}
|
||||
channel_id = channel.get('url')
|
||||
thumbnail = video.get('thumbnailUrl')
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnail': self._BASE_URL + thumbnail,
|
||||
'description': video.get('description'),
|
||||
'license': video.get('license'),
|
||||
'creator': video.get('author'),
|
||||
'timestamp': parse_iso8601(video.get('creationTime')),
|
||||
'channel': channel.get('name'),
|
||||
'channel_id': channel_id,
|
||||
'channel_url': self._BASE_URL + '/?channel=' + channel_id if channel_id else None,
|
||||
'duration': float_or_none(video.get('duration'), 1000),
|
||||
'view_count': int_or_none(video.get('views')),
|
||||
'tags': video.get('hashtags'),
|
||||
'start_time': int_or_none(compat_parse_qs(
|
||||
compat_urllib_parse_urlparse(url).query).get('t', [None])[0]),
|
||||
}
|
||||
@@ -86,18 +86,19 @@ class AtresPlayerIE(InfoExtractor):
|
||||
title = episode['titulo']
|
||||
|
||||
formats = []
|
||||
subtitles = {}
|
||||
for source in episode.get('sources', []):
|
||||
src = source.get('src')
|
||||
if not src:
|
||||
continue
|
||||
src_type = source.get('type')
|
||||
if src_type == 'application/vnd.apple.mpegurl':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
formats, subtitles = self._extract_m3u8_formats(
|
||||
src, video_id, 'mp4', 'm3u8_native',
|
||||
m3u8_id='hls', fatal=False))
|
||||
m3u8_id='hls', fatal=False)
|
||||
elif src_type == 'application/dash+xml':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
src, video_id, mpd_id='dash', fatal=False))
|
||||
formats, subtitles = self._extract_mpd_formats(
|
||||
src, video_id, mpd_id='dash', fatal=False)
|
||||
self._sort_formats(formats)
|
||||
|
||||
heartbeat = episode.get('heartbeat') or {}
|
||||
@@ -115,4 +116,5 @@ class AtresPlayerIE(InfoExtractor):
|
||||
'channel': get_meta('channel'),
|
||||
'season': get_meta('season'),
|
||||
'episode_number': int_or_none(get_meta('episodeNumber')),
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
||||
@@ -245,3 +245,31 @@ class AudiusPlaylistIE(AudiusBaseIE):
|
||||
return self.playlist_result(entries, playlist_id,
|
||||
playlist_data.get('playlist_name', title),
|
||||
playlist_data.get('description'))
|
||||
|
||||
|
||||
class AudiusProfileIE(AudiusPlaylistIE):
|
||||
IE_NAME = 'audius:artist'
|
||||
IE_DESC = 'Audius.co profile/artist pages'
|
||||
_VALID_URL = r'https?://(?:www)?audius\.co/(?P<id>[^\/]+)/?(?:[?#]|$)'
|
||||
_TEST = {
|
||||
'url': 'https://audius.co/pzl/',
|
||||
'info_dict': {
|
||||
'id': 'ezRo7',
|
||||
'description': 'TAMALE\n\nContact: officialpzl@gmail.com',
|
||||
'title': 'pzl',
|
||||
},
|
||||
'playlist_count': 24,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
self._select_api_base()
|
||||
profile_id = self._match_id(url)
|
||||
try:
|
||||
_profile_data = self._api_request('/full/users/handle/' + profile_id, profile_id)
|
||||
except ExtractorError as e:
|
||||
raise ExtractorError('Could not download profile info; ' + str(e))
|
||||
profile_audius_id = _profile_data[0]['id']
|
||||
profile_bio = _profile_data[0].get('bio')
|
||||
|
||||
api_call = self._api_request('/full/users/handle/%s/tracks' % profile_id, profile_id)
|
||||
return self.playlist_result(self._build_playlist(api_call), profile_audius_id, profile_id, profile_bio)
|
||||
|
||||
@@ -49,6 +49,7 @@ class BandcampIE(InfoExtractor):
|
||||
'uploader': 'Ben Prunty',
|
||||
'timestamp': 1396508491,
|
||||
'upload_date': '20140403',
|
||||
'release_timestamp': 1396483200,
|
||||
'release_date': '20140403',
|
||||
'duration': 260.877,
|
||||
'track': 'Lanius (Battle)',
|
||||
@@ -69,6 +70,7 @@ class BandcampIE(InfoExtractor):
|
||||
'uploader': 'Mastodon',
|
||||
'timestamp': 1322005399,
|
||||
'upload_date': '20111122',
|
||||
'release_timestamp': 1076112000,
|
||||
'release_date': '20040207',
|
||||
'duration': 120.79,
|
||||
'track': 'Hail to Fire',
|
||||
@@ -197,7 +199,7 @@ class BandcampIE(InfoExtractor):
|
||||
'thumbnail': thumbnail,
|
||||
'uploader': artist,
|
||||
'timestamp': timestamp,
|
||||
'release_date': unified_strdate(tralbum.get('album_release_date')),
|
||||
'release_timestamp': unified_timestamp(tralbum.get('album_release_date')),
|
||||
'duration': duration,
|
||||
'track': track,
|
||||
'track_number': track_number,
|
||||
|
||||
@@ -1,17 +1,23 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import functools
|
||||
import itertools
|
||||
import json
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_etree_Element,
|
||||
compat_HTTPError,
|
||||
compat_parse_qs,
|
||||
compat_str,
|
||||
compat_urllib_parse_urlparse,
|
||||
compat_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
OnDemandPagedList,
|
||||
clean_html,
|
||||
dict_get,
|
||||
float_or_none,
|
||||
@@ -20,8 +26,10 @@ from ..utils import (
|
||||
js_to_json,
|
||||
parse_duration,
|
||||
parse_iso8601,
|
||||
strip_or_none,
|
||||
try_get,
|
||||
unescapeHTML,
|
||||
unified_timestamp,
|
||||
url_or_none,
|
||||
urlencode_postdata,
|
||||
urljoin,
|
||||
@@ -756,8 +764,17 @@ class BBCIE(BBCCoUkIE):
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# custom redirection to www.bbc.com
|
||||
# also, video with window.__INITIAL_DATA__
|
||||
'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
|
||||
'only_matching': True,
|
||||
'info_dict': {
|
||||
'id': 'p02xzws1',
|
||||
'ext': 'mp4',
|
||||
'title': "Pluto may have 'nitrogen glaciers'",
|
||||
'description': 'md5:6a95b593f528d7a5f2605221bc56912f',
|
||||
'thumbnail': r're:https?://.+/.+\.jpg',
|
||||
'timestamp': 1437785037,
|
||||
'upload_date': '20150725',
|
||||
},
|
||||
}, {
|
||||
# single video article embedded with data-media-vpid
|
||||
'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
|
||||
@@ -811,7 +828,7 @@ class BBCIE(BBCCoUkIE):
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerPlaylistIE, BBCCoUkPlaylistIE)
|
||||
EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerEpisodesIE, BBCCoUkIPlayerGroupIE, BBCCoUkPlaylistIE)
|
||||
return (False if any(ie.suitable(url) for ie in EXCLUDE_IE)
|
||||
else super(BBCIE, cls).suitable(url))
|
||||
|
||||
@@ -1159,12 +1176,29 @@ class BBCIE(BBCCoUkIE):
|
||||
continue
|
||||
formats, subtitles = self._download_media_selector(item_id)
|
||||
self._sort_formats(formats)
|
||||
item_desc = None
|
||||
blocks = try_get(media, lambda x: x['summary']['blocks'], list)
|
||||
if blocks:
|
||||
summary = []
|
||||
for block in blocks:
|
||||
text = try_get(block, lambda x: x['model']['text'], compat_str)
|
||||
if text:
|
||||
summary.append(text)
|
||||
if summary:
|
||||
item_desc = '\n\n'.join(summary)
|
||||
item_time = None
|
||||
for meta in try_get(media, lambda x: x['metadata']['items'], list) or []:
|
||||
if try_get(meta, lambda x: x['label']) == 'Published':
|
||||
item_time = unified_timestamp(meta.get('timestamp'))
|
||||
break
|
||||
entries.append({
|
||||
'id': item_id,
|
||||
'title': item_title,
|
||||
'thumbnail': item.get('holdingImageUrl'),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'timestamp': item_time,
|
||||
'description': strip_or_none(item_desc),
|
||||
})
|
||||
for resp in (initial_data.get('data') or {}).values():
|
||||
name = resp.get('name')
|
||||
@@ -1237,7 +1271,7 @@ class BBCIE(BBCCoUkIE):
|
||||
entries = []
|
||||
for num, media_meta in enumerate(medias, start=1):
|
||||
formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
|
||||
if not formats:
|
||||
if not formats and not self.get_param('ignore_no_formats'):
|
||||
continue
|
||||
self._sort_formats(formats)
|
||||
|
||||
@@ -1338,21 +1372,149 @@ class BBCCoUkPlaylistBaseIE(InfoExtractor):
|
||||
playlist_id, title, description)
|
||||
|
||||
|
||||
class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
|
||||
IE_NAME = 'bbc.co.uk:iplayer:playlist'
|
||||
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/(?:episodes|group)/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
|
||||
_URL_TEMPLATE = 'http://www.bbc.co.uk/iplayer/episode/%s'
|
||||
_VIDEO_ID_TEMPLATE = r'data-ip-id=["\'](%s)'
|
||||
class BBCCoUkIPlayerPlaylistBaseIE(InfoExtractor):
|
||||
_VALID_URL_TMPL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/%%s/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
|
||||
|
||||
@staticmethod
|
||||
def _get_default(episode, key, default_key='default'):
|
||||
return try_get(episode, lambda x: x[key][default_key])
|
||||
|
||||
def _get_description(self, data):
|
||||
synopsis = data.get(self._DESCRIPTION_KEY) or {}
|
||||
return dict_get(synopsis, ('large', 'medium', 'small'))
|
||||
|
||||
def _fetch_page(self, programme_id, per_page, series_id, page):
|
||||
elements = self._get_elements(self._call_api(
|
||||
programme_id, per_page, page + 1, series_id))
|
||||
for element in elements:
|
||||
episode = self._get_episode(element)
|
||||
episode_id = episode.get('id')
|
||||
if not episode_id:
|
||||
continue
|
||||
thumbnail = None
|
||||
image = self._get_episode_image(episode)
|
||||
if image:
|
||||
thumbnail = image.replace('{recipe}', 'raw')
|
||||
category = self._get_default(episode, 'labels', 'category')
|
||||
yield {
|
||||
'_type': 'url',
|
||||
'id': episode_id,
|
||||
'title': self._get_episode_field(episode, 'subtitle'),
|
||||
'url': 'https://www.bbc.co.uk/iplayer/episode/' + episode_id,
|
||||
'thumbnail': thumbnail,
|
||||
'description': self._get_description(episode),
|
||||
'categories': [category] if category else None,
|
||||
'series': self._get_episode_field(episode, 'title'),
|
||||
'ie_key': BBCCoUkIE.ie_key(),
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
pid = self._match_id(url)
|
||||
qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
|
||||
series_id = qs.get('seriesId', [None])[0]
|
||||
page = qs.get('page', [None])[0]
|
||||
per_page = 36 if page else self._PAGE_SIZE
|
||||
fetch_page = functools.partial(self._fetch_page, pid, per_page, series_id)
|
||||
entries = fetch_page(int(page) - 1) if page else OnDemandPagedList(fetch_page, self._PAGE_SIZE)
|
||||
playlist_data = self._get_playlist_data(self._call_api(pid, 1))
|
||||
return self.playlist_result(
|
||||
entries, pid, self._get_playlist_title(playlist_data),
|
||||
self._get_description(playlist_data))
|
||||
|
||||
|
||||
class BBCCoUkIPlayerEpisodesIE(BBCCoUkIPlayerPlaylistBaseIE):
|
||||
IE_NAME = 'bbc.co.uk:iplayer:episodes'
|
||||
_VALID_URL = BBCCoUkIPlayerPlaylistBaseIE._VALID_URL_TMPL % 'episodes'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.bbc.co.uk/iplayer/episodes/b05rcz9v',
|
||||
'info_dict': {
|
||||
'id': 'b05rcz9v',
|
||||
'title': 'The Disappearance',
|
||||
'description': 'French thriller serial about a missing teenager.',
|
||||
'description': 'md5:58eb101aee3116bad4da05f91179c0cb',
|
||||
},
|
||||
'playlist_mincount': 6,
|
||||
'skip': 'This programme is not currently available on BBC iPlayer',
|
||||
'playlist_mincount': 8,
|
||||
}, {
|
||||
# all seasons
|
||||
'url': 'https://www.bbc.co.uk/iplayer/episodes/b094m5t9/doctor-foster',
|
||||
'info_dict': {
|
||||
'id': 'b094m5t9',
|
||||
'title': 'Doctor Foster',
|
||||
'description': 'md5:5aa9195fad900e8e14b52acd765a9fd6',
|
||||
},
|
||||
'playlist_mincount': 10,
|
||||
}, {
|
||||
# explicit season
|
||||
'url': 'https://www.bbc.co.uk/iplayer/episodes/b094m5t9/doctor-foster?seriesId=b094m6nv',
|
||||
'info_dict': {
|
||||
'id': 'b094m5t9',
|
||||
'title': 'Doctor Foster',
|
||||
'description': 'md5:5aa9195fad900e8e14b52acd765a9fd6',
|
||||
},
|
||||
'playlist_mincount': 5,
|
||||
}, {
|
||||
# all pages
|
||||
'url': 'https://www.bbc.co.uk/iplayer/episodes/m0004c4v/beechgrove',
|
||||
'info_dict': {
|
||||
'id': 'm0004c4v',
|
||||
'title': 'Beechgrove',
|
||||
'description': 'Gardening show that celebrates Scottish horticulture and growing conditions.',
|
||||
},
|
||||
'playlist_mincount': 37,
|
||||
}, {
|
||||
# explicit page
|
||||
'url': 'https://www.bbc.co.uk/iplayer/episodes/m0004c4v/beechgrove?page=2',
|
||||
'info_dict': {
|
||||
'id': 'm0004c4v',
|
||||
'title': 'Beechgrove',
|
||||
'description': 'Gardening show that celebrates Scottish horticulture and growing conditions.',
|
||||
},
|
||||
'playlist_mincount': 1,
|
||||
}]
|
||||
_PAGE_SIZE = 100
|
||||
_DESCRIPTION_KEY = 'synopsis'
|
||||
|
||||
def _get_episode_image(self, episode):
|
||||
return self._get_default(episode, 'image')
|
||||
|
||||
def _get_episode_field(self, episode, field):
|
||||
return self._get_default(episode, field)
|
||||
|
||||
@staticmethod
|
||||
def _get_elements(data):
|
||||
return data['entities']['results']
|
||||
|
||||
@staticmethod
|
||||
def _get_episode(element):
|
||||
return element.get('episode') or {}
|
||||
|
||||
def _call_api(self, pid, per_page, page=1, series_id=None):
|
||||
variables = {
|
||||
'id': pid,
|
||||
'page': page,
|
||||
'perPage': per_page,
|
||||
}
|
||||
if series_id:
|
||||
variables['sliceId'] = series_id
|
||||
return self._download_json(
|
||||
'https://graph.ibl.api.bbc.co.uk/', pid, headers={
|
||||
'Content-Type': 'application/json'
|
||||
}, data=json.dumps({
|
||||
'id': '5692d93d5aac8d796a0305e895e61551',
|
||||
'variables': variables,
|
||||
}).encode('utf-8'))['data']['programme']
|
||||
|
||||
@staticmethod
|
||||
def _get_playlist_data(data):
|
||||
return data
|
||||
|
||||
def _get_playlist_title(self, data):
|
||||
return self._get_default(data, 'title')
|
||||
|
||||
|
||||
class BBCCoUkIPlayerGroupIE(BBCCoUkIPlayerPlaylistBaseIE):
|
||||
IE_NAME = 'bbc.co.uk:iplayer:group'
|
||||
_VALID_URL = BBCCoUkIPlayerPlaylistBaseIE._VALID_URL_TMPL % 'group'
|
||||
_TESTS = [{
|
||||
# Available for over a year unlike 30 days for most other programmes
|
||||
'url': 'http://www.bbc.co.uk/iplayer/group/p02tcc32',
|
||||
'info_dict': {
|
||||
@@ -1361,14 +1523,56 @@ class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
|
||||
'description': 'md5:683e901041b2fe9ba596f2ab04c4dbe7',
|
||||
},
|
||||
'playlist_mincount': 10,
|
||||
}, {
|
||||
# all pages
|
||||
'url': 'https://www.bbc.co.uk/iplayer/group/p081d7j7',
|
||||
'info_dict': {
|
||||
'id': 'p081d7j7',
|
||||
'title': 'Music in Scotland',
|
||||
'description': 'Perfomances in Scotland and programmes featuring Scottish acts.',
|
||||
},
|
||||
'playlist_mincount': 47,
|
||||
}, {
|
||||
# explicit page
|
||||
'url': 'https://www.bbc.co.uk/iplayer/group/p081d7j7?page=2',
|
||||
'info_dict': {
|
||||
'id': 'p081d7j7',
|
||||
'title': 'Music in Scotland',
|
||||
'description': 'Perfomances in Scotland and programmes featuring Scottish acts.',
|
||||
},
|
||||
'playlist_mincount': 11,
|
||||
}]
|
||||
_PAGE_SIZE = 200
|
||||
_DESCRIPTION_KEY = 'synopses'
|
||||
|
||||
def _extract_title_and_description(self, webpage):
|
||||
title = self._search_regex(r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
|
||||
description = self._search_regex(
|
||||
r'<p[^>]+class=(["\'])subtitle\1[^>]*>(?P<value>[^<]+)</p>',
|
||||
webpage, 'description', fatal=False, group='value')
|
||||
return title, description
|
||||
def _get_episode_image(self, episode):
|
||||
return self._get_default(episode, 'images', 'standard')
|
||||
|
||||
def _get_episode_field(self, episode, field):
|
||||
return episode.get(field)
|
||||
|
||||
@staticmethod
|
||||
def _get_elements(data):
|
||||
return data['elements']
|
||||
|
||||
@staticmethod
|
||||
def _get_episode(element):
|
||||
return element
|
||||
|
||||
def _call_api(self, pid, per_page, page=1, series_id=None):
|
||||
return self._download_json(
|
||||
'http://ibl.api.bbc.co.uk/ibl/v1/groups/%s/episodes' % pid,
|
||||
pid, query={
|
||||
'page': page,
|
||||
'per_page': per_page,
|
||||
})['group_episodes']
|
||||
|
||||
@staticmethod
|
||||
def _get_playlist_data(data):
|
||||
return data['group']
|
||||
|
||||
def _get_playlist_title(self, data):
|
||||
return data.get('title')
|
||||
|
||||
|
||||
class BBCCoUkPlaylistIE(BBCCoUkPlaylistBaseIE):
|
||||
|
||||
@@ -2,11 +2,13 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import hashlib
|
||||
import itertools
|
||||
import json
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor, SearchInfoExtractor
|
||||
from ..compat import (
|
||||
compat_str,
|
||||
compat_parse_qs,
|
||||
compat_urlparse,
|
||||
)
|
||||
@@ -15,6 +17,7 @@ from ..utils import (
|
||||
int_or_none,
|
||||
float_or_none,
|
||||
parse_iso8601,
|
||||
try_get,
|
||||
smuggle_url,
|
||||
str_or_none,
|
||||
strip_jsonp,
|
||||
@@ -113,6 +116,14 @@ class BiliBiliIE(InfoExtractor):
|
||||
# new BV video id format
|
||||
'url': 'https://www.bilibili.com/video/BV1JE411F741',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# Anthology
|
||||
'url': 'https://www.bilibili.com/video/BV1bK411W797',
|
||||
'info_dict': {
|
||||
'id': 'BV1bK411W797',
|
||||
'title': '物语中的人物是如何吐槽自己的OP的'
|
||||
},
|
||||
'playlist_count': 17,
|
||||
}]
|
||||
|
||||
_APP_KEY = 'iVGUTjsxvpLeuDCf'
|
||||
@@ -139,9 +150,20 @@ class BiliBiliIE(InfoExtractor):
|
||||
page_id = mobj.group('page')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
# Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
|
||||
# If the video has no page argument, check to see if it's an anthology
|
||||
if page_id is None:
|
||||
if not self.get_param('noplaylist'):
|
||||
r = self._extract_anthology_entries(bv_id, video_id, webpage)
|
||||
if r is not None:
|
||||
self.to_screen('Downloading anthology %s - add --no-playlist to just download video' % video_id)
|
||||
return r
|
||||
else:
|
||||
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
|
||||
|
||||
if 'anime/' not in url:
|
||||
cid = self._search_regex(
|
||||
r'\bcid(?:["\']:|=)(\d+),["\']page(?:["\']:|=)' + str(page_id), webpage, 'cid',
|
||||
r'\bcid(?:["\']:|=)(\d+),["\']page(?:["\']:|=)' + compat_str(page_id), webpage, 'cid',
|
||||
default=None
|
||||
) or self._search_regex(
|
||||
r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid',
|
||||
@@ -170,6 +192,7 @@ class BiliBiliIE(InfoExtractor):
|
||||
cid = js['result']['cid']
|
||||
|
||||
headers = {
|
||||
'Accept': 'application/json',
|
||||
'Referer': url
|
||||
}
|
||||
headers.update(self.geo_verification_headers())
|
||||
@@ -223,7 +246,18 @@ class BiliBiliIE(InfoExtractor):
|
||||
title = self._html_search_regex(
|
||||
(r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
|
||||
r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
|
||||
group='title') + ('_p' + str(page_id) if page_id is not None else '')
|
||||
group='title')
|
||||
|
||||
# Get part title for anthologies
|
||||
if page_id is not None:
|
||||
# TODO: The json is already downloaded by _extract_anthology_entries. Don't redownload for each video
|
||||
part_title = try_get(
|
||||
self._download_json(
|
||||
"https://api.bilibili.com/x/player/pagelist?bvid=%s&jsonp=jsonp" % bv_id,
|
||||
video_id, note='Extracting videos in anthology'),
|
||||
lambda x: x['data'][int(page_id) - 1]['part'])
|
||||
title = part_title or title
|
||||
|
||||
description = self._html_search_meta('description', webpage)
|
||||
timestamp = unified_timestamp(self._html_search_regex(
|
||||
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
|
||||
@@ -233,7 +267,7 @@ class BiliBiliIE(InfoExtractor):
|
||||
|
||||
# TODO 'view_count' requires deobfuscating Javascript
|
||||
info = {
|
||||
'id': str(video_id) if page_id is None else '%s_p%s' % (video_id, page_id),
|
||||
'id': compat_str(video_id) if page_id is None else '%s_p%s' % (video_id, page_id),
|
||||
'cid': cid,
|
||||
'title': title,
|
||||
'description': description,
|
||||
@@ -243,7 +277,7 @@ class BiliBiliIE(InfoExtractor):
|
||||
}
|
||||
|
||||
uploader_mobj = re.search(
|
||||
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
|
||||
r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>\s*(?P<name>[^<]+?)\s*<',
|
||||
webpage)
|
||||
if uploader_mobj:
|
||||
info.update({
|
||||
@@ -265,7 +299,7 @@ class BiliBiliIE(InfoExtractor):
|
||||
'tags': tags,
|
||||
'raw_tags': raw_tags,
|
||||
}
|
||||
if self._downloader.params.get('getcomments', False):
|
||||
if self.get_param('getcomments', False):
|
||||
def get_comments():
|
||||
comments = self._get_all_comment_pages(video_id)
|
||||
return {
|
||||
@@ -299,7 +333,7 @@ class BiliBiliIE(InfoExtractor):
|
||||
|
||||
global_info = {
|
||||
'_type': 'multi_video',
|
||||
'id': video_id,
|
||||
'id': compat_str(video_id),
|
||||
'bv_id': bv_id,
|
||||
'title': title,
|
||||
'description': description,
|
||||
@@ -311,6 +345,20 @@ class BiliBiliIE(InfoExtractor):
|
||||
|
||||
return global_info
|
||||
|
||||
def _extract_anthology_entries(self, bv_id, video_id, webpage):
|
||||
title = self._html_search_regex(
|
||||
(r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
|
||||
r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
|
||||
group='title')
|
||||
json_data = self._download_json(
|
||||
"https://api.bilibili.com/x/player/pagelist?bvid=%s&jsonp=jsonp" % bv_id,
|
||||
video_id, note='Extracting videos in anthology')
|
||||
|
||||
if len(json_data['data']) > 1:
|
||||
return self.playlist_from_matches(
|
||||
json_data['data'], bv_id, title, ie=BiliBiliIE.ie_key(),
|
||||
getter=lambda entry: 'https://www.bilibili.com/video/%s?p=%d' % (bv_id, entry['page']))
|
||||
|
||||
def _get_video_id_set(self, id, is_bv):
|
||||
query = {'bvid': id} if is_bv else {'aid': id}
|
||||
response = self._download_json(
|
||||
@@ -451,28 +499,40 @@ class BiliBiliBangumiIE(InfoExtractor):
|
||||
|
||||
class BilibiliChannelIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://space.bilibili\.com/(?P<id>\d+)'
|
||||
# May need to add support for pagination? Need to find a user with many video uploads to test
|
||||
_API_URL = "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=1&ps=25&jsonp=jsonp"
|
||||
_TEST = {} # TODO: Add tests
|
||||
_API_URL = "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=%d&jsonp=jsonp"
|
||||
_TESTS = [{
|
||||
'url': 'https://space.bilibili.com/3985676/video',
|
||||
'info_dict': {},
|
||||
'playlist_mincount': 112,
|
||||
}]
|
||||
|
||||
def _entries(self, list_id):
|
||||
count, max_count = 0, None
|
||||
|
||||
for page_num in itertools.count(1):
|
||||
data = self._parse_json(
|
||||
self._download_webpage(
|
||||
self._API_URL % (list_id, page_num), list_id,
|
||||
note='Downloading page %d' % page_num),
|
||||
list_id)['data']
|
||||
|
||||
max_count = max_count or try_get(data, lambda x: x['page']['count'])
|
||||
|
||||
entries = try_get(data, lambda x: x['list']['vlist'])
|
||||
if not entries:
|
||||
return
|
||||
for entry in entries:
|
||||
yield self.url_result(
|
||||
'https://www.bilibili.com/video/%s' % entry['bvid'],
|
||||
BiliBiliIE.ie_key(), entry['bvid'])
|
||||
|
||||
count += len(entries)
|
||||
if max_count and count >= max_count:
|
||||
return
|
||||
|
||||
def _real_extract(self, url):
|
||||
list_id = self._match_id(url)
|
||||
json_str = self._download_webpage(self._API_URL % list_id, "None")
|
||||
|
||||
json_parsed = json.loads(json_str)
|
||||
entries = [{
|
||||
'_type': 'url',
|
||||
'ie_key': BiliBiliIE.ie_key(),
|
||||
'url': ('https://www.bilibili.com/video/%s' %
|
||||
entry['bvid']),
|
||||
'id': entry['bvid'],
|
||||
} for entry in json_parsed['data']['list']['vlist']]
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': list_id,
|
||||
'entries': entries
|
||||
}
|
||||
return self.playlist_result(self._entries(list_id), list_id)
|
||||
|
||||
|
||||
class BiliBiliSearchIE(SearchInfoExtractor):
|
||||
@@ -505,7 +565,7 @@ class BiliBiliSearchIE(SearchInfoExtractor):
|
||||
|
||||
videos = data['result']
|
||||
for video in videos:
|
||||
e = self.url_result(video['arcurl'], 'BiliBili', str(video['aid']))
|
||||
e = self.url_result(video['arcurl'], 'BiliBili', compat_str(video['aid']))
|
||||
entries.append(e)
|
||||
|
||||
if(len(entries) >= n or len(videos) >= BiliBiliSearchIE.MAX_NUMBER_OF_RESULTS):
|
||||
|
||||
@@ -78,8 +78,8 @@ class BlinkxIE(InfoExtractor):
|
||||
'fullid': video_id,
|
||||
'title': data['title'],
|
||||
'formats': formats,
|
||||
'uploader': data['channel_name'],
|
||||
'timestamp': data['pubdate_epoch'],
|
||||
'uploader': data.get('channel_name'),
|
||||
'timestamp': data.get('pubdate_epoch'),
|
||||
'description': data.get('description'),
|
||||
'thumbnails': thumbnails,
|
||||
'duration': duration,
|
||||
|
||||
@@ -114,7 +114,7 @@ class BRIE(InfoExtractor):
|
||||
medias.append(media)
|
||||
|
||||
if len(medias) > 1:
|
||||
self._downloader.report_warning(
|
||||
self.report_warning(
|
||||
'found multiple medias; please '
|
||||
'report this with the video URL to http://yt-dl.org/bug')
|
||||
if not medias:
|
||||
|
||||
@@ -478,7 +478,7 @@ class BrightcoveNewIE(AdobePassIE):
|
||||
container = source.get('container')
|
||||
ext = mimetype2ext(source.get('type'))
|
||||
src = source.get('src')
|
||||
skip_unplayable = not self._downloader.params.get('allow_unplayable_formats')
|
||||
skip_unplayable = not self.get_param('allow_unplayable_formats')
|
||||
# https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
|
||||
if skip_unplayable and (container == 'WVM' or source.get('key_systems')):
|
||||
num_drm_sources += 1
|
||||
@@ -545,9 +545,9 @@ class BrightcoveNewIE(AdobePassIE):
|
||||
errors = json_data.get('errors')
|
||||
if errors:
|
||||
error = errors[0]
|
||||
raise ExtractorError(
|
||||
self.raise_no_formats(
|
||||
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
|
||||
if (not self._downloader.params.get('allow_unplayable_formats')
|
||||
elif (not self.get_param('allow_unplayable_formats')
|
||||
and sources and num_drm_sources == len(sources)):
|
||||
raise ExtractorError('This video is DRM protected.', expected=True)
|
||||
|
||||
|
||||
@@ -82,6 +82,7 @@ class BYUtvIE(InfoExtractor):
|
||||
|
||||
info = {}
|
||||
formats = []
|
||||
subtitles = {}
|
||||
for format_id, ep in video.items():
|
||||
if not isinstance(ep, dict):
|
||||
continue
|
||||
@@ -90,12 +91,16 @@ class BYUtvIE(InfoExtractor):
|
||||
continue
|
||||
ext = determine_ext(video_url)
|
||||
if ext == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
|
||||
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id='hls', fatal=False))
|
||||
m3u8_id='hls', fatal=False)
|
||||
formats.extend(m3u8_fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, m3u8_subs)
|
||||
elif ext == 'mpd':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
video_url, video_id, mpd_id='dash', fatal=False))
|
||||
mpd_fmts, mpd_subs = self._extract_mpd_formats_and_subtitles(
|
||||
video_url, video_id, mpd_id='dash', fatal=False)
|
||||
formats.extend(mpd_fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, mpd_subs)
|
||||
else:
|
||||
formats.append({
|
||||
'url': video_url,
|
||||
@@ -114,4 +119,5 @@ class BYUtvIE(InfoExtractor):
|
||||
'display_id': display_id,
|
||||
'title': display_id,
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
})
|
||||
|
||||
@@ -83,24 +83,31 @@ class CanvasIE(InfoExtractor):
|
||||
description = data.get('description')
|
||||
|
||||
formats = []
|
||||
subtitles = {}
|
||||
for target in data['targetUrls']:
|
||||
format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
|
||||
if not format_url or not format_type:
|
||||
continue
|
||||
format_type = format_type.upper()
|
||||
if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||
format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
|
||||
m3u8_id=format_type, fatal=False))
|
||||
m3u8_id=format_type, fatal=False)
|
||||
formats.extend(fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, subs)
|
||||
elif format_type == 'HDS':
|
||||
formats.extend(self._extract_f4m_formats(
|
||||
format_url, video_id, f4m_id=format_type, fatal=False))
|
||||
elif format_type == 'MPEG_DASH':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
format_url, video_id, mpd_id=format_type, fatal=False))
|
||||
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
||||
format_url, video_id, mpd_id=format_type, fatal=False)
|
||||
formats.extend(fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, subs)
|
||||
elif format_type == 'HSS':
|
||||
formats.extend(self._extract_ism_formats(
|
||||
format_url, video_id, ism_id='mss', fatal=False))
|
||||
fmts, subs = self._extract_ism_formats_and_subtitles(
|
||||
format_url, video_id, ism_id='mss', fatal=False)
|
||||
formats.extend(fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, subs)
|
||||
else:
|
||||
formats.append({
|
||||
'format_id': format_type,
|
||||
@@ -108,7 +115,6 @@ class CanvasIE(InfoExtractor):
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
subtitles = {}
|
||||
subtitle_urls = data.get('subtitleUrls')
|
||||
if isinstance(subtitle_urls, list):
|
||||
for subtitle in subtitle_urls:
|
||||
|
||||
@@ -27,10 +27,16 @@ class CBSBaseIE(ThePlatformFeedIE):
|
||||
|
||||
|
||||
class CBSIE(CBSBaseIE):
|
||||
_VALID_URL = r'(?:cbs:|https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/video|colbertlateshow\.com/(?:video|podcasts))/)(?P<id>[\w-]+)'
|
||||
_VALID_URL = r'''(?x)
|
||||
(?:
|
||||
cbs:|
|
||||
https?://(?:www\.)?(?:
|
||||
(?:cbs|paramountplus)\.com/(?:shows/[^/]+/video|movies/[^/]+)/|
|
||||
colbertlateshow\.com/(?:video|podcasts)/)
|
||||
)(?P<id>[\w-]+)'''
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
|
||||
'url': 'https://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
|
||||
'info_dict': {
|
||||
'id': '_u7W953k6la293J7EPTd9oHkSPs6Xn6_',
|
||||
'ext': 'mp4',
|
||||
@@ -52,16 +58,22 @@ class CBSIE(CBSBaseIE):
|
||||
}, {
|
||||
'url': 'http://www.colbertlateshow.com/podcasts/dYSwjqPs_X1tvbV_P2FcPWRa_qT6akTC/in-the-bad-room-with-stephen/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.paramountplus.com/shows/all-rise/video/QmR1WhNkh1a_IrdHZrbcRklm176X_rVc/all-rise-space/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.paramountplus.com/movies/million-dollar-american-princesses-meghan-and-harry/C0LpgNwXYeB8txxycdWdR9TjxpJOsdCq',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _extract_video_info(self, content_id, site='cbs', mpx_acc=2198311517):
|
||||
items_data = self._download_xml(
|
||||
'http://can.cbs.com/thunder/player/videoPlayerService.php',
|
||||
'https://can.cbs.com/thunder/player/videoPlayerService.php',
|
||||
content_id, query={'partner': site, 'contentId': content_id})
|
||||
video_data = xpath_element(items_data, './/item')
|
||||
title = xpath_text(video_data, 'videoTitle', 'title') or xpath_text(video_data, 'videotitle', 'title')
|
||||
tp_path = 'dJ5BDC/media/guid/%d/%s' % (mpx_acc, content_id)
|
||||
tp_release_url = 'http://link.theplatform.com/s/' + tp_path
|
||||
tp_release_url = 'https://link.theplatform.com/s/' + tp_path
|
||||
|
||||
asset_types = []
|
||||
subtitles = {}
|
||||
|
||||
@@ -26,7 +26,7 @@ class CBSNewsEmbedIE(CBSIE):
|
||||
def _real_extract(self, url):
|
||||
item = self._parse_json(zlib.decompress(compat_b64decode(
|
||||
compat_urllib_parse_unquote(self._match_id(url))),
|
||||
-zlib.MAX_WBITS), None)['video']['items'][0]
|
||||
-zlib.MAX_WBITS).decode('utf-8'), None)['video']['items'][0]
|
||||
return self._extract_video_info(item['mpxRefId'], 'cbsnews')
|
||||
|
||||
|
||||
|
||||
@@ -1,38 +1,113 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .cbs import CBSBaseIE
|
||||
import re
|
||||
|
||||
# from .cbs import CBSBaseIE
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
try_get,
|
||||
)
|
||||
|
||||
|
||||
class CBSSportsIE(CBSBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/(?:video|news)/(?P<id>[^/?#&]+)'
|
||||
|
||||
# class CBSSportsEmbedIE(CBSBaseIE):
|
||||
class CBSSportsEmbedIE(InfoExtractor):
|
||||
IE_NAME = 'cbssports:embed'
|
||||
_VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+?
|
||||
(?:
|
||||
ids%3D(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})|
|
||||
pcid%3D(?P<pcid>\d+)
|
||||
)'''
|
||||
_TESTS = [{
|
||||
'url': 'https://www.cbssports.com/nba/video/donovan-mitchell-flashes-star-potential-in-game-2-victory-over-thunder/',
|
||||
'info_dict': {
|
||||
'id': '1214315075735',
|
||||
'ext': 'mp4',
|
||||
'title': 'Donovan Mitchell flashes star potential in Game 2 victory over Thunder',
|
||||
'description': 'md5:df6f48622612c2d6bd2e295ddef58def',
|
||||
'timestamp': 1524111457,
|
||||
'upload_date': '20180419',
|
||||
'uploader': 'CBSI-NEW',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
}
|
||||
'url': 'https://www.cbssports.com/player/embed/?args=player_id%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26ids%3Db56c03a6-231a-4bbe-9c55-af3c8a8e9636%26resizable%3D1%26autoplay%3Dtrue%26domain%3Dcbssports.com%26comp_ads_enabled%3Dfalse%26watchAndRead%3D0%26startTime%3D0%26env%3Dprod',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.cbssports.com/nba/news/nba-playoffs-2018-watch-76ers-vs-heat-game-3-series-schedule-tv-channel-online-stream/',
|
||||
'url': 'https://embed.247sports.com/player/embed/?args=%3fplayer_id%3d1827823171591%26channel%3dcollege-football-recruiting%26pcid%3d1827823171591%26width%3d640%26height%3d360%26autoplay%3dTrue%26comp_ads_enabled%3dFalse%26uvpc%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_v4%2526partner%253d247%26uvpc_m%3dhttps%253a%252f%252fwww.cbssports.com%252fapi%252fcontent%252fvideo%252fconfig%252f%253fcfg%253duvp_247sports_m_v4%2526partner_m%253d247_mobile%26utag%3d247sportssite%26resizable%3dTrue',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _extract_video_info(self, filter_query, video_id):
|
||||
return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)
|
||||
# def _extract_video_info(self, filter_query, video_id):
|
||||
# return self._extract_feed_info('dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id)
|
||||
|
||||
def _real_extract(self, url):
|
||||
uuid, pcid = re.match(self._VALID_URL, url).groups()
|
||||
query = {'id': uuid} if uuid else {'pcid': pcid}
|
||||
video = self._download_json(
|
||||
'https://www.cbssports.com/api/content/video/',
|
||||
uuid or pcid, query=query)[0]
|
||||
video_id = video['id']
|
||||
title = video['title']
|
||||
metadata = video.get('metaData') or {}
|
||||
# return self._extract_video_info('byId=%d' % metadata['mpxOutletId'], video_id)
|
||||
# return self._extract_video_info('byGuid=' + metadata['mpxRefId'], video_id)
|
||||
|
||||
formats = self._extract_m3u8_formats(
|
||||
metadata['files'][0]['url'], video_id, 'mp4',
|
||||
'm3u8_native', m3u8_id='hls', fatal=False)
|
||||
self._sort_formats(formats)
|
||||
|
||||
image = video.get('image')
|
||||
thumbnails = None
|
||||
if image:
|
||||
image_path = image.get('path')
|
||||
if image_path:
|
||||
thumbnails = [{
|
||||
'url': image_path,
|
||||
'width': int_or_none(image.get('width')),
|
||||
'height': int_or_none(image.get('height')),
|
||||
'filesize': int_or_none(image.get('size')),
|
||||
}]
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'formats': formats,
|
||||
'thumbnails': thumbnails,
|
||||
'description': video.get('description'),
|
||||
'timestamp': int_or_none(try_get(video, lambda x: x['dateCreated']['epoch'])),
|
||||
'duration': int_or_none(metadata.get('duration')),
|
||||
}
|
||||
|
||||
|
||||
class CBSSportsBaseIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
video_id = self._search_regex(
|
||||
[r'(?:=|%26)pcid%3D(\d+)', r'embedVideo(?:Container)?_(\d+)'],
|
||||
webpage, 'video id')
|
||||
return self._extract_video_info('byId=%s' % video_id, video_id)
|
||||
iframe_url = self._search_regex(
|
||||
r'<iframe[^>]+(?:data-)?src="(https?://[^/]+/player/embed[^"]+)"',
|
||||
webpage, 'embed url')
|
||||
return self.url_result(iframe_url, CBSSportsEmbedIE.ie_key())
|
||||
|
||||
|
||||
class CBSSportsIE(CBSSportsBaseIE):
|
||||
IE_NAME = 'cbssports'
|
||||
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.cbssports.com/college-football/video/cover-3-stanford-spring-gleaning/',
|
||||
'info_dict': {
|
||||
'id': 'b56c03a6-231a-4bbe-9c55-af3c8a8e9636',
|
||||
'ext': 'mp4',
|
||||
'title': 'Cover 3: Stanford Spring Gleaning',
|
||||
'description': 'The Cover 3 crew break down everything you need to know about the Stanford Cardinal this spring.',
|
||||
'timestamp': 1617218398,
|
||||
'upload_date': '20210331',
|
||||
'duration': 502,
|
||||
},
|
||||
}]
|
||||
|
||||
|
||||
class TwentyFourSevenSportsIE(CBSSportsBaseIE):
|
||||
IE_NAME = '247sports'
|
||||
_VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://247sports.com/Video/2021-QB-Jake-Garcia-senior-highlights-through-five-games-10084854/',
|
||||
'info_dict': {
|
||||
'id': '4f1265cb-c3b5-44a8-bb1d-1914119a0ccc',
|
||||
'ext': 'mp4',
|
||||
'title': '2021 QB Jake Garcia senior highlights through five games',
|
||||
'description': 'md5:8cb67ebed48e2e6adac1701e0ff6e45b',
|
||||
'timestamp': 1607114223,
|
||||
'upload_date': '20201204',
|
||||
'duration': 208,
|
||||
},
|
||||
}]
|
||||
|
||||
@@ -133,6 +133,8 @@ class CDAIE(InfoExtractor):
|
||||
'age_limit': 18 if need_confirm_age else 0,
|
||||
}
|
||||
|
||||
info = self._search_json_ld(webpage, video_id, default={})
|
||||
|
||||
# Source: https://www.cda.pl/js/player.js?t=1606154898
|
||||
def decrypt_file(a):
|
||||
for p in ('_XDDD', '_CDA', '_ADC', '_CXD', '_QWE', '_Q5', '_IKSDE'):
|
||||
@@ -197,7 +199,7 @@ class CDAIE(InfoExtractor):
|
||||
handler = self._download_webpage
|
||||
|
||||
webpage = handler(
|
||||
self._BASE_URL + href, video_id,
|
||||
urljoin(self._BASE_URL, href), video_id,
|
||||
'Downloading %s version information' % resolution, fatal=False)
|
||||
if not webpage:
|
||||
# Manually report warning because empty page is returned when
|
||||
@@ -209,6 +211,4 @@ class CDAIE(InfoExtractor):
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
info = self._search_json_ld(webpage, video_id, default={})
|
||||
|
||||
return merge_dicts(info_dict, info)
|
||||
|
||||
@@ -147,7 +147,7 @@ class CeskaTelevizeIE(InfoExtractor):
|
||||
is_live = item.get('type') == 'LIVE'
|
||||
formats = []
|
||||
for format_id, stream_url in item.get('streamUrls', {}).items():
|
||||
if (not self._downloader.params.get('allow_unplayable_formats')
|
||||
if (not self.get_param('allow_unplayable_formats')
|
||||
and 'drmOnly=true' in stream_url):
|
||||
continue
|
||||
if 'playerType=flash' in stream_url:
|
||||
|
||||
@@ -5,7 +5,6 @@ import re
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
parse_iso8601,
|
||||
qualities,
|
||||
@@ -187,14 +186,13 @@ class Channel9IE(InfoExtractor):
|
||||
'quality': quality(q, q_url),
|
||||
})
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
slides = content_data.get('Slides')
|
||||
zip_file = content_data.get('ZipFile')
|
||||
|
||||
if not formats and not slides and not zip_file:
|
||||
raise ExtractorError(
|
||||
self.raise_no_formats(
|
||||
'None of recording, slides or zip are available for %s' % content_path)
|
||||
self._sort_formats(formats)
|
||||
|
||||
subtitles = {}
|
||||
for caption in content_data.get('Captions', []):
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -26,8 +26,8 @@ class CommonMistakesIE(InfoExtractor):
|
||||
'That doesn\'t make any sense. '
|
||||
'Simply remove the parameter in your command or configuration.'
|
||||
) % url
|
||||
if not self._downloader.params.get('verbose'):
|
||||
msg += ' Add -v to the command line to see what arguments and configuration yt-dlp got.'
|
||||
if not self.get_param('verbose'):
|
||||
msg += ' Add -v to the command line to see what arguments and configuration yt-dlp has'
|
||||
raise ExtractorError(msg, expected=True)
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urlparse,
|
||||
@@ -58,3 +60,16 @@ class MmsIE(InfoExtractor):
|
||||
'title': title,
|
||||
'url': url,
|
||||
}
|
||||
|
||||
|
||||
class ViewSourceIE(InfoExtractor):
|
||||
IE_DESC = False
|
||||
_VALID_URL = r'view-source:(?P<url>.+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'view-source:https://www.youtube.com/watch?v=BaW_jenozKc',
|
||||
'only_matching': True
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
return self.url_result(re.match(self._VALID_URL, url).group('url'))
|
||||
|
||||
@@ -131,7 +131,7 @@ class CorusIE(ThePlatformFeedIE):
|
||||
formats.extend(self._parse_smil_formats(
|
||||
smil, smil_url, video_id, namespace))
|
||||
if not formats and video.get('drm'):
|
||||
raise ExtractorError('This video is DRM protected.', expected=True)
|
||||
self.raise_no_formats('This video is DRM protected.', expected=True)
|
||||
self._sort_formats(formats)
|
||||
|
||||
subtitles = {}
|
||||
|
||||
@@ -12,6 +12,7 @@ from ..utils import (
|
||||
determine_ext,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
orderedSet,
|
||||
parse_age_limit,
|
||||
parse_duration,
|
||||
url_or_none,
|
||||
@@ -66,135 +67,179 @@ class CrackleIE(InfoExtractor):
|
||||
},
|
||||
}
|
||||
|
||||
def _download_json(self, url, *args, **kwargs):
|
||||
# Authorization generation algorithm is reverse engineered from:
|
||||
# https://www.sonycrackle.com/static/js/main.ea93451f.chunk.js
|
||||
timestamp = time.strftime('%Y%m%d%H%M', time.gmtime())
|
||||
h = hmac.new(b'IGSLUQCBDFHEOIFM', '|'.join([url, timestamp]).encode(), hashlib.sha1).hexdigest().upper()
|
||||
headers = {
|
||||
'Accept': 'application/json',
|
||||
'Authorization': '|'.join([h, timestamp, '117', '1']),
|
||||
}
|
||||
return InfoExtractor._download_json(self, url, *args, headers=headers, **kwargs)
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
country_code = self._downloader.params.get('geo_bypass_country', None)
|
||||
countries = [country_code] if country_code else (
|
||||
'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI')
|
||||
geo_bypass_country = self.get_param('geo_bypass_country', None)
|
||||
countries = orderedSet((geo_bypass_country, 'US', 'AU', 'CA', 'AS', 'FM', 'GU', 'MP', 'PR', 'PW', 'MH', 'VI', ''))
|
||||
num_countries, num = len(countries) - 1, 0
|
||||
|
||||
last_e = None
|
||||
media = {}
|
||||
for num, country in enumerate(countries):
|
||||
if num == 1: # start hard-coded list
|
||||
self.report_warning('%s. Trying with a list of known countries' % (
|
||||
'Unable to obtain video formats from %s API' % geo_bypass_country if geo_bypass_country
|
||||
else 'No country code was given using --geo-bypass-country'))
|
||||
elif num == num_countries: # end of list
|
||||
geo_info = self._download_json(
|
||||
'https://web-api-us.crackle.com/Service.svc/geo/country',
|
||||
video_id, fatal=False, note='Downloading geo-location information from crackle API',
|
||||
errnote='Unable to fetch geo-location information from crackle') or {}
|
||||
country = geo_info.get('CountryCode')
|
||||
if country is None:
|
||||
continue
|
||||
self.to_screen('%s identified country as %s' % (self.IE_NAME, country))
|
||||
if country in countries:
|
||||
self.to_screen('Downloading from %s API was already attempted. Skipping...' % country)
|
||||
continue
|
||||
|
||||
for country in countries:
|
||||
if country is None:
|
||||
continue
|
||||
try:
|
||||
# Authorization generation algorithm is reverse engineered from:
|
||||
# https://www.sonycrackle.com/static/js/main.ea93451f.chunk.js
|
||||
media_detail_url = 'https://web-api-us.crackle.com/Service.svc/details/media/%s/%s?disableProtocols=true' % (video_id, country)
|
||||
timestamp = time.strftime('%Y%m%d%H%M', time.gmtime())
|
||||
h = hmac.new(b'IGSLUQCBDFHEOIFM', '|'.join([media_detail_url, timestamp]).encode(), hashlib.sha1).hexdigest().upper()
|
||||
media = self._download_json(
|
||||
media_detail_url, video_id, 'Downloading media JSON as %s' % country,
|
||||
'Unable to download media JSON', headers={
|
||||
'Accept': 'application/json',
|
||||
'Authorization': '|'.join([h, timestamp, '117', '1']),
|
||||
})
|
||||
'https://web-api-us.crackle.com/Service.svc/details/media/%s/%s?disableProtocols=true' % (video_id, country),
|
||||
video_id, note='Downloading media JSON from %s API' % country,
|
||||
errnote='Unable to download media JSON')
|
||||
except ExtractorError as e:
|
||||
# 401 means geo restriction, trying next country
|
||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
||||
last_e = e
|
||||
continue
|
||||
raise
|
||||
|
||||
media_urls = media.get('MediaURLs')
|
||||
if not media_urls or not isinstance(media_urls, list):
|
||||
status = media.get('status')
|
||||
if status.get('messageCode') != '0':
|
||||
raise ExtractorError(
|
||||
'%s said: %s %s - %s' % (
|
||||
self.IE_NAME, status.get('messageCodeDescription'), status.get('messageCode'), status.get('message')),
|
||||
expected=True)
|
||||
|
||||
# Found video formats
|
||||
if isinstance(media.get('MediaURLs'), list):
|
||||
break
|
||||
|
||||
ignore_no_formats = self.get_param('ignore_no_formats_error')
|
||||
allow_unplayable_formats = self.get_param('allow_unplayable_formats')
|
||||
|
||||
if not media or (not media.get('MediaURLs') and not ignore_no_formats):
|
||||
raise ExtractorError(
|
||||
'Unable to access the crackle API. Try passing your country code '
|
||||
'to --geo-bypass-country. If it still does not work and the '
|
||||
'video is available in your country')
|
||||
title = media['Title']
|
||||
|
||||
formats, subtitles = [], {}
|
||||
has_drm = False
|
||||
for e in media.get('MediaURLs') or []:
|
||||
if e.get('UseDRM'):
|
||||
has_drm = True
|
||||
if not allow_unplayable_formats:
|
||||
continue
|
||||
format_url = url_or_none(e.get('Path'))
|
||||
if not format_url:
|
||||
continue
|
||||
|
||||
title = media['Title']
|
||||
|
||||
formats = []
|
||||
for e in media['MediaURLs']:
|
||||
if not self._downloader.params.get('allow_unplayable_formats') and e.get('UseDRM') is True:
|
||||
continue
|
||||
format_url = url_or_none(e.get('Path'))
|
||||
if not format_url:
|
||||
continue
|
||||
ext = determine_ext(format_url)
|
||||
if ext == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id='hls', fatal=False))
|
||||
elif ext == 'mpd':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
format_url, video_id, mpd_id='dash', fatal=False))
|
||||
elif format_url.endswith('.ism/Manifest'):
|
||||
formats.extend(self._extract_ism_formats(
|
||||
format_url, video_id, ism_id='mss', fatal=False))
|
||||
else:
|
||||
mfs_path = e.get('Type')
|
||||
mfs_info = self._MEDIA_FILE_SLOTS.get(mfs_path)
|
||||
if not mfs_info:
|
||||
continue
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
'format_id': 'http-' + mfs_path.split('.')[0],
|
||||
'width': mfs_info['width'],
|
||||
'height': mfs_info['height'],
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
description = media.get('Description')
|
||||
duration = int_or_none(media.get(
|
||||
'DurationInSeconds')) or parse_duration(media.get('Duration'))
|
||||
view_count = int_or_none(media.get('CountViews'))
|
||||
average_rating = float_or_none(media.get('UserRating'))
|
||||
age_limit = parse_age_limit(media.get('Rating'))
|
||||
genre = media.get('Genre')
|
||||
release_year = int_or_none(media.get('ReleaseYear'))
|
||||
creator = media.get('Directors')
|
||||
artist = media.get('Cast')
|
||||
|
||||
if media.get('MediaTypeDisplayValue') == 'Full Episode':
|
||||
series = media.get('ShowName')
|
||||
episode = title
|
||||
season_number = int_or_none(media.get('Season'))
|
||||
episode_number = int_or_none(media.get('Episode'))
|
||||
ext = determine_ext(format_url)
|
||||
if ext == 'm3u8':
|
||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id='hls', fatal=False)
|
||||
formats.extend(fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, subs)
|
||||
elif ext == 'mpd':
|
||||
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
||||
format_url, video_id, mpd_id='dash', fatal=False)
|
||||
formats.extend(fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, subs)
|
||||
elif format_url.endswith('.ism/Manifest'):
|
||||
fmts, subs = self._extract_ism_formats_and_subtitles(
|
||||
format_url, video_id, ism_id='mss', fatal=False)
|
||||
formats.extend(fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, subs)
|
||||
else:
|
||||
series = episode = season_number = episode_number = None
|
||||
mfs_path = e.get('Type')
|
||||
mfs_info = self._MEDIA_FILE_SLOTS.get(mfs_path)
|
||||
if not mfs_info:
|
||||
continue
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
'format_id': 'http-' + mfs_path.split('.')[0],
|
||||
'width': mfs_info['width'],
|
||||
'height': mfs_info['height'],
|
||||
})
|
||||
if not formats and has_drm and not ignore_no_formats:
|
||||
raise ExtractorError('The video is DRM protected', expected=True)
|
||||
self._sort_formats(formats)
|
||||
|
||||
subtitles = {}
|
||||
cc_files = media.get('ClosedCaptionFiles')
|
||||
if isinstance(cc_files, list):
|
||||
for cc_file in cc_files:
|
||||
if not isinstance(cc_file, dict):
|
||||
continue
|
||||
cc_url = url_or_none(cc_file.get('Path'))
|
||||
if not cc_url:
|
||||
continue
|
||||
lang = cc_file.get('Locale') or 'en'
|
||||
subtitles.setdefault(lang, []).append({'url': cc_url})
|
||||
description = media.get('Description')
|
||||
duration = int_or_none(media.get(
|
||||
'DurationInSeconds')) or parse_duration(media.get('Duration'))
|
||||
view_count = int_or_none(media.get('CountViews'))
|
||||
average_rating = float_or_none(media.get('UserRating'))
|
||||
age_limit = parse_age_limit(media.get('Rating'))
|
||||
genre = media.get('Genre')
|
||||
release_year = int_or_none(media.get('ReleaseYear'))
|
||||
creator = media.get('Directors')
|
||||
artist = media.get('Cast')
|
||||
|
||||
thumbnails = []
|
||||
images = media.get('Images')
|
||||
if isinstance(images, list):
|
||||
for image_key, image_url in images.items():
|
||||
mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
|
||||
if not mobj:
|
||||
continue
|
||||
thumbnails.append({
|
||||
'url': image_url,
|
||||
'width': int(mobj.group(1)),
|
||||
'height': int(mobj.group(2)),
|
||||
})
|
||||
if media.get('MediaTypeDisplayValue') == 'Full Episode':
|
||||
series = media.get('ShowName')
|
||||
episode = title
|
||||
season_number = int_or_none(media.get('Season'))
|
||||
episode_number = int_or_none(media.get('Episode'))
|
||||
else:
|
||||
series = episode = season_number = episode_number = None
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': description,
|
||||
'duration': duration,
|
||||
'view_count': view_count,
|
||||
'average_rating': average_rating,
|
||||
'age_limit': age_limit,
|
||||
'genre': genre,
|
||||
'creator': creator,
|
||||
'artist': artist,
|
||||
'release_year': release_year,
|
||||
'series': series,
|
||||
'episode': episode,
|
||||
'season_number': season_number,
|
||||
'episode_number': episode_number,
|
||||
'thumbnails': thumbnails,
|
||||
'subtitles': subtitles,
|
||||
'formats': formats,
|
||||
}
|
||||
cc_files = media.get('ClosedCaptionFiles')
|
||||
if isinstance(cc_files, list):
|
||||
for cc_file in cc_files:
|
||||
if not isinstance(cc_file, dict):
|
||||
continue
|
||||
cc_url = url_or_none(cc_file.get('Path'))
|
||||
if not cc_url:
|
||||
continue
|
||||
lang = cc_file.get('Locale') or 'en'
|
||||
subtitles.setdefault(lang, []).append({'url': cc_url})
|
||||
|
||||
raise last_e
|
||||
thumbnails = []
|
||||
images = media.get('Images')
|
||||
if isinstance(images, list):
|
||||
for image_key, image_url in images.items():
|
||||
mobj = re.search(r'Img_(\d+)[xX](\d+)', image_key)
|
||||
if not mobj:
|
||||
continue
|
||||
thumbnails.append({
|
||||
'url': image_url,
|
||||
'width': int(mobj.group(1)),
|
||||
'height': int(mobj.group(2)),
|
||||
})
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': description,
|
||||
'duration': duration,
|
||||
'view_count': view_count,
|
||||
'average_rating': average_rating,
|
||||
'age_limit': age_limit,
|
||||
'genre': genre,
|
||||
'creator': creator,
|
||||
'artist': artist,
|
||||
'release_year': release_year,
|
||||
'series': series,
|
||||
'episode': episode,
|
||||
'season_number': season_number,
|
||||
'episode_number': episode_number,
|
||||
'thumbnails': thumbnails,
|
||||
'subtitles': subtitles,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
@@ -428,7 +428,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
||||
r'<div class="showmedia-trailer-notice">(.+?)</div>',
|
||||
webpage, 'trailer-notice', default='')
|
||||
if note_m:
|
||||
raise ExtractorError(note_m)
|
||||
raise ExtractorError(note_m, expected=True)
|
||||
|
||||
mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
|
||||
if mobj:
|
||||
|
||||
@@ -25,12 +25,12 @@ class CuriosityStreamBaseIE(InfoExtractor):
|
||||
raise ExtractorError(
|
||||
'%s said: %s' % (self.IE_NAME, error), expected=True)
|
||||
|
||||
def _call_api(self, path, video_id):
|
||||
def _call_api(self, path, video_id, query=None):
|
||||
headers = {}
|
||||
if self._auth_token:
|
||||
headers['X-Auth-Token'] = self._auth_token
|
||||
result = self._download_json(
|
||||
self._API_BASE_URL + path, video_id, headers=headers)
|
||||
self._API_BASE_URL + path, video_id, headers=headers, query=query)
|
||||
self._handle_errors(result)
|
||||
return result['data']
|
||||
|
||||
@@ -52,62 +52,75 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
|
||||
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
|
||||
_TEST = {
|
||||
'url': 'https://app.curiositystream.com/video/2',
|
||||
'md5': '262bb2f257ff301115f1973540de8983',
|
||||
'info_dict': {
|
||||
'id': '2',
|
||||
'ext': 'mp4',
|
||||
'title': 'How Did You Develop The Internet?',
|
||||
'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.',
|
||||
}
|
||||
},
|
||||
'params': {
|
||||
'format': 'bestvideo',
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
media = self._call_api('media/' + video_id, video_id)
|
||||
title = media['title']
|
||||
|
||||
formats = []
|
||||
for encoding in media.get('encodings', []):
|
||||
m3u8_url = encoding.get('master_playlist_url')
|
||||
if m3u8_url:
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
m3u8_url, video_id, 'mp4', 'm3u8_native',
|
||||
m3u8_id='hls', fatal=False))
|
||||
encoding_url = encoding.get('url')
|
||||
file_url = encoding.get('file_url')
|
||||
if not encoding_url and not file_url:
|
||||
continue
|
||||
f = {
|
||||
'width': int_or_none(encoding.get('width')),
|
||||
'height': int_or_none(encoding.get('height')),
|
||||
'vbr': int_or_none(encoding.get('video_bitrate')),
|
||||
'abr': int_or_none(encoding.get('audio_bitrate')),
|
||||
'filesize': int_or_none(encoding.get('size_in_bytes')),
|
||||
'vcodec': encoding.get('video_codec'),
|
||||
'acodec': encoding.get('audio_codec'),
|
||||
'container': encoding.get('container_type'),
|
||||
}
|
||||
for f_url in (encoding_url, file_url):
|
||||
if not f_url:
|
||||
for encoding_format in ('m3u8', 'mpd'):
|
||||
media = self._call_api('media/' + video_id, video_id, query={
|
||||
'encodingsNew': 'true',
|
||||
'encodingsFormat': encoding_format,
|
||||
})
|
||||
for encoding in media.get('encodings', []):
|
||||
playlist_url = encoding.get('master_playlist_url')
|
||||
if encoding_format == 'm3u8':
|
||||
# use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
playlist_url, video_id, 'mp4',
|
||||
m3u8_id='hls', fatal=False))
|
||||
elif encoding_format == 'mpd':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
playlist_url, video_id, mpd_id='dash', fatal=False))
|
||||
encoding_url = encoding.get('url')
|
||||
file_url = encoding.get('file_url')
|
||||
if not encoding_url and not file_url:
|
||||
continue
|
||||
fmt = f.copy()
|
||||
rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', f_url)
|
||||
if rtmp:
|
||||
fmt.update({
|
||||
'url': rtmp.group('url'),
|
||||
'play_path': rtmp.group('playpath'),
|
||||
'app': rtmp.group('app'),
|
||||
'ext': 'flv',
|
||||
'format_id': 'rtmp',
|
||||
})
|
||||
else:
|
||||
fmt.update({
|
||||
'url': f_url,
|
||||
'format_id': 'http',
|
||||
})
|
||||
formats.append(fmt)
|
||||
f = {
|
||||
'width': int_or_none(encoding.get('width')),
|
||||
'height': int_or_none(encoding.get('height')),
|
||||
'vbr': int_or_none(encoding.get('video_bitrate')),
|
||||
'abr': int_or_none(encoding.get('audio_bitrate')),
|
||||
'filesize': int_or_none(encoding.get('size_in_bytes')),
|
||||
'vcodec': encoding.get('video_codec'),
|
||||
'acodec': encoding.get('audio_codec'),
|
||||
'container': encoding.get('container_type'),
|
||||
}
|
||||
for f_url in (encoding_url, file_url):
|
||||
if not f_url:
|
||||
continue
|
||||
fmt = f.copy()
|
||||
rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', f_url)
|
||||
if rtmp:
|
||||
fmt.update({
|
||||
'url': rtmp.group('url'),
|
||||
'play_path': rtmp.group('playpath'),
|
||||
'app': rtmp.group('app'),
|
||||
'ext': 'flv',
|
||||
'format_id': 'rtmp',
|
||||
})
|
||||
else:
|
||||
fmt.update({
|
||||
'url': f_url,
|
||||
'format_id': 'http',
|
||||
})
|
||||
formats.append(fmt)
|
||||
self._sort_formats(formats)
|
||||
|
||||
title = media['title']
|
||||
|
||||
subtitles = {}
|
||||
for closed_caption in media.get('closed_captions', []):
|
||||
sub_url = closed_caption.get('file')
|
||||
@@ -130,32 +143,45 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
|
||||
}
|
||||
|
||||
|
||||
class CuriosityStreamCollectionIE(CuriosityStreamBaseIE):
|
||||
IE_NAME = 'curiositystream:collection'
|
||||
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/(?:collection|series)/(?P<id>\d+)'
|
||||
class CuriosityStreamCollectionsIE(CuriosityStreamBaseIE):
|
||||
IE_NAME = 'curiositystream:collections'
|
||||
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/collections/(?P<id>\d+)'
|
||||
_API_BASE_URL = 'https://api.curiositystream.com/v2/collections/'
|
||||
_TESTS = [{
|
||||
'url': 'https://app.curiositystream.com/collection/2',
|
||||
'url': 'https://curiositystream.com/collections/86',
|
||||
'info_dict': {
|
||||
'id': '86',
|
||||
'title': 'Staff Picks',
|
||||
'description': 'Wondering where to start? Here are a few of our favorite series and films... from our couch to yours.',
|
||||
},
|
||||
'playlist_mincount': 7,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
collection_id = self._match_id(url)
|
||||
collection = self._call_api(collection_id, collection_id)
|
||||
entries = []
|
||||
for media in collection.get('media', []):
|
||||
media_id = compat_str(media.get('id'))
|
||||
media_type, ie = ('series', CuriosityStreamSeriesIE) if media.get('is_collection') else ('video', CuriosityStreamIE)
|
||||
entries.append(self.url_result(
|
||||
'https://curiositystream.com/%s/%s' % (media_type, media_id),
|
||||
ie=ie.ie_key(), video_id=media_id))
|
||||
return self.playlist_result(
|
||||
entries, collection_id,
|
||||
collection.get('title'), collection.get('description'))
|
||||
|
||||
|
||||
class CuriosityStreamSeriesIE(CuriosityStreamCollectionsIE):
|
||||
IE_NAME = 'curiositystream:series'
|
||||
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/series/(?P<id>\d+)'
|
||||
_API_BASE_URL = 'https://api.curiositystream.com/v2/series/'
|
||||
_TESTS = [{
|
||||
'url': 'https://app.curiositystream.com/series/2',
|
||||
'info_dict': {
|
||||
'id': '2',
|
||||
'title': 'Curious Minds: The Internet',
|
||||
'description': 'How is the internet shaping our lives in the 21st Century?',
|
||||
},
|
||||
'playlist_mincount': 17,
|
||||
}, {
|
||||
'url': 'https://curiositystream.com/series/2',
|
||||
'only_matching': True,
|
||||
'playlist_mincount': 16,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
collection_id = self._match_id(url)
|
||||
collection = self._call_api(
|
||||
'collections/' + collection_id, collection_id)
|
||||
entries = []
|
||||
for media in collection.get('media', []):
|
||||
media_id = compat_str(media.get('id'))
|
||||
entries.append(self.url_result(
|
||||
'https://curiositystream.com/video/' + media_id,
|
||||
CuriosityStreamIE.ie_key(), media_id))
|
||||
return self.playlist_result(
|
||||
entries, collection_id,
|
||||
collection.get('title'), collection.get('description'))
|
||||
|
||||
@@ -42,7 +42,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
|
||||
def _real_initialize(self):
|
||||
cookies = self._get_dailymotion_cookies()
|
||||
ff = self._get_cookie_value(cookies, 'ff')
|
||||
self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self._downloader.params.get('age_limit'))
|
||||
self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self.get_param('age_limit'))
|
||||
self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off')
|
||||
|
||||
def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
|
||||
@@ -207,14 +207,14 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
video_id, playlist_id = re.match(self._VALID_URL, url).groups()
|
||||
|
||||
if playlist_id:
|
||||
if not self._downloader.params.get('noplaylist'):
|
||||
if not self.get_param('noplaylist'):
|
||||
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id)
|
||||
return self.url_result(
|
||||
'http://www.dailymotion.com/playlist/' + playlist_id,
|
||||
'DailymotionPlaylist', playlist_id)
|
||||
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
|
||||
|
||||
password = self._downloader.params.get('videopassword')
|
||||
password = self.get_param('videopassword')
|
||||
media = self._call_api(
|
||||
'media', video_id, '''... on Video {
|
||||
%s
|
||||
@@ -232,7 +232,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
||||
audienceCount
|
||||
isOnAir
|
||||
}''' % (self._COMMON_MEDIA_FIELDS, self._COMMON_MEDIA_FIELDS), 'Downloading media JSON metadata',
|
||||
'password: "%s"' % self._downloader.params.get('videopassword') if password else None)
|
||||
'password: "%s"' % self.get_param('videopassword') if password else None)
|
||||
xid = media['xid']
|
||||
|
||||
metadata = self._download_json(
|
||||
|
||||
@@ -158,7 +158,7 @@ class DaumListIE(InfoExtractor):
|
||||
query_dict = compat_parse_qs(compat_urlparse.urlparse(url).query)
|
||||
if 'clipid' in query_dict:
|
||||
clip_id = query_dict['clipid'][0]
|
||||
if self._downloader.params.get('noplaylist'):
|
||||
if self.get_param('noplaylist'):
|
||||
self.to_screen('Downloading just video %s because of --no-playlist' % clip_id)
|
||||
return self.url_result(DaumClipIE._URL_TEMPLATE % clip_id, 'DaumClip')
|
||||
else:
|
||||
|
||||
@@ -13,8 +13,8 @@ from ..utils import (
|
||||
|
||||
class DeezerBaseInfoExtractor(InfoExtractor):
|
||||
def get_data(self, url):
|
||||
if not self._downloader.params.get('test'):
|
||||
self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
|
||||
if not self.get_param('test'):
|
||||
self.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
|
||||
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
data_id = mobj.group('id')
|
||||
|
||||
100
yt_dlp/extractor/discoveryplusindia.py
Normal file
100
yt_dlp/extractor/discoveryplusindia.py
Normal file
@@ -0,0 +1,100 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import re
|
||||
|
||||
from ..compat import compat_str
|
||||
from ..utils import try_get
|
||||
from .common import InfoExtractor
|
||||
from .dplay import DPlayIE
|
||||
|
||||
|
||||
class DiscoveryPlusIndiaIE(DPlayIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.in/videos?' + DPlayIE._PATH_REGEX
|
||||
_TESTS = [{
|
||||
'url': 'https://www.discoveryplus.in/videos/how-do-they-do-it/fugu-and-more?seasonId=8&type=EPISODE',
|
||||
'info_dict': {
|
||||
'id': '27104',
|
||||
'ext': 'mp4',
|
||||
'display_id': 'how-do-they-do-it/fugu-and-more',
|
||||
'title': 'Fugu and More',
|
||||
'description': 'The Japanese catch, prepare and eat the deadliest fish on the planet.',
|
||||
'duration': 1319,
|
||||
'timestamp': 1582309800,
|
||||
'upload_date': '20200221',
|
||||
'series': 'How Do They Do It?',
|
||||
'season_number': 8,
|
||||
'episode_number': 2,
|
||||
'creator': 'Discovery Channel',
|
||||
},
|
||||
'params': {
|
||||
'format': 'bestvideo',
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'Cookies (not necessarily logged in) are needed'
|
||||
}]
|
||||
|
||||
def _update_disco_api_headers(self, headers, disco_base, display_id, realm):
|
||||
headers['x-disco-params'] = 'realm=%s' % realm
|
||||
headers['x-disco-client'] = 'WEB:UNKNOWN:dplus-india:17.0.0'
|
||||
|
||||
def _download_video_playback_info(self, disco_base, video_id, headers):
|
||||
return self._download_json(
|
||||
disco_base + 'playback/v3/videoPlaybackInfo',
|
||||
video_id, headers=headers, data=json.dumps({
|
||||
'deviceInfo': {
|
||||
'adBlocker': False,
|
||||
},
|
||||
'videoId': video_id,
|
||||
}).encode('utf-8'))['data']['attributes']['streaming']
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
return self._get_disco_api_info(
|
||||
url, display_id, 'ap2-prod-direct.discoveryplus.in', 'dplusindia', 'in')
|
||||
|
||||
|
||||
class DiscoveryPlusIndiaShowIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?discoveryplus\.in/show/(?P<show_name>[^/]+)/?(?:[?#]|$)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.discoveryplus.in/show/how-do-they-do-it',
|
||||
'playlist_mincount': 140,
|
||||
'info_dict': {
|
||||
'id': 'how-do-they-do-it',
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
def _entries(self, show_name):
|
||||
headers = {
|
||||
'x-disco-client': 'WEB:UNKNOWN:dplus-india:prod',
|
||||
'x-disco-params': 'realm=dplusindia',
|
||||
'referer': 'https://www.discoveryplus.in/',
|
||||
}
|
||||
show_url = 'https://ap2-prod-direct.discoveryplus.in/cms/routes/show/{}?include=default'.format(show_name)
|
||||
show_json = self._download_json(show_url,
|
||||
video_id=show_name,
|
||||
headers=headers)['included'][4]['attributes']['component']
|
||||
show_id = show_json['mandatoryParams'].split('=')[-1]
|
||||
season_url = 'https://ap2-prod-direct.discoveryplus.in/content/videos?sort=episodeNumber&filter[seasonNumber]={}&filter[show.id]={}&page[size]=100&page[number]={}'
|
||||
for season in show_json['filters'][0]['options']:
|
||||
season_id = season['id']
|
||||
total_pages, page_num = 1, 0
|
||||
while page_num < total_pages:
|
||||
season_json = self._download_json(season_url.format(season_id, show_id, compat_str(page_num + 1)),
|
||||
video_id=show_id, headers=headers,
|
||||
note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
|
||||
if page_num == 0:
|
||||
total_pages = try_get(season_json, lambda x: x['meta']['totalPages'], int) or 1
|
||||
episodes_json = season_json['data']
|
||||
for episode in episodes_json:
|
||||
video_id = episode['attributes']['path']
|
||||
yield self.url_result(
|
||||
'https://discoveryplus.in/videos/%s' % video_id,
|
||||
ie=DiscoveryPlusIndiaIE.ie_key(), video_id=video_id)
|
||||
page_num += 1
|
||||
|
||||
def _real_extract(self, url):
|
||||
show_name = re.match(self._VALID_URL, url).group('show_name')
|
||||
return self.playlist_result(self._entries(show_name), playlist_id=show_name)
|
||||
@@ -9,7 +9,6 @@ from ..utils import (
|
||||
unified_strdate,
|
||||
compat_str,
|
||||
determine_ext,
|
||||
ExtractorError,
|
||||
update_url_query,
|
||||
)
|
||||
|
||||
@@ -140,7 +139,7 @@ class DisneyIE(InfoExtractor):
|
||||
'vcodec': 'none' if (width == 0 and height == 0) else None,
|
||||
})
|
||||
if not formats and video_data.get('expired'):
|
||||
raise ExtractorError(
|
||||
self.raise_no_formats(
|
||||
'%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']),
|
||||
expected=True)
|
||||
self._sort_formats(formats)
|
||||
|
||||
@@ -32,6 +32,18 @@ class DigitallySpeakingIE(InfoExtractor):
|
||||
# From http://www.gdcvault.com/play/1013700/Advanced-Material
|
||||
'url': 'http://sevt.dispeak.com/ubm/gdc/eur10/xml/11256_1282118587281VNIT.xml',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# From https://gdcvault.com/play/1016624, empty speakerVideo
|
||||
'url': 'https://sevt.dispeak.com/ubm/gdc/online12/xml/201210-822101_1349794556671DDDD.xml',
|
||||
'info_dict': {
|
||||
'id': '201210-822101_1349794556671DDDD',
|
||||
'ext': 'flv',
|
||||
'title': 'Pre-launch - Preparing to Take the Plunge',
|
||||
},
|
||||
}, {
|
||||
# From http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru, empty slideVideo
|
||||
'url': 'http://events.digitallyspeaking.com/gdc/project25/xml/p25-miyamoto1999_1282467389849HSVB.xml',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _parse_mp4(self, metadata):
|
||||
@@ -85,25 +97,19 @@ class DigitallySpeakingIE(InfoExtractor):
|
||||
'quality': 1,
|
||||
'format_id': audio.get('code'),
|
||||
})
|
||||
slide_video_path = xpath_text(metadata, './slideVideo', fatal=True)
|
||||
formats.append({
|
||||
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
|
||||
'play_path': remove_end(slide_video_path, '.flv'),
|
||||
'ext': 'flv',
|
||||
'format_note': 'slide deck video',
|
||||
'quality': -2,
|
||||
'format_id': 'slides',
|
||||
'acodec': 'none',
|
||||
})
|
||||
speaker_video_path = xpath_text(metadata, './speakerVideo', fatal=True)
|
||||
formats.append({
|
||||
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
|
||||
'play_path': remove_end(speaker_video_path, '.flv'),
|
||||
'ext': 'flv',
|
||||
'format_note': 'speaker video',
|
||||
'quality': -1,
|
||||
'format_id': 'speaker',
|
||||
})
|
||||
for video_key, format_id, preference in (
|
||||
('slide', 'slides', -2), ('speaker', 'speaker', -1)):
|
||||
video_path = xpath_text(metadata, './%sVideo' % video_key)
|
||||
if not video_path:
|
||||
continue
|
||||
formats.append({
|
||||
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
|
||||
'play_path': remove_end(video_path, '.flv'),
|
||||
'ext': 'flv',
|
||||
'format_note': '%s video' % video_key,
|
||||
'quality': preference,
|
||||
'format_id': format_id,
|
||||
})
|
||||
return formats
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
@@ -107,8 +107,7 @@ class EggheadLessonIE(EggheadBaseIE):
|
||||
ext = determine_ext(format_url)
|
||||
if ext == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
format_url, lesson_id, 'mp4', entry_protocol='m3u8',
|
||||
m3u8_id='hls', fatal=False))
|
||||
format_url, lesson_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||
elif ext == 'mpd':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
format_url, lesson_id, mpd_id='dash', fatal=False))
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
@@ -12,12 +10,12 @@ from ..utils import (
|
||||
try_get,
|
||||
)
|
||||
from ..compat import compat_str
|
||||
from ..downloader.hls import HlsFD
|
||||
|
||||
|
||||
class ElonetIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://elonet\.finna\.fi/Record/kavi\.elonet_elokuva_(?P<id>[0-9]+)'
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
# m3u8 with subtitles
|
||||
'url': 'https://elonet.finna.fi/Record/kavi.elonet_elokuva_107867',
|
||||
'md5': '8efc954b96c543711707f87de757caea',
|
||||
'info_dict': {
|
||||
@@ -27,62 +25,17 @@ class ElonetIE(InfoExtractor):
|
||||
'description': 'Valkoinen peura (1952) on Erik Blombergin ohjaama ja yhdessä Mirjami Kuosmasen kanssa käsikirjoittama tarunomainen kertomus valkoisen peuran hahmossa lii...',
|
||||
'thumbnail': 'https://elonet.finna.fi/Cover/Show?id=kavi.elonet_elokuva_107867&index=0&size=large',
|
||||
},
|
||||
}
|
||||
|
||||
def _download_m3u8_chunked_subtitle(self, chunklist_url):
|
||||
"""
|
||||
Download VTT subtitles from pieces in manifest URL.
|
||||
Return a string containing joined chunks with extra headers removed.
|
||||
"""
|
||||
with tempfile.NamedTemporaryFile(delete=True) as outfile:
|
||||
fname = outfile.name
|
||||
hlsdl = HlsFD(self._downloader, {})
|
||||
hlsdl.download(compat_str(fname), {"url": chunklist_url})
|
||||
with open(fname, 'r') as fin:
|
||||
# Remove (some) headers
|
||||
fdata = re.sub(r'X-TIMESTAMP-MAP.*\n+|WEBVTT\n+', '', fin.read())
|
||||
os.remove(fname)
|
||||
return "WEBVTT\n\n" + fdata
|
||||
|
||||
def _parse_m3u8_subtitles(self, m3u8_doc, m3u8_url):
|
||||
"""
|
||||
Parse subtitles from HLS / m3u8 manifest.
|
||||
"""
|
||||
subtitles = {}
|
||||
baseurl = m3u8_url[:m3u8_url.rindex('/') + 1]
|
||||
for line in m3u8_doc.split('\n'):
|
||||
if 'EXT-X-MEDIA:TYPE=SUBTITLES' in line:
|
||||
lang = self._search_regex(
|
||||
r'LANGUAGE="(.+?)"', line, 'lang', default=False)
|
||||
uri = self._search_regex(
|
||||
r'URI="(.+?)"', line, 'uri', default=False)
|
||||
if lang and uri:
|
||||
data = self._download_m3u8_chunked_subtitle(baseurl + uri)
|
||||
subtitles[lang] = [{'ext': 'vtt', 'data': data}]
|
||||
return subtitles
|
||||
|
||||
def _parse_mpd_subtitles(self, mpd_doc):
|
||||
"""
|
||||
Parse subtitles from MPD manifest.
|
||||
"""
|
||||
ns = '{urn:mpeg:dash:schema:mpd:2011}'
|
||||
subtitles = {}
|
||||
for aset in mpd_doc.findall(".//%sAdaptationSet[@mimeType='text/vtt']" % (ns)):
|
||||
lang = aset.attrib.get('lang', 'unk')
|
||||
url = aset.find("./%sRepresentation/%sBaseURL" % (ns, ns)).text
|
||||
subtitles[lang] = [{'ext': 'vtt', 'url': url}]
|
||||
return subtitles
|
||||
|
||||
def _get_subtitles(self, fmt, doc, url):
|
||||
if fmt == 'm3u8':
|
||||
subs = self._parse_m3u8_subtitles(doc, url)
|
||||
elif fmt == 'mpd':
|
||||
subs = self._parse_mpd_subtitles(doc)
|
||||
else:
|
||||
self._downloader.report_warning(
|
||||
"Cannot download subtitles from '%s' streams." % (fmt))
|
||||
subs = {}
|
||||
return subs
|
||||
}, {
|
||||
# DASH with subtitles
|
||||
'url': 'https://elonet.finna.fi/Record/kavi.elonet_elokuva_116539',
|
||||
'info_dict': {
|
||||
'id': '116539',
|
||||
'ext': 'mp4',
|
||||
'title': 'Minulla on tiikeri',
|
||||
'description': 'Pienellä pojalla, joka asuu kerrostalossa, on kotieläimenä tiikeri. Se on kuitenkin salaisuus. Kerrostalon räpätäti on Kotilaisen täti, joka on aina vali...',
|
||||
'thumbnail': 'https://elonet.finna.fi/Cover/Show?id=kavi.elonet_elokuva_116539&index=0&size=large&source=Solr',
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
@@ -101,8 +54,8 @@ class ElonetIE(InfoExtractor):
|
||||
self._parse_json(json_s, video_id),
|
||||
lambda x: x[0]["src"], compat_str)
|
||||
formats = []
|
||||
subtitles = {}
|
||||
if re.search(r'\.m3u8\??', src):
|
||||
fmt = 'm3u8'
|
||||
res = self._download_webpage_handle(
|
||||
# elonet servers have certificate problems
|
||||
src.replace('https:', 'http:'), video_id,
|
||||
@@ -111,11 +64,10 @@ class ElonetIE(InfoExtractor):
|
||||
if res:
|
||||
doc, urlh = res
|
||||
url = urlh.geturl()
|
||||
formats = self._parse_m3u8_formats(doc, url)
|
||||
formats, subtitles = self._parse_m3u8_formats_and_subtitles(doc, url)
|
||||
for f in formats:
|
||||
f['ext'] = 'mp4'
|
||||
elif re.search(r'\.mpd\??', src):
|
||||
fmt = 'mpd'
|
||||
res = self._download_xml_handle(
|
||||
src, video_id,
|
||||
note='Downloading MPD manifest',
|
||||
@@ -123,7 +75,7 @@ class ElonetIE(InfoExtractor):
|
||||
if res:
|
||||
doc, urlh = res
|
||||
url = base_url(urlh.geturl())
|
||||
formats = self._parse_mpd_formats(doc, mpd_base_url=url)
|
||||
formats, subtitles = self._parse_mpd_formats_and_subtitles(doc, mpd_base_url=url)
|
||||
else:
|
||||
raise ExtractorError("Unknown streaming format")
|
||||
|
||||
@@ -133,5 +85,5 @@ class ElonetIE(InfoExtractor):
|
||||
'description': description,
|
||||
'thumbnail': thumbnail,
|
||||
'formats': formats,
|
||||
'subtitles': self.extract_subtitles(fmt, doc, url),
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ from .common import InfoExtractor
|
||||
from ..compat import compat_urllib_parse_urlencode
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
unescapeHTML
|
||||
merge_dicts,
|
||||
)
|
||||
|
||||
|
||||
@@ -24,7 +24,8 @@ class EroProfileIE(InfoExtractor):
|
||||
'title': 'sexy babe softcore',
|
||||
'thumbnail': r're:https?://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
},
|
||||
'skip': 'Video not found',
|
||||
}, {
|
||||
'url': 'http://www.eroprofile.com/m/videos/view/Try-It-On-Pee_cut_2-wmv-4shared-com-file-sharing-download-movie-file',
|
||||
'md5': '1baa9602ede46ce904c431f5418d8916',
|
||||
@@ -77,19 +78,15 @@ class EroProfileIE(InfoExtractor):
|
||||
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
|
||||
webpage, 'video id', default=None)
|
||||
|
||||
video_url = unescapeHTML(self._search_regex(
|
||||
r'<source src="([^"]+)', webpage, 'video url'))
|
||||
title = self._html_search_regex(
|
||||
r'Title:</th><td>([^<]+)</td>', webpage, 'title')
|
||||
thumbnail = self._search_regex(
|
||||
r'onclick="showVideoPlayer\(\)"><img src="([^"]+)',
|
||||
webpage, 'thumbnail', fatal=False)
|
||||
(r'Title:</th><td>([^<]+)</td>', r'<h1[^>]*>(.+?)</h1>'),
|
||||
webpage, 'title')
|
||||
|
||||
return {
|
||||
info = self._parse_html5_media_entries(url, webpage, video_id)[0]
|
||||
|
||||
return merge_dicts(info, {
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
'url': video_url,
|
||||
'title': title,
|
||||
'thumbnail': thumbnail,
|
||||
'age_limit': 18,
|
||||
}
|
||||
})
|
||||
|
||||
@@ -67,7 +67,10 @@ from .appletrailers import (
|
||||
AppleTrailersSectionIE,
|
||||
)
|
||||
from .applepodcasts import ApplePodcastsIE
|
||||
from .archiveorg import ArchiveOrgIE
|
||||
from .archiveorg import (
|
||||
ArchiveOrgIE,
|
||||
YoutubeWebArchiveIE,
|
||||
)
|
||||
from .arcpublishing import ArcPublishingIE
|
||||
from .arkena import ArkenaIE
|
||||
from .ard import (
|
||||
@@ -80,6 +83,7 @@ from .arte import (
|
||||
ArteTVEmbedIE,
|
||||
ArteTVPlaylistIE,
|
||||
)
|
||||
from .arnes import ArnesIE
|
||||
from .asiancrush import (
|
||||
AsianCrushIE,
|
||||
AsianCrushPlaylistIE,
|
||||
@@ -93,7 +97,8 @@ from .audiomack import AudiomackIE, AudiomackAlbumIE
|
||||
from .audius import (
|
||||
AudiusIE,
|
||||
AudiusTrackIE,
|
||||
AudiusPlaylistIE
|
||||
AudiusPlaylistIE,
|
||||
AudiusProfileIE,
|
||||
)
|
||||
from .awaan import (
|
||||
AWAANIE,
|
||||
@@ -108,7 +113,8 @@ from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE
|
||||
from .bbc import (
|
||||
BBCCoUkIE,
|
||||
BBCCoUkArticleIE,
|
||||
BBCCoUkIPlayerPlaylistIE,
|
||||
BBCCoUkIPlayerEpisodesIE,
|
||||
BBCCoUkIPlayerGroupIE,
|
||||
BBCCoUkPlaylistIE,
|
||||
BBCIE,
|
||||
)
|
||||
@@ -149,7 +155,6 @@ from .bleacherreport import (
|
||||
BleacherReportIE,
|
||||
BleacherReportCMSIE,
|
||||
)
|
||||
from .blinkx import BlinkxIE
|
||||
from .bloomberg import BloombergIE
|
||||
from .bokecc import BokeCCIE
|
||||
from .bongacams import BongaCamsIE
|
||||
@@ -208,7 +213,11 @@ from .cbsnews import (
|
||||
CBSNewsIE,
|
||||
CBSNewsLiveVideoIE,
|
||||
)
|
||||
from .cbssports import CBSSportsIE
|
||||
from .cbssports import (
|
||||
CBSSportsEmbedIE,
|
||||
CBSSportsIE,
|
||||
TwentyFourSevenSportsIE,
|
||||
)
|
||||
from .ccc import (
|
||||
CCCIE,
|
||||
CCCPlaylistIE,
|
||||
@@ -263,6 +272,7 @@ from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
|
||||
from .commonprotocols import (
|
||||
MmsIE,
|
||||
RtmpIE,
|
||||
ViewSourceIE,
|
||||
)
|
||||
from .condenast import CondeNastIE
|
||||
from .contv import CONtvIE
|
||||
@@ -281,7 +291,8 @@ from .ctvnews import CTVNewsIE
|
||||
from .cultureunplugged import CultureUnpluggedIE
|
||||
from .curiositystream import (
|
||||
CuriosityStreamIE,
|
||||
CuriosityStreamCollectionIE,
|
||||
CuriosityStreamCollectionsIE,
|
||||
CuriosityStreamSeriesIE,
|
||||
)
|
||||
from .cwtv import CWTVIE
|
||||
from .dailymail import DailyMailIE
|
||||
@@ -306,6 +317,10 @@ from .democracynow import DemocracynowIE
|
||||
from .dfb import DFBIE
|
||||
from .dhm import DHMIE
|
||||
from .digg import DiggIE
|
||||
from .discoveryplusindia import (
|
||||
DiscoveryPlusIndiaIE,
|
||||
DiscoveryPlusIndiaShowIE,
|
||||
)
|
||||
from .dotsub import DotsubIE
|
||||
from .douyutv import (
|
||||
DouyuShowIE,
|
||||
@@ -384,6 +399,7 @@ from .facebook import (
|
||||
FacebookIE,
|
||||
FacebookPluginsVideoIE,
|
||||
)
|
||||
from .fancode import FancodeVodIE
|
||||
from .faz import FazIE
|
||||
from .fc2 import (
|
||||
FC2IE,
|
||||
@@ -450,10 +466,7 @@ from .gamestar import GameStarIE
|
||||
from .gaskrank import GaskrankIE
|
||||
from .gazeta import GazetaIE
|
||||
from .gdcvault import GDCVaultIE
|
||||
from .gedi import (
|
||||
GediIE,
|
||||
GediEmbedsIE,
|
||||
)
|
||||
from .gedidigital import GediDigitalIE
|
||||
from .generic import GenericIE
|
||||
from .gfycat import GfycatIE
|
||||
from .giantbomb import GiantBombIE
|
||||
@@ -492,6 +505,7 @@ from .hotnewhiphop import HotNewHipHopIE
|
||||
from .hotstar import (
|
||||
HotStarIE,
|
||||
HotStarPlaylistIE,
|
||||
HotStarSeriesIE,
|
||||
)
|
||||
from .howcast import HowcastIE
|
||||
from .howstuffworks import HowStuffWorksIE
|
||||
@@ -584,7 +598,11 @@ from .kuwo import (
|
||||
KuwoCategoryIE,
|
||||
KuwoMvIE,
|
||||
)
|
||||
from .la7 import LA7IE
|
||||
from .la7 import (
|
||||
LA7IE,
|
||||
LA7PodcastEpisodeIE,
|
||||
LA7PodcastIE,
|
||||
)
|
||||
from .laola1tv import (
|
||||
Laola1TvEmbedIE,
|
||||
Laola1TvIE,
|
||||
@@ -625,7 +643,11 @@ from .limelight import (
|
||||
LimelightChannelIE,
|
||||
LimelightChannelListIE,
|
||||
)
|
||||
from .line import LineTVIE
|
||||
from .line import (
|
||||
LineTVIE,
|
||||
LineLiveIE,
|
||||
LineLiveChannelIE,
|
||||
)
|
||||
from .linkedin import (
|
||||
LinkedInLearningIE,
|
||||
LinkedInLearningCourseIE,
|
||||
@@ -663,6 +685,7 @@ from .mangomolo import (
|
||||
MangomoloLiveIE,
|
||||
)
|
||||
from .manyvids import ManyVidsIE
|
||||
from .maoritv import MaoriTVIE
|
||||
from .markiza import (
|
||||
MarkizaIE,
|
||||
MarkizaPageIE,
|
||||
@@ -711,7 +734,10 @@ from .mixcloud import (
|
||||
MixcloudUserIE,
|
||||
MixcloudPlaylistIE,
|
||||
)
|
||||
from .mlb import MLBIE
|
||||
from .mlb import (
|
||||
MLBIE,
|
||||
MLBVideoIE,
|
||||
)
|
||||
from .mnet import MnetIE
|
||||
from .moevideo import MoeVideoIE
|
||||
from .mofosex import (
|
||||
@@ -735,10 +761,15 @@ from .mtv import (
|
||||
MTVServicesEmbeddedIE,
|
||||
MTVDEIE,
|
||||
MTVJapanIE,
|
||||
MTVItaliaIE,
|
||||
MTVItaliaProgrammaIE,
|
||||
)
|
||||
from .muenchentv import MuenchenTVIE
|
||||
from .mwave import MwaveIE, MwaveMeetGreetIE
|
||||
from .mxplayer import MxplayerIE
|
||||
from .mxplayer import (
|
||||
MxplayerIE,
|
||||
MxplayerShowIE,
|
||||
)
|
||||
from .mychannels import MyChannelsIE
|
||||
from .myspace import MySpaceIE, MySpaceAlbumIE
|
||||
from .myspass import MySpassIE
|
||||
@@ -781,8 +812,9 @@ from .ndr import (
|
||||
NJoyEmbedIE,
|
||||
)
|
||||
from .ndtv import NDTVIE
|
||||
from .netzkino import NetzkinoIE
|
||||
from .nebula import NebulaIE
|
||||
from .nerdcubed import NerdCubedFeedIE
|
||||
from .netzkino import NetzkinoIE
|
||||
from .neteasemusic import (
|
||||
NetEaseMusicIE,
|
||||
NetEaseMusicAlbumIE,
|
||||
@@ -807,6 +839,7 @@ from .nexx import (
|
||||
NexxIE,
|
||||
NexxEmbedIE,
|
||||
)
|
||||
from .nfhsnetwork import NFHSNetworkIE
|
||||
from .nfl import (
|
||||
NFLIE,
|
||||
NFLArticleIE,
|
||||
@@ -823,7 +856,7 @@ from .nick import (
|
||||
NickNightIE,
|
||||
NickRuIE,
|
||||
)
|
||||
from .niconico import NiconicoIE, NiconicoPlaylistIE
|
||||
from .niconico import NiconicoIE, NiconicoPlaylistIE, NiconicoUserIE
|
||||
from .ninecninemedia import NineCNineMediaIE
|
||||
from .ninegag import NineGagIE
|
||||
from .ninenow import NineNowIE
|
||||
@@ -918,8 +951,14 @@ from .packtpub import (
|
||||
PacktPubIE,
|
||||
PacktPubCourseIE,
|
||||
)
|
||||
from .palcomp3 import (
|
||||
PalcoMP3IE,
|
||||
PalcoMP3ArtistIE,
|
||||
PalcoMP3VideoIE,
|
||||
)
|
||||
from .pandoratv import PandoraTVIE
|
||||
from .parliamentliveuk import ParliamentLiveUKIE
|
||||
from .parlview import ParlviewIE
|
||||
from .patreon import PatreonIE
|
||||
from .pbs import PBSIE
|
||||
from .pearvideo import PearVideoIE
|
||||
@@ -951,9 +990,11 @@ from .platzi import (
|
||||
from .playfm import PlayFMIE
|
||||
from .playplustv import PlayPlusTVIE
|
||||
from .plays import PlaysTVIE
|
||||
from .playstuff import PlayStuffIE
|
||||
from .playtvak import PlaytvakIE
|
||||
from .playvid import PlayvidIE
|
||||
from .playwire import PlaywireIE
|
||||
from .plutotv import PlutoTVIE
|
||||
from .pluralsight import (
|
||||
PluralsightIE,
|
||||
PluralsightCourseIE,
|
||||
@@ -1083,6 +1124,7 @@ from .safari import (
|
||||
SafariApiIE,
|
||||
SafariCourseIE,
|
||||
)
|
||||
from .saitosan import SaitosanIE
|
||||
from .samplefocus import SampleFocusIE
|
||||
from .sapo import SapoIE
|
||||
from .savefrom import SaveFromIE
|
||||
@@ -1115,6 +1157,7 @@ from .shared import (
|
||||
SharedIE,
|
||||
VivoIE,
|
||||
)
|
||||
from .shemaroome import ShemarooMeIE
|
||||
from .showroomlive import ShowRoomLiveIE
|
||||
from .simplecast import (
|
||||
SimplecastIE,
|
||||
@@ -1148,7 +1191,10 @@ from .slideslive import SlidesLiveIE
|
||||
from .slutload import SlutloadIE
|
||||
from .snotr import SnotrIE
|
||||
from .sohu import SohuIE
|
||||
from .sonyliv import SonyLIVIE
|
||||
from .sonyliv import (
|
||||
SonyLIVIE,
|
||||
SonyLIVSeriesIE,
|
||||
)
|
||||
from .soundcloud import (
|
||||
SoundcloudEmbedIE,
|
||||
SoundcloudIE,
|
||||
@@ -1179,7 +1225,10 @@ from .spike import (
|
||||
BellatorIE,
|
||||
ParamountNetworkIE,
|
||||
)
|
||||
from .stitcher import StitcherIE
|
||||
from .stitcher import (
|
||||
StitcherIE,
|
||||
StitcherShowIE,
|
||||
)
|
||||
from .sport5 import Sport5IE
|
||||
from .sportbox import SportBoxIE
|
||||
from .sportdeutschland import SportDeutschlandIE
|
||||
@@ -1253,6 +1302,7 @@ from .telebruxelles import TeleBruxellesIE
|
||||
from .telecinco import TelecincoIE
|
||||
from .telegraaf import TelegraafIE
|
||||
from .telemb import TeleMBIE
|
||||
from .telemundo import TelemundoIE
|
||||
from .telequebec import (
|
||||
TeleQuebecIE,
|
||||
TeleQuebecSquatIE,
|
||||
@@ -1307,7 +1357,10 @@ from .trovo import (
|
||||
from .trunews import TruNewsIE
|
||||
from .trutv import TruTVIE
|
||||
from .tube8 import Tube8IE
|
||||
from .tubitv import TubiTvIE
|
||||
from .tubitv import (
|
||||
TubiTvIE,
|
||||
TubiTvShowIE,
|
||||
)
|
||||
from .tumblr import TumblrIE
|
||||
from .tunein import (
|
||||
TuneInClipIE,
|
||||
@@ -1402,6 +1455,7 @@ from .ufctv import (
|
||||
UFCTVIE,
|
||||
UFCArabiaIE,
|
||||
)
|
||||
from .ukcolumn import UkColumnIE
|
||||
from .uktvplay import UKTVPlayIE
|
||||
from .digiteka import DigitekaIE
|
||||
from .dlive import (
|
||||
@@ -1510,7 +1564,10 @@ from .vodlocker import VodlockerIE
|
||||
from .vodpl import VODPlIE
|
||||
from .vodplatform import VODPlatformIE
|
||||
from .voicerepublic import VoiceRepublicIE
|
||||
from .voot import VootIE
|
||||
from .voot import (
|
||||
VootIE,
|
||||
VootSeriesIE,
|
||||
)
|
||||
from .voxmedia import (
|
||||
VoxMediaVolumeIE,
|
||||
VoxMediaIE,
|
||||
@@ -1560,6 +1617,8 @@ from .weibo import (
|
||||
WeiboMobileIE
|
||||
)
|
||||
from .weiqitv import WeiqiTVIE
|
||||
from .wimtv import WimTVIE
|
||||
from .whowatch import WhoWatchIE
|
||||
from .wistia import (
|
||||
WistiaIE,
|
||||
WistiaPlaylistIE,
|
||||
@@ -1666,8 +1725,14 @@ from .zattoo import (
|
||||
ZattooLiveIE,
|
||||
)
|
||||
from .zdf import ZDFIE, ZDFChannelIE
|
||||
from .zee5 import (
|
||||
Zee5IE,
|
||||
Zee5SeriesIE,
|
||||
)
|
||||
from .zhihu import ZhihuIE
|
||||
from .zingmp3 import ZingMp3IE
|
||||
from .zee5 import Zee5IE
|
||||
from .zingmp3 import (
|
||||
ZingMp3IE,
|
||||
ZingMp3AlbumIE,
|
||||
)
|
||||
from .zoom import ZoomIE
|
||||
from .zype import ZypeIE
|
||||
|
||||
@@ -3,14 +3,11 @@ from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import re
|
||||
import socket
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_etree_fromstring,
|
||||
compat_http_client,
|
||||
compat_str,
|
||||
compat_urllib_error,
|
||||
compat_urllib_parse_unquote,
|
||||
compat_urllib_parse_unquote_plus,
|
||||
)
|
||||
@@ -23,6 +20,7 @@ from ..utils import (
|
||||
int_or_none,
|
||||
js_to_json,
|
||||
limit_length,
|
||||
network_exceptions,
|
||||
parse_count,
|
||||
qualities,
|
||||
sanitized_Request,
|
||||
@@ -348,7 +346,7 @@ class FacebookIE(InfoExtractor):
|
||||
login_results, 'login error', default=None, group='error')
|
||||
if error:
|
||||
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
||||
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
|
||||
self.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
|
||||
return
|
||||
|
||||
fb_dtsg = self._search_regex(
|
||||
@@ -369,9 +367,9 @@ class FacebookIE(InfoExtractor):
|
||||
check_response = self._download_webpage(check_req, None,
|
||||
note='Confirming login')
|
||||
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
|
||||
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
|
||||
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
|
||||
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
|
||||
self.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
|
||||
except network_exceptions as err:
|
||||
self.report_warning('unable to log in: %s' % error_to_compat_str(err))
|
||||
return
|
||||
|
||||
def _real_initialize(self):
|
||||
@@ -625,8 +623,6 @@ class FacebookIE(InfoExtractor):
|
||||
subtitles_src = f[0].get('subtitles_src')
|
||||
if subtitles_src:
|
||||
subtitles.setdefault('en', []).append({'url': subtitles_src})
|
||||
if not formats:
|
||||
raise ExtractorError('Cannot find video formats')
|
||||
|
||||
process_formats(formats)
|
||||
|
||||
|
||||
91
yt_dlp/extractor/fancode.py
Normal file
91
yt_dlp/extractor/fancode.py
Normal file
@@ -0,0 +1,91 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
parse_iso8601,
|
||||
ExtractorError,
|
||||
try_get
|
||||
)
|
||||
|
||||
|
||||
class FancodeVodIE(InfoExtractor):
|
||||
IE_NAME = 'fancode:vod'
|
||||
|
||||
_VALID_URL = r'https?://(?:www\.)?fancode\.com/video/(?P<id>[0-9]+)\b'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://fancode.com/video/15043/match-preview-pbks-vs-mi',
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
'format': 'bestvideo'
|
||||
},
|
||||
'info_dict': {
|
||||
'id': '6249806281001',
|
||||
'ext': 'mp4',
|
||||
'title': 'Match Preview: PBKS vs MI',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
"timestamp": 1619081590,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'upload_date': '20210422',
|
||||
'uploader_id': '6008340455001'
|
||||
}
|
||||
}, {
|
||||
'url': 'https://fancode.com/video/15043',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
||||
BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/%s/default_default/index.html?videoId=%s'
|
||||
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
brightcove_user_id = self._html_search_regex(
|
||||
r'(?:https?://)?players\.brightcove\.net/(\d+)/default_default/index(?:\.min)?\.js',
|
||||
webpage, 'user id')
|
||||
|
||||
data = '''{
|
||||
"query":"query Video($id: Int\\u0021, $filter: SegmentFilter) { media(id: $id, filter: $filter) { id contentId title contentId publishedTime totalViews totalUpvotes provider thumbnail { src } mediaSource {brightcove } duration isPremium isUserEntitled tags duration }}",
|
||||
"variables":{
|
||||
"id":%s,
|
||||
"filter":{
|
||||
"contentDataType":"DEFAULT"
|
||||
}
|
||||
},
|
||||
"operationName":"Video"
|
||||
}''' % video_id
|
||||
|
||||
metadata_json = self._download_json(
|
||||
'https://www.fancode.com/graphql', video_id, data=data.encode(), note='Downloading metadata',
|
||||
headers={
|
||||
'content-type': 'application/json',
|
||||
'origin': 'https://fancode.com',
|
||||
'referer': url,
|
||||
})
|
||||
|
||||
media = try_get(metadata_json, lambda x: x['data']['media'], dict) or {}
|
||||
brightcove_video_id = try_get(media, lambda x: x['mediaSource']['brightcove'], compat_str)
|
||||
|
||||
if brightcove_video_id is None:
|
||||
raise ExtractorError('Unable to extract brightcove Video ID')
|
||||
|
||||
is_premium = media.get('isPremium')
|
||||
if is_premium:
|
||||
self.report_warning('this video requires a premium account', video_id)
|
||||
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'url': BRIGHTCOVE_URL_TEMPLATE % (brightcove_user_id, brightcove_video_id),
|
||||
'ie_key': 'BrightcoveNew',
|
||||
'id': video_id,
|
||||
'title': media['title'],
|
||||
'like_count': media.get('totalUpvotes'),
|
||||
'view_count': media.get('totalViews'),
|
||||
'tags': media.get('tags'),
|
||||
'release_timestamp': parse_iso8601(media.get('publishedTime')),
|
||||
'availability': self._availability(needs_premium=is_premium),
|
||||
}
|
||||
@@ -151,6 +151,7 @@ class FranceTVIE(InfoExtractor):
|
||||
videos.append(fallback_info['video'])
|
||||
|
||||
formats = []
|
||||
subtitles = {}
|
||||
for video in videos:
|
||||
video_url = video.get('url')
|
||||
if not video_url:
|
||||
@@ -171,10 +172,12 @@ class FranceTVIE(InfoExtractor):
|
||||
sign(video_url, format_id) + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44',
|
||||
video_id, f4m_id=format_id, fatal=False))
|
||||
elif ext == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
|
||||
sign(video_url, format_id), video_id, 'mp4',
|
||||
entry_protocol='m3u8_native', m3u8_id=format_id,
|
||||
fatal=False))
|
||||
fatal=False)
|
||||
formats.extend(m3u8_fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, m3u8_subs)
|
||||
elif ext == 'mpd':
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
sign(video_url, format_id), video_id, mpd_id=format_id, fatal=False))
|
||||
@@ -199,13 +202,12 @@ class FranceTVIE(InfoExtractor):
|
||||
title += ' - %s' % subtitle
|
||||
title = title.strip()
|
||||
|
||||
subtitles = {}
|
||||
subtitles_list = [{
|
||||
'url': subformat['url'],
|
||||
'ext': subformat.get('format'),
|
||||
} for subformat in info.get('subtitles', []) if subformat.get('url')]
|
||||
if subtitles_list:
|
||||
subtitles['fr'] = subtitles_list
|
||||
subtitles.setdefault('fr', []).extend(
|
||||
[{
|
||||
'url': subformat['url'],
|
||||
'ext': subformat.get('format'),
|
||||
} for subformat in info.get('subtitles', []) if subformat.get('url')]
|
||||
)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
@@ -357,6 +359,22 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||
'skip_download': True,
|
||||
},
|
||||
'add_ie': [FranceTVIE.ie_key()],
|
||||
}, {
|
||||
'note': 'Only an image exists in initial webpage instead of the video',
|
||||
'url': 'https://www.francetvinfo.fr/sante/maladie/coronavirus/covid-19-en-inde-une-situation-catastrophique-a-new-dehli_4381095.html',
|
||||
'info_dict': {
|
||||
'id': '7d204c9e-a2d3-11eb-9e4c-000d3a23d482',
|
||||
'ext': 'mp4',
|
||||
'title': 'Covid-19 : une situation catastrophique à New Dehli',
|
||||
'thumbnail': str,
|
||||
'duration': 76,
|
||||
'timestamp': 1619028518,
|
||||
'upload_date': '20210421',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'add_ie': [FranceTVIE.ie_key()],
|
||||
}, {
|
||||
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
|
||||
'only_matching': True,
|
||||
@@ -384,6 +402,10 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||
}, {
|
||||
'url': 'http://france3-regions.francetvinfo.fr/limousin/emissions/jt-1213-limousin',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# "<figure id=" pattern (#28792)
|
||||
'url': 'https://www.francetvinfo.fr/culture/patrimoine/incendie-de-notre-dame-de-paris/notre-dame-de-paris-de-l-incendie-de-la-cathedrale-a-sa-reconstruction_4372291.html',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@@ -401,7 +423,7 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||
(r'player\.load[^;]+src:\s*["\']([^"\']+)',
|
||||
r'id-video=([^@]+@[^"]+)',
|
||||
r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"',
|
||||
r'data-id="([^"]+)"'),
|
||||
r'(?:data-id|<figure[^<]+\bid)=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
|
||||
webpage, 'video id')
|
||||
|
||||
return self._make_url_result(video_id)
|
||||
|
||||
@@ -17,7 +17,7 @@ class FujiTVFODPlus7IE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
formats = self._extract_m3u8_formats(
|
||||
self._BASE_URL + 'abr/pc_html5/%s.m3u8' % video_id, video_id)
|
||||
self._BASE_URL + 'abr/pc_html5/%s.m3u8' % video_id, video_id, 'mp4')
|
||||
for f in formats:
|
||||
wh = self._BITRATE_MAP.get(f.get('tbr'))
|
||||
if wh:
|
||||
|
||||
@@ -16,7 +16,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class FunimationIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?funimation(?:\.com|now\.uk)/shows/[^/]+/(?P<id>[^/?#&]+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?funimation(?:\.com|now\.uk)/(?:[^/]+/)?shows/[^/]+/(?P<id>[^/?#&]+)'
|
||||
|
||||
_NETRC_MACHINE = 'funimation'
|
||||
_TOKEN = None
|
||||
@@ -51,6 +51,10 @@ class FunimationIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://www.funimationnow.uk/shows/puzzle-dragons-x/drop-impact/simulcast/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# with lang code
|
||||
'url': 'https://www.funimation.com/en/shows/hacksign/role-play/',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _login(self):
|
||||
|
||||
@@ -5,7 +5,10 @@ import re
|
||||
from .common import InfoExtractor
|
||||
from .kaltura import KalturaIE
|
||||
from ..utils import (
|
||||
HEADRequest,
|
||||
remove_start,
|
||||
sanitized_Request,
|
||||
smuggle_url,
|
||||
urlencode_postdata,
|
||||
)
|
||||
|
||||
@@ -100,6 +103,26 @@ class GDCVaultIE(InfoExtractor):
|
||||
'format': 'mp4-408',
|
||||
},
|
||||
},
|
||||
{
|
||||
# Kaltura embed, whitespace between quote and embedded URL in iframe's src
|
||||
'url': 'https://www.gdcvault.com/play/1025699',
|
||||
'info_dict': {
|
||||
'id': '0_zagynv0a',
|
||||
'ext': 'mp4',
|
||||
'title': 'Tech Toolbox',
|
||||
'upload_date': '20190408',
|
||||
'uploader_id': 'joe@blazestreaming.com',
|
||||
'timestamp': 1554764629,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
},
|
||||
{
|
||||
# HTML5 video
|
||||
'url': 'http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru',
|
||||
'only_matching': True,
|
||||
},
|
||||
]
|
||||
|
||||
def _login(self, webpage_url, display_id):
|
||||
@@ -120,38 +143,78 @@ class GDCVaultIE(InfoExtractor):
|
||||
request = sanitized_Request(login_url, urlencode_postdata(login_form))
|
||||
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
self._download_webpage(request, display_id, 'Logging in')
|
||||
webpage = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
|
||||
start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
|
||||
self._download_webpage(logout_url, display_id, 'Logging out')
|
||||
|
||||
return webpage
|
||||
return start_page
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id, name = re.match(self._VALID_URL, url).groups()
|
||||
display_id = name or video_id
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
webpage_url = 'http://www.gdcvault.com/play/' + video_id
|
||||
start_page = self._download_webpage(webpage_url, display_id)
|
||||
|
||||
title = self._html_search_regex(
|
||||
r'<td><strong>Session Name:?</strong></td>\s*<td>(.*?)</td>',
|
||||
webpage, 'title')
|
||||
direct_url = self._search_regex(
|
||||
r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
|
||||
start_page, 'url', default=None)
|
||||
if direct_url:
|
||||
title = self._html_search_regex(
|
||||
r'<td><strong>Session Name:?</strong></td>\s*<td>(.*?)</td>',
|
||||
start_page, 'title')
|
||||
video_url = 'http://www.gdcvault.com' + direct_url
|
||||
# resolve the url so that we can detect the correct extension
|
||||
video_url = self._request_webpage(
|
||||
HEADRequest(video_url), video_id).geturl()
|
||||
|
||||
PLAYER_REGEX = r'<iframe src=\"(?P<manifest_url>.*?)\".*?</iframe>'
|
||||
manifest_url = self._html_search_regex(
|
||||
PLAYER_REGEX, webpage, 'manifest_url')
|
||||
return {
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
'url': video_url,
|
||||
'title': title,
|
||||
}
|
||||
|
||||
partner_id = self._search_regex(
|
||||
r'/p(?:artner_id)?/(\d+)', manifest_url, 'partner id',
|
||||
default='1670711')
|
||||
embed_url = KalturaIE._extract_url(start_page)
|
||||
if embed_url:
|
||||
embed_url = smuggle_url(embed_url, {'source_url': url})
|
||||
ie_key = 'Kaltura'
|
||||
else:
|
||||
PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/(?:gdc-)?player.*?\.html.*?".*?</iframe>'
|
||||
|
||||
kaltura_id = self._search_regex(
|
||||
r'entry_id=(?P<id>(?:[^&])+)', manifest_url,
|
||||
'kaltura id', group='id')
|
||||
xml_root = self._html_search_regex(
|
||||
PLAYER_REGEX, start_page, 'xml root', default=None)
|
||||
if xml_root is None:
|
||||
# Probably need to authenticate
|
||||
login_res = self._login(webpage_url, display_id)
|
||||
if login_res is None:
|
||||
self.report_warning('Could not login.')
|
||||
else:
|
||||
start_page = login_res
|
||||
# Grab the url from the authenticated page
|
||||
xml_root = self._html_search_regex(
|
||||
PLAYER_REGEX, start_page, 'xml root')
|
||||
|
||||
xml_name = self._html_search_regex(
|
||||
r'<iframe src=".*?\?xml(?:=|URL=xml/)(.+?\.xml).*?".*?</iframe>',
|
||||
start_page, 'xml filename', default=None)
|
||||
if not xml_name:
|
||||
info = self._parse_html5_media_entries(url, start_page, video_id)[0]
|
||||
info.update({
|
||||
'title': remove_start(self._search_regex(
|
||||
r'>Session Name:\s*<.*?>\s*<td>(.+?)</td>', start_page,
|
||||
'title', default=None) or self._og_search_title(
|
||||
start_page, default=None), 'GDC Vault - '),
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
})
|
||||
return info
|
||||
embed_url = '%s/xml/%s' % (xml_root, xml_name)
|
||||
ie_key = 'DigitallySpeaking'
|
||||
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'url': 'kaltura:%s:%s' % (partner_id, kaltura_id),
|
||||
'ie_key': KalturaIE.ie_key(),
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'url': embed_url,
|
||||
'ie_key': ie_key,
|
||||
}
|
||||
|
||||
@@ -1,266 +0,0 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
base_url,
|
||||
url_basename,
|
||||
urljoin,
|
||||
)
|
||||
|
||||
|
||||
class GediBaseIE(InfoExtractor):
|
||||
@staticmethod
|
||||
def _clean_audio_fmts(formats):
|
||||
unique_formats = []
|
||||
for f in formats:
|
||||
if 'acodec' in f:
|
||||
unique_formats.append(f)
|
||||
formats[:] = unique_formats
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
player_data = re.findall(
|
||||
r'PlayerFactory\.setParam\(\'(?P<type>.+?)\',\s*\'(?P<name>.+?)\',\s*\'(?P<val>.+?)\'\);',
|
||||
webpage)
|
||||
|
||||
formats = []
|
||||
audio_fmts = []
|
||||
hls_fmts = []
|
||||
http_fmts = []
|
||||
title = ''
|
||||
thumb = ''
|
||||
|
||||
fmt_reg = r'(?P<t>video|audio)-(?P<p>rrtv|hls)-(?P<h>[\w\d]+)(?:-(?P<br>[\w\d]+))?$'
|
||||
br_reg = r'video-rrtv-(?P<br>\d+)-'
|
||||
|
||||
for t, n, v in player_data:
|
||||
if t == 'format':
|
||||
m = re.match(fmt_reg, n)
|
||||
if m:
|
||||
# audio formats
|
||||
if m.group('t') == 'audio':
|
||||
if m.group('p') == 'hls':
|
||||
audio_fmts.extend(self._extract_m3u8_formats(
|
||||
v, video_id, 'm4a', m3u8_id='hls', fatal=False))
|
||||
elif m.group('p') == 'rrtv':
|
||||
audio_fmts.append({
|
||||
'format_id': 'mp3',
|
||||
'url': v,
|
||||
'tbr': 128,
|
||||
'ext': 'mp3',
|
||||
'vcodec': 'none',
|
||||
'acodec': 'mp3',
|
||||
})
|
||||
|
||||
# video formats
|
||||
elif m.group('t') == 'video':
|
||||
# hls manifest video
|
||||
if m.group('p') == 'hls':
|
||||
hls_fmts.extend(self._extract_m3u8_formats(
|
||||
v, video_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||
# direct mp4 video
|
||||
elif m.group('p') == 'rrtv':
|
||||
if not m.group('br'):
|
||||
mm = re.search(br_reg, v)
|
||||
http_fmts.append({
|
||||
'format_id': 'https-' + m.group('h'),
|
||||
'protocol': 'https',
|
||||
'url': v,
|
||||
'tbr': int(m.group('br')) if m.group('br') else
|
||||
(int(mm.group('br')) if mm.group('br') else 0),
|
||||
'height': int(m.group('h'))
|
||||
})
|
||||
|
||||
elif t == 'param':
|
||||
if n == 'videotitle':
|
||||
title = v
|
||||
if n == 'image_full_play':
|
||||
thumb = v
|
||||
|
||||
title = self._og_search_title(webpage) if title == '' else title
|
||||
|
||||
# clean weird char
|
||||
title = compat_str(title).encode('utf8', 'replace').replace(b'\xc3\x82', b'').decode('utf8', 'replace')
|
||||
|
||||
if audio_fmts:
|
||||
self._clean_audio_fmts(audio_fmts)
|
||||
self._sort_formats(audio_fmts)
|
||||
if hls_fmts:
|
||||
self._sort_formats(hls_fmts)
|
||||
if http_fmts:
|
||||
self._sort_formats(http_fmts)
|
||||
|
||||
formats.extend(audio_fmts)
|
||||
formats.extend(hls_fmts)
|
||||
formats.extend(http_fmts)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': self._html_search_meta('twitter:description', webpage),
|
||||
'thumbnail': thumb,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
|
||||
class GediIE(GediBaseIE):
|
||||
_VALID_URL = r'''(?x)https?://video\.
|
||||
(?:
|
||||
(?:espresso\.)?repubblica
|
||||
|lastampa
|
||||
|huffingtonpost
|
||||
|ilsecoloxix
|
||||
|iltirreno
|
||||
|messaggeroveneto
|
||||
|ilpiccolo
|
||||
|gazzettadimantova
|
||||
|mattinopadova
|
||||
|laprovinciapavese
|
||||
|tribunatreviso
|
||||
|nuovavenezia
|
||||
|gazzettadimodena
|
||||
|lanuovaferrara
|
||||
|corrierealpi
|
||||
|lasentinella
|
||||
)
|
||||
(?:\.gelocal)?\.it/(?!embed/).+?/(?P<id>[\d/]+)(?:\?|\&|$)'''
|
||||
_TESTS = [{
|
||||
'url': 'https://video.lastampa.it/politica/il-paradosso-delle-regionali-la-lega-vince-ma-sembra-aver-perso/121559/121683',
|
||||
'md5': '84658d7fb9e55a6e57ecc77b73137494',
|
||||
'info_dict': {
|
||||
'id': '121559/121683',
|
||||
'ext': 'mp4',
|
||||
'title': 'Il paradosso delle Regionali: ecco perché la Lega vince ma sembra aver perso',
|
||||
'description': 'md5:de7f4d6eaaaf36c153b599b10f8ce7ca',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-social-play\.jpg$',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963',
|
||||
'md5': 'e763b94b7920799a0e0e23ffefa2d157',
|
||||
'info_dict': {
|
||||
'id': '367415/367963',
|
||||
'ext': 'mp4',
|
||||
'title': 'Record della pista a Spa Francorchamps, la Pagani Huayra Roadster BC stupisce',
|
||||
'description': 'md5:5deb503cefe734a3eb3f07ed74303920',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-social-play\.jpg$',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267',
|
||||
'md5': 'e48108e97b1af137d22a8469f2019057',
|
||||
'info_dict': {
|
||||
'id': '66184/66267',
|
||||
'ext': 'mp4',
|
||||
'title': 'Cassani e i brividi azzurri ai Mondiali di Imola: \\"Qui mi sono innamorato del ciclismo da ragazzino, incredibile tornarci da ct\\"',
|
||||
'description': 'md5:fc9c50894f70a2469bb9b54d3d0a3d3b',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-social-play\.jpg$',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://video.iltirreno.gelocal.it/sport/dentro-la-notizia-ferrari-cosa-succede-a-maranello/141059/142723',
|
||||
'md5': 'a6e39f3bdc1842bbd92abbbbef230817',
|
||||
'info_dict': {
|
||||
'id': '141059/142723',
|
||||
'ext': 'mp4',
|
||||
'title': 'Dentro la notizia - Ferrari, cosa succede a Maranello',
|
||||
'description': 'md5:9907d65b53765681fa3a0b3122617c1f',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-social-play\.jpg$',
|
||||
},
|
||||
}]
|
||||
|
||||
|
||||
class GediEmbedsIE(GediBaseIE):
|
||||
_VALID_URL = r'''(?x)https?://video\.
|
||||
(?:
|
||||
(?:espresso\.)?repubblica
|
||||
|lastampa
|
||||
|huffingtonpost
|
||||
|ilsecoloxix
|
||||
|iltirreno
|
||||
|messaggeroveneto
|
||||
|ilpiccolo
|
||||
|gazzettadimantova
|
||||
|mattinopadova
|
||||
|laprovinciapavese
|
||||
|tribunatreviso
|
||||
|nuovavenezia
|
||||
|gazzettadimodena
|
||||
|lanuovaferrara
|
||||
|corrierealpi
|
||||
|lasentinella
|
||||
)
|
||||
(?:\.gelocal)?\.it/embed/.+?/(?P<id>[\d/]+)(?:\?|\&|$)'''
|
||||
_TESTS = [{
|
||||
'url': 'https://video.huffingtonpost.it/embed/politica/cotticelli-non-so-cosa-mi-sia-successo-sto-cercando-di-capire-se-ho-avuto-un-malore/29312/29276?responsive=true&el=video971040871621586700',
|
||||
'md5': 'f4ac23cadfea7fef89bea536583fa7ed',
|
||||
'info_dict': {
|
||||
'id': '29312/29276',
|
||||
'ext': 'mp4',
|
||||
'title': 'Cotticelli: \\"Non so cosa mi sia successo. Sto cercando di capire se ho avuto un malore\\"',
|
||||
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-social-play\.jpg$',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://video.espresso.repubblica.it/embed/tutti-i-video/01-ted-villa/14772/14870&width=640&height=360',
|
||||
'md5': '0391c2c83c6506581003aaf0255889c0',
|
||||
'info_dict': {
|
||||
'id': '14772/14870',
|
||||
'ext': 'mp4',
|
||||
'title': 'Festival EMERGENCY, Villa: «La buona informazione aiuta la salute» (14772-14870)',
|
||||
'description': 'md5:2bce954d278248f3c950be355b7c2226',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-social-play\.jpg$',
|
||||
},
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_urls(urls):
|
||||
# add protocol if missing
|
||||
for i, e in enumerate(urls):
|
||||
if e.startswith('//'):
|
||||
urls[i] = 'https:%s' % e
|
||||
# clean iframes urls
|
||||
for i, e in enumerate(urls):
|
||||
urls[i] = urljoin(base_url(e), url_basename(e))
|
||||
return urls
|
||||
|
||||
@staticmethod
|
||||
def _extract_urls(webpage):
|
||||
entries = [
|
||||
mobj.group('url')
|
||||
for mobj in re.finditer(r'''(?x)
|
||||
(?:
|
||||
data-frame-src=|
|
||||
<iframe[^\n]+src=
|
||||
)
|
||||
(["'])
|
||||
(?P<url>https?://video\.
|
||||
(?:
|
||||
(?:espresso\.)?repubblica
|
||||
|lastampa
|
||||
|huffingtonpost
|
||||
|ilsecoloxix
|
||||
|iltirreno
|
||||
|messaggeroveneto
|
||||
|ilpiccolo
|
||||
|gazzettadimantova
|
||||
|mattinopadova
|
||||
|laprovinciapavese
|
||||
|tribunatreviso
|
||||
|nuovavenezia
|
||||
|gazzettadimodena
|
||||
|lanuovaferrara
|
||||
|corrierealpi
|
||||
|lasentinella
|
||||
)
|
||||
(?:\.gelocal)?\.it/embed/.+?)
|
||||
\1''', webpage)]
|
||||
return GediEmbedsIE._sanitize_urls(entries)
|
||||
|
||||
@staticmethod
|
||||
def _extract_url(webpage):
|
||||
urls = GediEmbedsIE._extract_urls(webpage)
|
||||
return urls[0] if urls else None
|
||||
210
yt_dlp/extractor/gedidigital.py
Normal file
210
yt_dlp/extractor/gedidigital.py
Normal file
@@ -0,0 +1,210 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
base_url,
|
||||
determine_ext,
|
||||
int_or_none,
|
||||
url_basename,
|
||||
urljoin,
|
||||
)
|
||||
|
||||
|
||||
class GediDigitalIE(InfoExtractor):
|
||||
_VALID_URL = r'''(?x)(?P<url>(?:https?:)//video\.
|
||||
(?:
|
||||
(?:
|
||||
(?:espresso\.)?repubblica
|
||||
|lastampa
|
||||
|ilsecoloxix
|
||||
|huffingtonpost
|
||||
)|
|
||||
(?:
|
||||
iltirreno
|
||||
|messaggeroveneto
|
||||
|ilpiccolo
|
||||
|gazzettadimantova
|
||||
|mattinopadova
|
||||
|laprovinciapavese
|
||||
|tribunatreviso
|
||||
|nuovavenezia
|
||||
|gazzettadimodena
|
||||
|lanuovaferrara
|
||||
|corrierealpi
|
||||
|lasentinella
|
||||
)\.gelocal
|
||||
)\.it(?:/[^/]+){2,4}/(?P<id>\d+))(?:$|[?&].*)'''
|
||||
_TESTS = [{
|
||||
'url': 'https://video.lastampa.it/politica/il-paradosso-delle-regionali-la-lega-vince-ma-sembra-aver-perso/121559/121683',
|
||||
'md5': '84658d7fb9e55a6e57ecc77b73137494',
|
||||
'info_dict': {
|
||||
'id': '121683',
|
||||
'ext': 'mp4',
|
||||
'title': 'Il paradosso delle Regionali: ecco perché la Lega vince ma sembra aver perso',
|
||||
'description': 'md5:de7f4d6eaaaf36c153b599b10f8ce7ca',
|
||||
'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-full-.+?\.jpg$',
|
||||
'duration': 125,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://video.huffingtonpost.it/embed/politica/cotticelli-non-so-cosa-mi-sia-successo-sto-cercando-di-capire-se-ho-avuto-un-malore/29312/29276?responsive=true&el=video971040871621586700',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.espresso.repubblica.it/embed/tutti-i-video/01-ted-villa/14772/14870&width=640&height=360',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.iltirreno.gelocal.it/sport/dentro-la-notizia-ferrari-cosa-succede-a-maranello/141059/142723',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.messaggeroveneto.gelocal.it/locale/maria-giovanna-elmi-covid-vaccino/138155/139268',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.ilpiccolo.gelocal.it/dossier/big-john/dinosauro-big-john-al-via-le-visite-guidate-a-trieste/135226/135751',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.gazzettadimantova.gelocal.it/locale/dal-ponte-visconteo-di-valeggio-l-and-8217sos-dei-ristoratori-aprire-anche-a-cena/137310/137818',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.mattinopadova.gelocal.it/dossier/coronavirus-in-veneto/covid-a-vo-un-anno-dopo-un-cuore-tricolore-per-non-dimenticare/138402/138964',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.laprovinciapavese.gelocal.it/locale/mede-zona-rossa-via-alle-vaccinazioni-per-gli-over-80/137545/138120',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.tribunatreviso.gelocal.it/dossier/coronavirus-in-veneto/ecco-le-prima-vaccinazioni-di-massa-nella-marca/134485/135024',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.nuovavenezia.gelocal.it/locale/camion-troppo-alto-per-il-ponte-ferroviario-perde-il-carico/135734/136266',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.gazzettadimodena.gelocal.it/locale/modena-scoperta-la-proteina-che-predice-il-livello-di-gravita-del-covid/139109/139796',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.lanuovaferrara.gelocal.it/locale/due-bombole-di-gpl-aperte-e-abbandonate-i-vigili-bruciano-il-gas/134391/134957',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.corrierealpi.gelocal.it/dossier/cortina-2021-i-mondiali-di-sci-alpino/mondiali-di-sci-il-timelapse-sulla-splendida-olympia/133760/134331',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.lasentinella.gelocal.it/locale/vestigne-centra-un-auto-e-si-ribalta/138931/139466',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://video.espresso.repubblica.it/tutti-i-video/01-ted-villa/14772',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
def _sanitize_urls(urls):
|
||||
# add protocol if missing
|
||||
for i, e in enumerate(urls):
|
||||
if e.startswith('//'):
|
||||
urls[i] = 'https:%s' % e
|
||||
# clean iframes urls
|
||||
for i, e in enumerate(urls):
|
||||
urls[i] = urljoin(base_url(e), url_basename(e))
|
||||
return urls
|
||||
|
||||
@staticmethod
|
||||
def _extract_urls(webpage):
|
||||
entries = [
|
||||
mobj.group('eurl')
|
||||
for mobj in re.finditer(r'''(?x)
|
||||
(?:
|
||||
data-frame-src=|
|
||||
<iframe[^\n]+src=
|
||||
)
|
||||
(["'])(?P<eurl>%s)\1''' % GediDigitalIE._VALID_URL, webpage)]
|
||||
return GediDigitalIE._sanitize_urls(entries)
|
||||
|
||||
@staticmethod
|
||||
def _extract_url(webpage):
|
||||
urls = GediDigitalIE._extract_urls(webpage)
|
||||
return urls[0] if urls else None
|
||||
|
||||
@staticmethod
|
||||
def _clean_formats(formats):
|
||||
format_urls = set()
|
||||
clean_formats = []
|
||||
for f in formats:
|
||||
if f['url'] not in format_urls:
|
||||
if f.get('audio_ext') != 'none' and not f.get('acodec'):
|
||||
continue
|
||||
format_urls.add(f['url'])
|
||||
clean_formats.append(f)
|
||||
formats[:] = clean_formats
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
url = re.match(self._VALID_URL, url).group('url')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
title = self._html_search_meta(
|
||||
['twitter:title', 'og:title'], webpage, fatal=True)
|
||||
player_data = re.findall(
|
||||
r"PlayerFactory\.setParam\('(?P<type>format|param)',\s*'(?P<name>[^']+)',\s*'(?P<val>[^']+)'\);",
|
||||
webpage)
|
||||
|
||||
formats = []
|
||||
duration = thumb = None
|
||||
for t, n, v in player_data:
|
||||
if t == 'format':
|
||||
if n in ('video-hds-vod-ec', 'video-hls-vod-ec', 'video-viralize', 'video-youtube-pfp'):
|
||||
continue
|
||||
elif n.endswith('-vod-ak'):
|
||||
formats.extend(self._extract_akamai_formats(
|
||||
v, video_id, {'http': 'media.gedidigital.it'}))
|
||||
else:
|
||||
ext = determine_ext(v)
|
||||
if ext == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
v, video_id, 'mp4', 'm3u8_native', m3u8_id=n, fatal=False))
|
||||
continue
|
||||
f = {
|
||||
'format_id': n,
|
||||
'url': v,
|
||||
}
|
||||
if ext == 'mp3':
|
||||
abr = int_or_none(self._search_regex(
|
||||
r'-mp3-audio-(\d+)', v, 'abr', default=None))
|
||||
f.update({
|
||||
'abr': abr,
|
||||
'tbr': abr,
|
||||
'acodec': ext,
|
||||
'vcodec': 'none'
|
||||
})
|
||||
else:
|
||||
mobj = re.match(r'^video-rrtv-(\d+)(?:-(\d+))?$', n)
|
||||
if mobj:
|
||||
f.update({
|
||||
'height': int(mobj.group(1)),
|
||||
'vbr': int_or_none(mobj.group(2)),
|
||||
})
|
||||
if not f.get('vbr'):
|
||||
f['vbr'] = int_or_none(self._search_regex(
|
||||
r'-video-rrtv-(\d+)', v, 'abr', default=None))
|
||||
formats.append(f)
|
||||
elif t == 'param':
|
||||
if n in ['image_full', 'image']:
|
||||
thumb = v
|
||||
elif n == 'videoDuration':
|
||||
duration = int_or_none(v)
|
||||
|
||||
self._clean_formats(formats)
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': self._html_search_meta(
|
||||
['twitter:description', 'og:description', 'description'], webpage),
|
||||
'thumbnail': thumb or self._og_search_thumbnail(webpage),
|
||||
'formats': formats,
|
||||
'duration': duration,
|
||||
}
|
||||
@@ -126,14 +126,16 @@ from .viqeo import ViqeoIE
|
||||
from .expressen import ExpressenIE
|
||||
from .zype import ZypeIE
|
||||
from .odnoklassniki import OdnoklassnikiIE
|
||||
from .vk import VKIE
|
||||
from .kinja import KinjaEmbedIE
|
||||
from .gedi import GediEmbedsIE
|
||||
from .gedidigital import GediDigitalIE
|
||||
from .rcs import RCSEmbedsIE
|
||||
from .bitchute import BitChuteIE
|
||||
from .rumble import RumbleEmbedIE
|
||||
from .arcpublishing import ArcPublishingIE
|
||||
from .medialaan import MedialaanIE
|
||||
from .simplecast import SimplecastIE
|
||||
from .wimtv import WimTVIE
|
||||
|
||||
|
||||
class GenericIE(InfoExtractor):
|
||||
@@ -2250,6 +2252,19 @@ class GenericIE(InfoExtractor):
|
||||
},
|
||||
'playlist_mincount': 52,
|
||||
},
|
||||
{
|
||||
# Sibnet embed (https://help.sibnet.ru/?sibnet_video_embed)
|
||||
'url': 'https://phpbb3.x-tk.ru/bbcode-video-sibnet-t24.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# WimTv embed player
|
||||
'url': 'http://www.msmotor.tv/wearefmi-pt-2-2021/',
|
||||
'info_dict': {
|
||||
'id': 'wearefmi-pt-2-2021',
|
||||
'title': '#WEAREFMI – PT.2 – 2021 – MsMotorTV',
|
||||
},
|
||||
'playlist_count': 1,
|
||||
},
|
||||
]
|
||||
|
||||
def report_following_redirect(self, new_url):
|
||||
@@ -2360,13 +2375,13 @@ class GenericIE(InfoExtractor):
|
||||
|
||||
parsed_url = compat_urlparse.urlparse(url)
|
||||
if not parsed_url.scheme:
|
||||
default_search = self._downloader.params.get('default_search')
|
||||
default_search = self.get_param('default_search')
|
||||
if default_search is None:
|
||||
default_search = 'fixup_error'
|
||||
|
||||
if default_search in ('auto', 'auto_warning', 'fixup_error'):
|
||||
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
|
||||
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
|
||||
self.report_warning('The url doesn\'t specify the protocol, trying with http')
|
||||
return self.url_result('http://' + url)
|
||||
elif default_search != 'fixup_error':
|
||||
if default_search == 'auto_warning':
|
||||
@@ -2375,7 +2390,7 @@ class GenericIE(InfoExtractor):
|
||||
'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
|
||||
expected=True)
|
||||
else:
|
||||
self._downloader.report_warning(
|
||||
self.report_warning(
|
||||
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
|
||||
return self.url_result('ytsearch:' + url)
|
||||
|
||||
@@ -2434,8 +2449,9 @@ class GenericIE(InfoExtractor):
|
||||
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
|
||||
if m:
|
||||
format_id = compat_str(m.group('format_id'))
|
||||
subtitles = {}
|
||||
if format_id.endswith('mpegurl'):
|
||||
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
|
||||
formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
|
||||
elif format_id == 'f4m':
|
||||
formats = self._extract_f4m_formats(url, video_id)
|
||||
else:
|
||||
@@ -2447,11 +2463,12 @@ class GenericIE(InfoExtractor):
|
||||
info_dict['direct'] = True
|
||||
self._sort_formats(formats)
|
||||
info_dict['formats'] = formats
|
||||
info_dict['subtitles'] = subtitles
|
||||
return info_dict
|
||||
|
||||
if not self._downloader.params.get('test', False) and not is_intentional:
|
||||
force = self._downloader.params.get('force_generic_extractor', False)
|
||||
self._downloader.report_warning(
|
||||
if not self.get_param('test', False) and not is_intentional:
|
||||
force = self.get_param('force_generic_extractor', False)
|
||||
self.report_warning(
|
||||
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
|
||||
|
||||
if not full_response:
|
||||
@@ -2478,7 +2495,7 @@ class GenericIE(InfoExtractor):
|
||||
# Maybe it's a direct link to a video?
|
||||
# Be careful not to download the whole thing!
|
||||
if not is_html(first_bytes):
|
||||
self._downloader.report_warning(
|
||||
self.report_warning(
|
||||
'URL could be a direct video link, returning it as such.')
|
||||
info_dict.update({
|
||||
'direct': True,
|
||||
@@ -2496,11 +2513,14 @@ class GenericIE(InfoExtractor):
|
||||
|
||||
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
|
||||
try:
|
||||
doc = compat_etree_fromstring(webpage.encode('utf-8'))
|
||||
try:
|
||||
doc = compat_etree_fromstring(webpage)
|
||||
except compat_xml_parse_error:
|
||||
doc = compat_etree_fromstring(webpage.encode('utf-8'))
|
||||
if doc.tag == 'rss':
|
||||
return self._extract_rss(url, video_id, doc)
|
||||
elif doc.tag == 'SmoothStreamingMedia':
|
||||
info_dict['formats'] = self._parse_ism_formats(doc, url)
|
||||
info_dict['formats'], info_dict['subtitles'] = self._parse_ism_formats_and_subtitles(doc, url)
|
||||
self._sort_formats(info_dict['formats'])
|
||||
return info_dict
|
||||
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
|
||||
@@ -2514,7 +2534,7 @@ class GenericIE(InfoExtractor):
|
||||
xspf_base_url=full_response.geturl()),
|
||||
video_id)
|
||||
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
|
||||
info_dict['formats'] = self._parse_mpd_formats(
|
||||
info_dict['formats'], info_dict['subtitles'] = self._parse_mpd_formats_and_subtitles(
|
||||
doc,
|
||||
mpd_base_url=full_response.geturl().rpartition('/')[0],
|
||||
mpd_url=url)
|
||||
@@ -2649,6 +2669,15 @@ class GenericIE(InfoExtractor):
|
||||
if vid_me_embed_url is not None:
|
||||
return self.url_result(vid_me_embed_url, 'Vidme')
|
||||
|
||||
# Invidious Instances
|
||||
# https://github.com/yt-dlp/yt-dlp/issues/195
|
||||
# https://github.com/iv-org/invidious/pull/1730
|
||||
youtube_url = self._search_regex(
|
||||
r'<link rel="alternate" href="(https://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})"',
|
||||
webpage, 'youtube link', default=None)
|
||||
if youtube_url:
|
||||
return self.url_result(youtube_url, YoutubeIE.ie_key())
|
||||
|
||||
# Look for YouTube embeds
|
||||
youtube_urls = YoutubeIE._extract_urls(webpage)
|
||||
if youtube_urls:
|
||||
@@ -2779,6 +2808,11 @@ class GenericIE(InfoExtractor):
|
||||
if odnoklassniki_url:
|
||||
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
|
||||
|
||||
# Look for sibnet embedded player
|
||||
sibnet_urls = VKIE._extract_sibnet_urls(webpage)
|
||||
if sibnet_urls:
|
||||
return self.playlist_from_matches(sibnet_urls, video_id, video_title)
|
||||
|
||||
# Look for embedded ivi player
|
||||
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
|
||||
if mobj is not None:
|
||||
@@ -2955,7 +2989,7 @@ class GenericIE(InfoExtractor):
|
||||
webpage)
|
||||
if not mobj:
|
||||
mobj = re.search(
|
||||
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
|
||||
r'data-video-link=["\'](?P<url>http://m\.mlb\.com/video/[^"\']+)',
|
||||
webpage)
|
||||
if mobj is not None:
|
||||
return self.url_result(mobj.group('url'), 'MLB')
|
||||
@@ -3339,17 +3373,22 @@ class GenericIE(InfoExtractor):
|
||||
return self.playlist_from_matches(
|
||||
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
|
||||
|
||||
# Look for RCS media group embeds
|
||||
gedi_urls = GediEmbedsIE._extract_urls(webpage)
|
||||
gedi_urls = GediDigitalIE._extract_urls(webpage)
|
||||
if gedi_urls:
|
||||
return self.playlist_from_matches(
|
||||
gedi_urls, video_id, video_title, ie=GediEmbedsIE.ie_key())
|
||||
gedi_urls, video_id, video_title, ie=GediDigitalIE.ie_key())
|
||||
|
||||
# Look for RCS media group embeds
|
||||
rcs_urls = RCSEmbedsIE._extract_urls(webpage)
|
||||
if rcs_urls:
|
||||
return self.playlist_from_matches(
|
||||
rcs_urls, video_id, video_title, ie=RCSEmbedsIE.ie_key())
|
||||
|
||||
wimtv_urls = WimTVIE._extract_urls(webpage)
|
||||
if wimtv_urls:
|
||||
return self.playlist_from_matches(
|
||||
wimtv_urls, video_id, video_title, ie=WimTVIE.ie_key())
|
||||
|
||||
bitchute_urls = BitChuteIE._extract_urls(webpage)
|
||||
if bitchute_urls:
|
||||
return self.playlist_from_matches(
|
||||
@@ -3425,6 +3464,9 @@ class GenericIE(InfoExtractor):
|
||||
'url': src,
|
||||
'ext': (mimetype2ext(src_type)
|
||||
or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
|
||||
'http_headers': {
|
||||
'Referer': full_response.geturl(),
|
||||
},
|
||||
})
|
||||
if formats:
|
||||
self._sort_formats(formats)
|
||||
@@ -3493,7 +3535,7 @@ class GenericIE(InfoExtractor):
|
||||
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
|
||||
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
|
||||
if m_video_type is not None:
|
||||
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
|
||||
found = filter_video(re.findall(r'<meta.*?property="og:(?:video|audio)".*?content="(.*?)"', webpage))
|
||||
if not found:
|
||||
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
|
||||
found = re.search(
|
||||
|
||||
@@ -96,7 +96,7 @@ class GloboIE(InfoExtractor):
|
||||
video = self._download_json(
|
||||
'http://api.globovideos.com/videos/%s/playlist' % video_id,
|
||||
video_id)['videos'][0]
|
||||
if not self._downloader.params.get('allow_unplayable_formats') and video.get('encrypted') is True:
|
||||
if not self.get_param('allow_unplayable_formats') and video.get('encrypted') is True:
|
||||
raise ExtractorError('This video is DRM protected.', expected=True)
|
||||
|
||||
title = video['title']
|
||||
|
||||
@@ -4,10 +4,14 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .adobepass import AdobePassIE
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
determine_ext,
|
||||
parse_age_limit,
|
||||
remove_start,
|
||||
remove_end,
|
||||
try_get,
|
||||
urlencode_postdata,
|
||||
ExtractorError,
|
||||
)
|
||||
@@ -46,15 +50,15 @@ class GoIE(AdobePassIE):
|
||||
}
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
(?:
|
||||
(?:(?P<sub_domain>%s)\.)?go|
|
||||
(?P<sub_domain_2>abc|freeform|disneynow|fxnow\.fxnetworks)
|
||||
(?P<sub_domain>
|
||||
(?:%s\.)?go|fxnow\.fxnetworks|
|
||||
(?:www\.)?(?:abc|freeform|disneynow)
|
||||
)\.com/
|
||||
(?:
|
||||
(?:[^/]+/)*(?P<id>[Vv][Dd][Kk][Aa]\w+)|
|
||||
(?:[^/]+/)*(?P<display_id>[^/?\#]+)
|
||||
)
|
||||
''' % '|'.join(list(_SITE_INFO.keys()))
|
||||
''' % r'\.|'.join(list(_SITE_INFO.keys()))
|
||||
_TESTS = [{
|
||||
'url': 'http://abc.go.com/shows/designated-survivor/video/most-recent/VDKA3807643',
|
||||
'info_dict': {
|
||||
@@ -116,6 +120,18 @@ class GoIE(AdobePassIE):
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://abc.com/shows/modern-family/episode-guide/season-01/101-pilot',
|
||||
'info_dict': {
|
||||
'id': 'VDKA22600213',
|
||||
'ext': 'mp4',
|
||||
'title': 'Pilot',
|
||||
'description': 'md5:74306df917cfc199d76d061d66bebdb4',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://abc.go.com/shows/the-catch/episode-guide/season-01/10-the-wedding',
|
||||
'only_matching': True,
|
||||
@@ -133,6 +149,9 @@ class GoIE(AdobePassIE):
|
||||
}, {
|
||||
'url': 'https://disneynow.com/shows/minnies-bow-toons/video/happy-campers/vdka4872013',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.freeform.com/shows/cruel-summer/episode-guide/season-01/01-happy-birthday-jeanette-turner',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _extract_videos(self, brand, video_id='-1', show_id='-1'):
|
||||
@@ -143,24 +162,36 @@ class GoIE(AdobePassIE):
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
sub_domain = mobj.group('sub_domain') or mobj.group('sub_domain_2')
|
||||
sub_domain = remove_start(remove_end(mobj.group('sub_domain') or '', '.go'), 'www.')
|
||||
video_id, display_id = mobj.group('id', 'display_id')
|
||||
site_info = self._SITE_INFO.get(sub_domain, {})
|
||||
brand = site_info.get('brand')
|
||||
if not video_id or not site_info:
|
||||
webpage = self._download_webpage(url, display_id or video_id)
|
||||
video_id = self._search_regex(
|
||||
(
|
||||
# There may be inner quotes, e.g. data-video-id="'VDKA3609139'"
|
||||
# from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
|
||||
r'data-video-id=["\']*(VDKA\w+)',
|
||||
# https://github.com/ytdl-org/youtube-dl/pull/25216/files
|
||||
# The following is based on the pull request on the line above. Changed the ABC.com URL to a show available now.
|
||||
# https://abc.com/shows/the-rookie/episode-guide/season-02/19-the-q-word
|
||||
r'\bvideoIdCode["\']\s*:\s*["\'](vdka\w+)',
|
||||
# Deprecated fallback pattern
|
||||
r'\b(?:video)?id["\']\s*:\s*["\'](VDKA\w+)'
|
||||
), webpage, 'video id', default=video_id)
|
||||
data = self._parse_json(
|
||||
self._search_regex(
|
||||
r'["\']__abc_com__["\']\s*\]\s*=\s*({.+?})\s*;', webpage,
|
||||
'data', default='{}'),
|
||||
display_id or video_id, fatal=False)
|
||||
# https://abc.com/shows/modern-family/episode-guide/season-01/101-pilot
|
||||
layout = try_get(data, lambda x: x['page']['content']['video']['layout'], dict)
|
||||
video_id = None
|
||||
if layout:
|
||||
video_id = try_get(
|
||||
layout,
|
||||
(lambda x: x['videoid'], lambda x: x['video']['id']),
|
||||
compat_str)
|
||||
if not video_id:
|
||||
video_id = self._search_regex(
|
||||
(
|
||||
# There may be inner quotes, e.g. data-video-id="'VDKA3609139'"
|
||||
# from http://freeform.go.com/shows/shadowhunters/episodes/season-2/1-this-guilty-blood
|
||||
r'data-video-id=["\']*(VDKA\w+)',
|
||||
# page.analytics.videoIdCode
|
||||
r'\bvideoIdCode["\']\s*:\s*["\']((?:vdka|VDKA)\w+)',
|
||||
# https://abc.com/shows/the-rookie/episode-guide/season-02/03-the-bet
|
||||
r'\b(?:video)?id["\']\s*:\s*["\'](VDKA\w+)'
|
||||
), webpage, 'video id', default=video_id)
|
||||
if not site_info:
|
||||
brand = self._search_regex(
|
||||
(r'data-brand=\s*["\']\s*(\d+)',
|
||||
|
||||
@@ -253,7 +253,7 @@ class GoogleDriveIE(InfoExtractor):
|
||||
or 'unable to extract confirmation code')
|
||||
|
||||
if not formats and reason:
|
||||
raise ExtractorError(reason, expected=True)
|
||||
self.raise_no_formats(reason, expected=True)
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
|
||||
@@ -87,7 +87,14 @@ class HotStarBaseIE(InfoExtractor):
|
||||
|
||||
class HotStarIE(HotStarBaseIE):
|
||||
IE_NAME = 'hotstar'
|
||||
_VALID_URL = r'https?://(?:www\.)?hotstar\.com/.*(?P<id>\d{10})'
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://(?:www\.)?hotstar\.com(?:/in)?/(?!in/)
|
||||
(?:
|
||||
tv/(?:[^/?#]+/){3}|
|
||||
(?!tv/)[^?#]+/
|
||||
)?
|
||||
(?P<id>\d{10})
|
||||
'''
|
||||
_TESTS = [{
|
||||
# contentData
|
||||
'url': 'https://www.hotstar.com/can-you-not-spread-rumours/1000076273',
|
||||
@@ -141,7 +148,7 @@ class HotStarIE(HotStarBaseIE):
|
||||
|
||||
title = video_data['title']
|
||||
|
||||
if not self._downloader.params.get('allow_unplayable_formats') and video_data.get('drmProtected'):
|
||||
if not self.get_param('allow_unplayable_formats') and video_data.get('drmProtected'):
|
||||
raise ExtractorError('This video is DRM protected.', expected=True)
|
||||
|
||||
headers = {'Referer': url}
|
||||
@@ -184,7 +191,7 @@ class HotStarIE(HotStarBaseIE):
|
||||
geo_restricted = True
|
||||
continue
|
||||
if not formats and geo_restricted:
|
||||
self.raise_geo_restricted(countries=['IN'])
|
||||
self.raise_geo_restricted(countries=['IN'], metadata_available=True)
|
||||
self._sort_formats(formats)
|
||||
|
||||
for f in formats:
|
||||
@@ -235,3 +242,41 @@ class HotStarPlaylistIE(HotStarBaseIE):
|
||||
if video.get('contentId')]
|
||||
|
||||
return self.playlist_result(entries, playlist_id)
|
||||
|
||||
|
||||
class HotStarSeriesIE(HotStarBaseIE):
|
||||
IE_NAME = 'hotstar:series'
|
||||
_VALID_URL = r'(?:https?://)(?:www\.)?hotstar\.com(?:/in)?/tv/[^/]+/(?P<id>\d{10})$'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.hotstar.com/in/tv/radhakrishn/1260000646',
|
||||
'info_dict': {
|
||||
'id': '1260000646',
|
||||
},
|
||||
'playlist_mincount': 690,
|
||||
}, {
|
||||
'url': 'https://www.hotstar.com/tv/dancee-/1260050431',
|
||||
'info_dict': {
|
||||
'id': '1260050431',
|
||||
},
|
||||
'playlist_mincount': 43,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
series_id = self._match_id(url)
|
||||
headers = {
|
||||
'x-country-code': 'IN',
|
||||
'x-platform-code': 'PCTV',
|
||||
}
|
||||
detail_json = self._download_json('https://api.hotstar.com/o/v1/show/detail?contentId=' + series_id,
|
||||
video_id=series_id, headers=headers)
|
||||
id = compat_str(try_get(detail_json, lambda x: x['body']['results']['item']['id'], int))
|
||||
item_json = self._download_json('https://api.hotstar.com/o/v1/tray/g/1/items?etid=0&tao=0&tas=10000&eid=' + id,
|
||||
video_id=series_id, headers=headers)
|
||||
entries = [
|
||||
self.url_result(
|
||||
'https://www.hotstar.com/%d' % video['contentId'],
|
||||
ie=HotStarIE.ie_key(), video_id=video['contentId'])
|
||||
for video in item_json['body']['results']['items']
|
||||
if video.get('contentId')]
|
||||
|
||||
return self.playlist_result(entries, series_id)
|
||||
|
||||
@@ -65,7 +65,7 @@ class ImgGamingBaseIE(InfoExtractor):
|
||||
domain, media_type, media_id, playlist_id = re.match(self._VALID_URL, url).groups()
|
||||
|
||||
if playlist_id:
|
||||
if self._downloader.params.get('noplaylist'):
|
||||
if self.get_param('noplaylist'):
|
||||
self.to_screen('Downloading just video %s because of --no-playlist' % media_id)
|
||||
else:
|
||||
self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id)
|
||||
|
||||
@@ -12,6 +12,7 @@ from ..compat import (
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
float_or_none,
|
||||
get_element_by_attribute,
|
||||
int_or_none,
|
||||
lowercase_escape,
|
||||
@@ -32,6 +33,7 @@ class InstagramIE(InfoExtractor):
|
||||
'title': 'Video by naomipq',
|
||||
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'duration': 0,
|
||||
'timestamp': 1371748545,
|
||||
'upload_date': '20130620',
|
||||
'uploader_id': 'naomipq',
|
||||
@@ -48,6 +50,7 @@ class InstagramIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'title': 'Video by britneyspears',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'duration': 0,
|
||||
'timestamp': 1453760977,
|
||||
'upload_date': '20160125',
|
||||
'uploader_id': 'britneyspears',
|
||||
@@ -86,6 +89,24 @@ class InstagramIE(InfoExtractor):
|
||||
'title': 'Post by instagram',
|
||||
'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
|
||||
},
|
||||
}, {
|
||||
# IGTV
|
||||
'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
|
||||
'info_dict': {
|
||||
'id': 'BkfuX9UB-eK',
|
||||
'ext': 'mp4',
|
||||
'title': 'Fingerboarding Tricks with @cass.fb',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'duration': 53.83,
|
||||
'timestamp': 1530032919,
|
||||
'upload_date': '20180626',
|
||||
'uploader_id': 'instagram',
|
||||
'uploader': 'Instagram',
|
||||
'like_count': int,
|
||||
'comment_count': int,
|
||||
'comments': list,
|
||||
'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
|
||||
}
|
||||
}, {
|
||||
'url': 'https://instagram.com/p/-Cmh1cukG2/',
|
||||
'only_matching': True,
|
||||
@@ -159,7 +180,9 @@ class InstagramIE(InfoExtractor):
|
||||
description = try_get(
|
||||
media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
|
||||
compat_str) or media.get('caption')
|
||||
title = media.get('title')
|
||||
thumbnail = media.get('display_src') or media.get('display_url')
|
||||
duration = float_or_none(media.get('video_duration'))
|
||||
timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
|
||||
uploader = media.get('owner', {}).get('full_name')
|
||||
uploader_id = media.get('owner', {}).get('username')
|
||||
@@ -200,9 +223,10 @@ class InstagramIE(InfoExtractor):
|
||||
continue
|
||||
entries.append({
|
||||
'id': node.get('shortcode') or node['id'],
|
||||
'title': 'Video %d' % edge_num,
|
||||
'title': node.get('title') or 'Video %d' % edge_num,
|
||||
'url': node_video_url,
|
||||
'thumbnail': node.get('display_url'),
|
||||
'duration': float_or_none(node.get('video_duration')),
|
||||
'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
|
||||
'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
|
||||
'view_count': int_or_none(node.get('video_view_count')),
|
||||
@@ -239,8 +263,9 @@ class InstagramIE(InfoExtractor):
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'ext': 'mp4',
|
||||
'title': 'Video by %s' % uploader_id,
|
||||
'title': title or 'Video by %s' % uploader_id,
|
||||
'description': description,
|
||||
'duration': duration,
|
||||
'thumbnail': thumbnail,
|
||||
'timestamp': timestamp,
|
||||
'uploader_id': uploader_id,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user