Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-12-17 11:22:23 +01:00)

Compare commits: 302 commits, 2021.06.23 ... 2021.09.02
Commits (SHA1):

9ee4f0bb5b be4d9f4cd9 347182a0cd a7429aa9fa 7a340e0df3 f0e5366335 49ca8db06b ee57a19d84
908b56eaf7 1461d7bef2 8a2d992389 8e25d624df e88dabb35e 8eb7ba82ca b2eeee0ce0 875cfb8cbc
b8773e63f0 05664a2f7b 2ee6389bef 62cdaaf0e2 419508eabb 54153fb71b 1dd6d9ca9d 356ac009d3
9a292a620c 7e55872286 2fc14b9925 58f68fe703 abafce59a1 2e7781a93c bc36bc36a1 d75201a873
691d5823d6 c311988d19 26e8e04454 198e3a04c9 61bfacb233 85a0021fb3 7a45a1590b 1c36c1f320
e0493e90fc 1931a55ee8 63b1ad0f05 0bb1bc1b10 45842107b9 6251555f1c 330690a214 91d4b32bb6
a181cd0c60 ea81966e64 2acf2ce5cb f7f18f905c 4f8b70b593 e43e9f3c2c 71dd5d4a00 52a2f994c9
8b7491c8d1 251ae04e6a 5bc4a65eea 1151c4079a 88acdbc269 9b5fa9ee7c aca5774e68 3fb4e21b38
4dfbf8696b 8fc54b1230 da33e35b05 5ad28e7ffd f79ec47d71 45b0596290 96c23f3be8 6e7dfe4959
c34f505b04 14183d1f80 58adec4677 9e598870dd 8f18aca871 3ad56b4236 5d62709bc7 7581d2467a
5fa206fb54 df2a5633da 7a6742b5f9 e040bb0a41 f8fabc9930 d967c68e4c 3dd39c5f9a be44eefd5e
f775c83110 b714b41f81 31654882e9 86c66b2d3e 37242e56f2 6c7274ecd2 5c333d7496 641ad5d813
0715f7e19b a8731fcc1d 5a64127f94 ade6dc5e9e 418964fa91 c196640ff1 60c8fc73c6 bc8745480e
ff5e16f2f6 be2fc5b212 7be9ccff0b 245d43cacf 246fb276e0 6e6e0d95b3 25a3f4f5d6 ad3dc496bb
2831b4686c 8c0ae192a4 e9f4ccd19e a38bd1defa 476febeb3a b6a35ad83b bfd56b74b9 858a65ecc1
3b34e38813 3448870205 b868936cd6 c681cb5d93 379e44ed3c 243c57cfe8 28f436bad0 2b8a2973bd
b7b04c782e 6e84b21559 575e17a1b9 57015a4a3f 9cc1a3130a b51d2ae3ca fee5f0c909 7bb6434767
124bc071ee a047eeb6d2 77b87f0519 678da2f21b cc3fa8d39d 89efdc15dd 8012d892bd 9d65e7bd6d
36576d7c4c bb36a55c41 3dbb2a9dcb 9997eee4af 3e376d183e 888299e6ca c31be5b009 e5611e8eda
8e6cc12c80 e980017ac8 e9d9efc0f2 6ccf351a87 28dff70b51 1aebc0f79e cf87314d4e 1bd3639f69
68f5867cf0 605cad0be7 0855702f3f e8384376c0 e7e94f2a5c a46a815b05 96fccc101f dbf5416a20
d74a58a186 f5510afef0 e4f0275711 e0f2b4b47d eca330cb88 d24734daea d9e6e9481e 3619f78d2c
65c2fde23f 000c15a4ca 9275f62cf8 6552469433 11cc45718c fe07e2c69f 89ce723edd 45d1f15725
a318f59d14 7d1eb38af1 901130bbcf c0bc527bca 2a9c6dcd22 5a1fc62b41 b4c055bac2 ea05b3020d
9536bc072d 8242bf220d 4bfa401d40 0222620725 1fe3c4c27e f703a88055 a353beba83 052e135029
cb89cfc14b 060ac76257 063c409dfb 767b02a99b f45e6c1126 3944e7af92 ad34b2951e c8fa48fd94
2fd226f6a7 3ba7740dd8 29b208f6f9 e4d666d27b 245524e6a3 9c0d7f4951 e37d0efbd9 c926c9541f
982ee69a74 7ea6541124 ae30b84072 cc9d1493c6 f6755419d1 145bd631c5 b35496d825 352d63fdb5
11f9be0912 c84aeac6b5 50fed816dd a1a7907bc0 d61fc64618 6586bca9b9 da503b7a52 7c365c2109
3f698246b2 cca80fe611 c634ad2a3c 8f3343809e 0ba692acc8 d9488f69c1 dce8743677 5520aa2dc9
8d9b902243 fe93e2c4cf 314ee30548 34917076ad ccc7795ca3 da1c94ee45 3b297919e0 47193e0298
49bd8c66d3 182b6ae8a6 c843e68588 198f7ea89e c888ffb95a 9752433221 f0ff9979c6 501dd1ad55
75722b037d 2d6659b9ea c5370857b3 00034c146a 325ebc1703 7dde84f3c9 6606817a86 73d829c144
60bdb7bd9e 4bb6b02f93 b5ac45b197 38a40c9e16 a8bf9b4dc1 51f8a31d65 be05d5cff1 30d569d2ac
08625e4125 3acf6d3856 46890374f7 60755938b3 723d44b92b bc97cdae67 e010672ab5 169dbde946
17f0eb66b8 981052c9c6 b1e60d1806 6b6c16ca6c f6745c4980 109dd3b237 c2603313b1 1e79316e20
45261e063b 49c258e18d d3f62c1967 5d3a0e794b 125728b038 15a4fd53d3 4513a41a72 6033d9808d
bd4d1ea398 8e897ed283 412cce82b0 d534c4520b 2b18a8c590 dac8b87b0c
.github/FUNDING.yml (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+
+custom: ['https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators']
.github/ISSUE_TEMPLATE/1_broken_site.md (vendored, 8 lines)
@@ -21,7 +21,7 @@ assignees: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.06.09. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.08.10. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
 - Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
@@ -29,7 +29,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running yt-dlp version **2021.06.09**
+- [ ] I've verified that I'm running yt-dlp version **2021.08.10**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -42,9 +42,9 @@ Provide the complete verbose output of yt-dlp that clearly demonstrates the prob
 Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
 [debug] System config: []
 [debug] User config: []
-[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] yt-dlp version 2021.06.09
+[debug] yt-dlp version 2021.08.10
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
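The verbose log this template asks for can be captured with a one-liner like the following; the URL is the template's own example, while the `tee` redirection and log-file name are illustrative, not part of the template:

```sh
# Capture the complete verbose output for a bug report
yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" 2>&1 | tee yt-dlp-verbose.log
```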
@@ -21,7 +21,7 @@ assignees: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.06.09. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.08.10. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://github.com/yt-dlp/yt-dlp. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
@@ -29,9 +29,10 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running yt-dlp version **2021.06.09**
+- [ ] I've verified that I'm running yt-dlp version **2021.08.10**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
 
 

@@ -21,13 +21,13 @@ assignees: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.06.09. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.08.10. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running yt-dlp version **2021.06.09**
+- [ ] I've verified that I'm running yt-dlp version **2021.08.10**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
.github/ISSUE_TEMPLATE/4_bug_report.md (vendored, 11 lines)
@@ -21,7 +21,7 @@ assignees: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.06.09. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.08.10. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
 - Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
@@ -29,9 +29,10 @@ Carefully read and work through this check list in order to prevent the most com
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->
 
-- [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running yt-dlp version **2021.06.09**
+- [ ] I'm reporting a bug unrelated to a specific site
+- [ ] I've verified that I'm running yt-dlp version **2021.08.10**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
 - [ ] I've read bugs section in FAQ
@@ -44,9 +45,9 @@ Provide the complete verbose output of yt-dlp that clearly demonstrates the prob
 Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
 [debug] System config: []
 [debug] User config: []
-[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] yt-dlp version 2021.06.09
+[debug] yt-dlp version 2021.08.10
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
.github/ISSUE_TEMPLATE/5_feature_request.md (vendored, 4 lines)
@@ -21,13 +21,13 @@ assignees: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.06.09. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.08.10. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running yt-dlp version **2021.06.09**
+- [ ] I've verified that I'm running yt-dlp version **2021.08.10**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
.github/ISSUE_TEMPLATE/6_question.md (vendored, 2 lines)
@@ -1,6 +1,6 @@
 ---
 name: Ask question
-about: Ask youtube-dl related question
+about: Ask yt-dlp related question
 title: "[Question]"
 labels: question
 assignees: ''
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.md (vendored, 2 lines)
@@ -42,7 +42,7 @@ Provide the complete verbose output of yt-dlp that clearly demonstrates the prob
 Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
 [debug] System config: []
 [debug] User config: []
-[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
 [debug] yt-dlp version %(version)s
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2

@@ -32,6 +32,7 @@ Carefully read and work through this check list in order to prevent the most com
 - [ ] I've verified that I'm running yt-dlp version **%(version)s**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
 
 
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.md (vendored, 5 lines)
@@ -29,9 +29,10 @@ Carefully read and work through this check list in order to prevent the most com
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->
 
-- [ ] I'm reporting a broken site support issue
+- [ ] I'm reporting a bug unrelated to a specific site
 - [ ] I've verified that I'm running yt-dlp version **%(version)s**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
 - [ ] I've read bugs section in FAQ
@@ -44,7 +45,7 @@ Provide the complete verbose output of yt-dlp that clearly demonstrates the prob
 Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
 [debug] System config: []
 [debug] User config: []
-[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
 [debug] yt-dlp version %(version)s
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 lines)
@@ -11,7 +11,7 @@
 - [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
 - [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
 
-### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
+### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
 - [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
 - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
 
.github/banner.svg (vendored, new file, 31 lines)
File diff suppressed because one or more lines are too long
After: size 24 KiB
.github/workflows/build.yml (vendored, 5 lines)
@@ -103,7 +103,8 @@ jobs:
     - name: Upgrade pip and enable wheel support
       run: python -m pip install --upgrade pip setuptools wheel
     - name: Install Requirements
-      run: pip install pyinstaller mutagen pycryptodome websockets
+      # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
+      run: pip install "https://yt-dlp.github.io/pyinstaller-builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodome websockets
     - name: Bump version
       id: bump_version
       run: python devscripts/update-version.py
@@ -147,7 +148,7 @@ jobs:
     - name: Upgrade pip and enable wheel support
       run: python -m pip install --upgrade pip setuptools wheel
     - name: Install Requirements
-      run: pip install pyinstaller mutagen pycryptodome websockets
+      run: pip install "https://yt-dlp.github.io/pyinstaller-builds/i686/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodome websockets
     - name: Bump version
       id: bump_version
       run: python devscripts/update-version.py
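The same patched wheel can be installed locally to mirror the build environment; the commands are taken directly from the x86_64 job above, and running them outside CI is the only assumption:

```sh
# Install the custom PyInstaller build used by the release workflow
python -m pip install --upgrade pip setuptools wheel
pip install "https://yt-dlp.github.io/pyinstaller-builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodome websockets
```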
.github/workflows/core.yml (vendored, 10 lines)
@@ -10,7 +10,7 @@ jobs:
       matrix:
         os: [ubuntu-18.04]
         # py3.9 is in quick-test
-        python-version: [3.7, 3.8, pypy-3.6, pypy-3.7]
+        python-version: [3.7, 3.8, 3.10-dev, pypy-3.6, pypy-3.7]
         run-tests-ext: [sh]
         include:
         # atleast one of the tests must be in windows
@@ -23,11 +23,9 @@ jobs:
       uses: actions/setup-python@v2
       with:
         python-version: ${{ matrix.python-version }}
-    - name: Install nose
-      run: pip install nose
+    - name: Install pytest
+      run: pip install pytest
     - name: Run tests
       continue-on-error: False
-      env:
-        YTDL_TEST_SET: core
-      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }}
+      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} core
     # Linter is in quick-test
.github/workflows/download.yml (vendored, 10 lines)
@@ -9,7 +9,7 @@ jobs:
       fail-fast: true
       matrix:
         os: [ubuntu-18.04]
-        python-version: [3.7, 3.8, 3.9, pypy-3.6, pypy-3.7]
+        python-version: [3.7, 3.8, 3.9, 3.10-dev, pypy-3.6, pypy-3.7]
         run-tests-ext: [sh]
         include:
         - os: windows-latest
@@ -21,10 +21,8 @@ jobs:
       uses: actions/setup-python@v2
       with:
         python-version: ${{ matrix.python-version }}
-    - name: Install nose
-      run: pip install nose
+    - name: Install pytest
+      run: pip install pytest
     - name: Run tests
       continue-on-error: true
-      env:
-        YTDL_TEST_SET: download
-      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }}
+      run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} download
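Both suites can be run locally the same way the updated workflows now invoke them — the test set is passed as an argument instead of via `YTDL_TEST_SET`. A sketch, assuming a Unix shell and a checkout of the repository:

```sh
pip install pytest
./devscripts/run_tests.sh core      # the tests run by core.yml
./devscripts/run_tests.sh download  # the tests run by download.yml
```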
.github/workflows/quick-test.yml (vendored, 12 lines)
@@ -11,12 +11,10 @@ jobs:
       uses: actions/setup-python@v2
       with:
         python-version: 3.9
-    - name: Install nose
-      run: pip install nose
+    - name: Install test requirements
+      run: pip install pytest pycryptodome
     - name: Run tests
-      env:
-        YTDL_TEST_SET: core
-      run: ./devscripts/run_tests.sh
+      run: ./devscripts/run_tests.sh core
   flake8:
     name: Linter
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
@@ -29,5 +27,7 @@ jobs:
         python-version: 3.9
     - name: Install flake8
       run: pip install flake8
+    - name: Make lazy extractors
+      run: python devscripts/make_lazy_extractors.py yt_dlp/extractor/lazy_extractors.py
     - name: Run flake8
       run: flake8 .
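The lint job can be reproduced locally; the steps are lifted from the workflow above, and running them by hand outside Actions is the only assumption:

```sh
pip install flake8
# Generate the lazy extractors first, as the new workflow step does
python devscripts/make_lazy_extractors.py yt_dlp/extractor/lazy_extractors.py
flake8 .
```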
.gitignore (vendored, 5 lines)
@@ -19,6 +19,8 @@ cookies.txt
 *.wav
+*.ape
 *.mkv
+*.flac
 *.avi
 *.swf
 *.part
 *.part-*
@@ -33,6 +35,7 @@ cookies.txt
 *.info.json
 *.live_chat.json
 *.jpg
+*.jpeg
 *.png
 *.webp
 *.annotations.xml
@@ -44,6 +47,7 @@ cookies.txt
 # Python
 *.pyc
 *.pyo
+.pytest_cache
 wine-py2exe/
 py2exe.log
 build/
@@ -78,6 +82,7 @@ README.txt
 *.tar.gz
 *.zsh
 *.spec
+test/testdata/player-*.js
 
 # Binary
 /youtube-dl
@@ -3,7 +3,7 @@
 $ youtube-dl -v <your command line>
 [debug] System config: []
 [debug] User config: []
-[debug] Command-line args: [u'-v', u'https://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Command-line args: [u'-v', u'https://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
 [debug] youtube-dl version 2015.12.06
 [debug] Git HEAD: 135392e
@@ -81,16 +81,17 @@ To run the test, simply invoke your favorite test runner, or execute a test file
     python -m unittest discover
     python test/test_download.py
-    nosetests
+    pytest
 
 See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
 
 If you want to create a build of youtube-dl yourself, you'll need
 
-* python
+* python3
 * make (only GNU make is supported)
 * pandoc
 * zip
-* nosetests
+* pytest
 
 ### Adding support for a new site
CONTRIBUTORS (52 lines)
@@ -1,6 +1,6 @@
 pukkandan (owner)
 shirt-dev (collaborator)
-colethedj (collaborator)
+coletdjnz/colethedj (collaborator)
 Ashish0804 (collaborator)
 h-h-h-h
 pauldubois98
@@ -22,7 +22,7 @@ Zocker1999NET
 nao20010128nao
 kurumigi
 bbepis
-animelover1984
+animelover1984/horahoradev
 Pccode66
 RobinD42
 hseg
@@ -52,5 +52,51 @@ hhirtz
 louie-github
 MinePlayersPE
 olifre
-rhsmachine
+rhsmachine/zenerdi0de
 nihil-admirari
+krichbanana
+ohmybahgosh
+nyuszika7h
+blackjack4494
+pyx
+TpmKranz
+mzbaulhaque
+zackmark29
+mbway
+zerodytrash
+wesnm
+pento
+rigstot
+dirkf
+funniray
+Jessecar96
+jhwgh1968
+kikuyan
+max-te
+nchilada
+pgaig
+PSlava
+stdedos
+u-spec-png
+Sipherdrakon
+kidonng
+smege1001
+tandy1000
+IONECarter
+capntrips
+mrfade
+ParadoxGBB
+wlritchi
+NeroBurner
+mahanstreamer
+alerikaisattera
+Derkades
+BunnyHelp
+i6t
+std-move
+Chocobozzz
+ouwou
+korli
+octotherp
+CeruleanSky
+zootedb0t
Changelog.md (410 lines)
@@ -19,19 +19,355 @@
 -->
 
 
+### 2021.09.02
+
+* **Native SponsorBlock** implementation by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
+    * `--sponsorblock-remove CATS` removes specified chapters from file
+    * `--sponsorblock-mark CATS` marks the specified sponsor sections as chapters
+    * `--sponsorblock-chapter-title TMPL` to specify sponsor chapter template
+    * `--sponsorblock-api URL` to use a different API
+    * No re-encoding is done unless `--force-keyframes-at-cuts` is used
+    * The fetched sponsor sections are written to the infojson
+    * Deprecates: `--sponskrub`, `--no-sponskrub`, `--sponskrub-cut`, `--no-sponskrub-cut`, `--sponskrub-force`, `--no-sponskrub-force`, `--sponskrub-location`, `--sponskrub-args`
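For illustration, the new options combine like this; the categories and URL are examples, not defaults:

```sh
# Cut sponsor segments out of the file and mark intros/outros as chapters
yt-dlp --sponsorblock-remove sponsor \
       --sponsorblock-mark intro,outro \
       "https://www.youtube.com/watch?v=BaW_jenozKc"
```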
+* Split `--embed-chapters` from `--embed-metadata` (it still implies the former by default)
+* Add option `--remove-chapters` to remove arbitrary chapters by [nihil-admirari](https://github.com/nihil-admirari), pukkandan
+* Add option `--force-keyframes-at-cuts` for more accurate cuts when removing and splitting chapters by [nihil-admirari](https://github.com/nihil-admirari)
+* Let `--match-filter` reject entries early
+    * Makes redundant: `--match-title`, `--reject-title`, `--min-views`, `--max-views`
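A sketch of how a single filter expression now covers those deprecated options; the thresholds are illustrative:

```sh
# Equivalent of --min-views and a duration check in one early-rejecting filter
yt-dlp --match-filter "duration > 60 & view_count >= 1000" URL
```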
+* [lazy_extractor] Improvements (It now passes all tests)
+    * Bugfix for when plugin directory doesn't exist by [kidonng](https://github.com/kidonng)
+    * Create instance only after pre-checking archive
+    * Import actual class if an attribute is accessed
+    * Fix `suitable` and add flake8 test
+* [downloader/ffmpeg] Experimental support for DASH manifests (including live)
+    * Your ffmpeg must have [this patch](https://github.com/FFmpeg/FFmpeg/commit/3249c757aed678780e22e99a1a49f4672851bca9) applied for YouTube DASH to work
+* [downloader/ffmpeg] Allow passing custom arguments before `-i`
+
+* [BannedVideo] Add extractor by [smege1001](https://github.com/smege1001), [blackjack4494](https://github.com/blackjack4494), [pukkandan](https://github.com/pukkandan)
+* [bilibili] Add category extractor by [animelover1984](https://github.com/animelover1984)
+* [Epicon] Add extractors by [Ashish0804](https://github.com/Ashish0804)
+* [filmmodu] Add extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
+* [GabTV] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [Hungama] Fix `HungamaSongIE` and add `HungamaAlbumPlaylistIE` by [Ashish0804](https://github.com/Ashish0804)
+* [ManotoTV] Add new extractors by [tandy1000](https://github.com/tandy1000)
+* [Niconico] Add Search extractors by [animelover1984](https://github.com/animelover1984), [pukkandan](https://github.com/pukkandan)
+* [Patreon] Add `PatreonUserIE` by [zenerdi0de](https://github.com/zenerdi0de)
+* [peloton] Add extractor by [IONECarter](https://github.com/IONECarter), [capntrips](https://github.com/capntrips), [pukkandan](https://github.com/pukkandan)
+* [ProjectVeritas] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [radiko] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
+* [StarTV] Add extractor for `startv.com.tr` by [mrfade](https://github.com/mrfade), [coletdjnz](https://github.com/coletdjnz)
+* [tiktok] Add `TikTokUserIE` by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
+* [Tokentube] Add extractor by [u-spec-png](https://github.com/u-spec-png)
+* [TV2Hu] Fix `TV2HuIE` and add `TV2HuSeriesIE` by [Ashish0804](https://github.com/Ashish0804)
+* [voicy] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
+
+* [adobepass] Fix Verizon SAML login by [nyuszika7h](https://github.com/nyuszika7h), [ParadoxGBB](https://github.com/ParadoxGBB)
+* [afreecatv] Fix adult VODs by [wlritchi](https://github.com/wlritchi)
+* [afreecatv] Tolerate failure to parse date string by [wlritchi](https://github.com/wlritchi)
+* [aljazeera] Fix extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [ATV.at] Fix extractor for ATV.at by [NeroBurner](https://github.com/NeroBurner), [coletdjnz](https://github.com/coletdjnz)
+* [bitchute] Fix test by [mahanstreamer](https://github.com/mahanstreamer)
+* [camtube] Remove obsolete extractor by [alerikaisattera](https://github.com/alerikaisattera)
+* [CDA] Add more formats by [u-spec-png](https://github.com/u-spec-png)
+* [eroprofile] Fix page skipping in albums by [jhwgh1968](https://github.com/jhwgh1968)
+* [facebook] Fix format sorting
+* [facebook] Fix metadata extraction by [kikuyan](https://github.com/kikuyan)
+* [facebook] Update onion URL by [Derkades](https://github.com/Derkades)
+* [HearThisAtIE] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
+* [instagram] Add referrer to prevent throttling by [u-spec-png](https://github.com/u-spec-png), [kikuyan](https://github.com/kikuyan)
+* [iwara.tv] Extract more metadata by [BunnyHelp](https://github.com/BunnyHelp)
+* [iwara] Add thumbnail by [i6t](https://github.com/i6t)
+* [kakao] Fix extractor
+* [mediaset] Fix extraction for some videos by [nyuszika7h](https://github.com/nyuszika7h)
+* [Motherless] Fix extractor by [coletdjnz](https://github.com/coletdjnz)
+* [Nova] fix extractor by [std-move](https://github.com/std-move)
+* [ParamountPlus] Fix geo verification by [shirt](https://github.com/shirt-dev)
+* [peertube] handle new video URL format by [Chocobozzz](https://github.com/Chocobozzz)
+* [pornhub] Separate and fix playlist extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
+* [reddit] Fix for quarantined subreddits by [ouwou](https://github.com/ouwou)
+* [ShemarooMe] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
+* [soundcloud] Refetch `client_id` on 403
+* [tiktok] Fix metadata extraction
+* [TV2] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
+* [tv5mondeplus] Fix extractor by [korli](https://github.com/korli)
+* [VH1,TVLand] Fix extractors by [Sipherdrakon](https://github.com/Sipherdrakon)
+* [Viafree] Fix extractor and extract subtitles by [coletdjnz](https://github.com/coletdjnz)
+* [XHamster] Extract `uploader_id` by [octotherp](https://github.com/octotherp)
+* [youtube] Add `shorts` to `_VALID_URL`
+* [youtube] Add av01 itags to known formats list by [blackjack4494](https://github.com/blackjack4494)
+* [youtube] Extract error messages from HTTPError response by [coletdjnz](https://github.com/coletdjnz)
+* [youtube] Fix subtitle names
+* [youtube] Prefer audio stream that YouTube considers default
+* [youtube] Remove annotations and deprecate `--write-annotations` by [coletdjnz](https://github.com/coletdjnz)
+* [Zee5] Fix extractor and add subtitles by [Ashish0804](https://github.com/Ashish0804)
+
+* [aria2c] Obey `--rate-limit`
+* [EmbedSubtitle] Continue even if some files are missing
+* [extractor] Better error message for DRM
+* [extractor] Common function `_match_valid_url`
+* [extractor] Show video id in error messages if possible
+* [FormatSort] Remove priority of `lang`
+* [options] Add `_set_from_options_callback`
+* [SubtitleConvertor] Fix bug during subtitle conversion
+* [utils] Add `parse_qs`
+* [webvtt] Fix timestamp overflow adjustment by [fstirlitz](https://github.com/fstirlitz)
+* Bugfix for `--replace-in-metadata`
+* Don't try to merge with final extension
+* Fix `--force-overwrites` when using `-k`
+* Fix `--no-prefer-free-formats` by [CeruleanSky](https://github.com/CeruleanSky)
+* Fix `-F` for extractors that directly return url
+* Fix `-J` when there are failed videos
+* Fix `extra_info` being reused across runs
+* Fix `playlist_index` not obeying `playlist_start` and add tests
+* Fix resuming of single formats when using `--no-part`
+* Revert erroneous use of the `Content-Length` header by [fstirlitz](https://github.com/fstirlitz)
+* Use `os.replace` where applicable by [paulwrubel](https://github.com/paulwrubel)
+* [build] Add homebrew taps `yt-dlp/taps/yt-dlp` by [nao20010128nao](https://github.com/nao20010128nao)
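With Homebrew installed, the tap named above can be used directly (the formula path follows the tap name; installing this way rather than via pip is the assumption):

```sh
brew install yt-dlp/taps/yt-dlp
```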
+* [build] Fix bug in making `yt-dlp.tar.gz`
+* [docs] Fix some typos by [pukkandan](https://github.com/pukkandan), [zootedb0t](https://github.com/zootedb0t)
+* [cleanup] Replace improper use of tab in trovo by [glenn-slayden](https://github.com/glenn-slayden)
+
+
+### 2021.08.10
+
+* Add option `--replace-in-metadata`
+* Add option `--no-simulate` to not simulate even when `--print` or `--list...` are used - Deprecates `--print-json`
+* Allow entire infodict to be printed using `%()s` - makes `--dump-json` redundant
+* Allow multiple `--exec` and `--exec-before-download`
+* Add regex to `--match-filter`
+* Add all format filtering operators also to `--match-filter` by [max-te](https://github.com/max-te)
+* Add compat-option `no-keep-subs`
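Two of the new options sketched as commands; the field name, regex and URL are illustrative:

```sh
# Collapse runs of whitespace in the title before it is written to metadata
yt-dlp --replace-in-metadata "title" "\s+" " " URL

# Print the entire infodict as JSON, using the new %()s template with the j conversion
yt-dlp --print "%()j" URL
```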
+* [adobepass] Add MSO Cablevision by [Jessecar96](https://github.com/Jessecar96)
+* [BandCamp] Add BandcampMusicIE by [Ashish0804](https://github.com/Ashish0804)
+* [blackboardcollaborate] Add new extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
+* [eroprofile] Add album downloader by [jhwgh1968](https://github.com/jhwgh1968)
+* [mirrativ] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
+* [openrec] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
+* [nbcolympics:stream] Fix extractor by [nchilada](https://github.com/nchilada), [pukkandan](https://github.com/pukkandan)
+* [nbcolympics] Update extractor for 2020 olympics by [wesnm](https://github.com/wesnm)
+* [paramountplus] Separate extractor and fix some titles by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
+* [RCTIPlus] Support events and TV by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [Newgrounds] Improve extractor and fix playlist by [u-spec-png](https://github.com/u-spec-png)
+* [aenetworks] Update `_THEPLATFORM_KEY` and `_THEPLATFORM_SECRET` by [wesnm](https://github.com/wesnm)
+* [crunchyroll] Fix thumbnail by [funniray](https://github.com/funniray)
+* [HotStar] Use API for metadata and extract subtitles by [Ashish0804](https://github.com/Ashish0804)
+* [instagram] Fix comments extraction by [u-spec-png](https://github.com/u-spec-png)
+* [peertube] Fix videos without description by [u-spec-png](https://github.com/u-spec-png)
+* [twitch:clips] Extract `display_id` by [dirkf](https://github.com/dirkf)
+* [viki] Print error message from API request
+* [Vine] Remove invalid formats by [u-spec-png](https://github.com/u-spec-png)
+* [VrtNU] Fix XSRF token by [pgaig](https://github.com/pgaig)
+* [vrv] Fix thumbnail extraction by [funniray](https://github.com/funniray)
+* [youtube] Add extractor-arg `include-live-dash` to show live dash formats
+* [youtube] Improve signature function detection by [PSlava](https://github.com/PSlava)
+* [youtube] Raise appropriate error when API pages can't be downloaded
+* Ensure `_write_ytdl_file` closes file handle on error
+* Fix `--compat-options filename` by [stdedos](https://github.com/stdedos)
+* Fix issues with infodict sanitization
+* Fix resuming when using `--no-part`
+* Fix wrong extension for intermediate files
+* Handle `BrokenPipeError` by [kikuyan](https://github.com/kikuyan)
+* Show libraries present in verbose head
+* [extractor] Detect `sttp` as subtitles in MPD by [fstirlitz](https://github.com/fstirlitz)
+* [extractor] Reset non-repeating warnings per video
+* [ffmpeg] Fix streaming `mp4` to `stdout`
+* [ffpmeg] Allow `--ffmpeg-location` to be a file with different name
+* [utils] Fix `InAdvancePagedList.__getitem__`
+* [utils] Fix `traverse_obj` depth when `is_user_input`
+* [webvtt] Merge daisy-chained duplicate cues by [fstirlitz](https://github.com/fstirlitz)
+* [build] Use custom build of `pyinstaller` by [shirt](https://github.com/shirt-dev)
+* [tests:download] Add batch testing for extractors (`test_YourExtractor_all`)
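One way to invoke such a batch test locally, assuming pytest and a checkout of the repository; the extractor name is a placeholder and the `-k` selection is an assumed convenience, not quoted from the docs:

```sh
python3 -m pytest test/test_download.py -k "test_YourExtractor_all"
```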
+* [docs] Document which fields `--add-metadata` adds to the file
+* [docs] Fix some mistakes and improve doc
+* [cleanup] Misc code cleanup
+
+
+### 2021.08.02
+
+* Add logo, banner and donate links
+* Expand and escape environment variables correctly in output template
+* Add format types `j` (json), `l` (comma delimited list), `q` (quoted for terminal) in output template
+* [downloader] Allow streaming some unmerged formats to stdout using ffmpeg
+* [youtube] **Age-gate bypass**
+    * Add `agegate` clients by [pukkandan](https://github.com/pukkandan), [MinePlayersPE](https://github.com/MinePlayersPE)
+    * Add `thirdParty` to agegate clients to bypass more videos
+    * Simplify client definitions, expose `embedded` clients
+    * Improve age-gate detection by [coletdjnz](https://github.com/coletdjnz)
+    * Fix default global API key by [coletdjnz](https://github.com/coletdjnz)
+    * Add `creator` clients for age-gate bypass using unverified accounts by [zerodytrash](https://github.com/zerodytrash), [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
+* [adobepass] Add MSO Sling TV by [wesnm](https://github.com/wesnm)
+* [CBS] Add ParamountPlusSeriesIE by [Ashish0804](https://github.com/Ashish0804)
+* [dplay] Add `ScienceChannelIE` by [Sipherdrakon](https://github.com/Sipherdrakon)
+* [UtreonIE] Add extractor by [Ashish0804](https://github.com/Ashish0804)
+* [youtube] Add `mweb` client by [coletdjnz](https://github.com/coletdjnz)
+* [youtube] Add `player_client=all`
+* [youtube] Force `hl=en` for comments by [coletdjnz](https://github.com/coletdjnz)
+* [youtube] Fix format sorting when using alternate clients
+* [youtube] Misc cleanup by [pukkandan](https://github.com/pukkandan), [coletdjnz](https://github.com/coletdjnz)
+* [youtube] Extract SAPISID only once
+* [CBS] Add fallback by [llacb47](https://github.com/llacb47), [pukkandan](https://github.com/pukkandan)
+* [Hotstar] Support cookies by [Ashish0804](https://github.com/Ashish0804)
+* [HotStarSeriesIE] Fix regex by [Ashish0804](https://github.com/Ashish0804)
+* [bilibili] Improve `_VALID_URL`
+* [mediaset] Fix extraction by [nixxo](https://github.com/nixxo)
+* [Mxplayer] Add h265 formats by [Ashish0804](https://github.com/Ashish0804)
+* [RCTIPlus] Remove PhantomJS dependency by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [tenplay] Add MA15+ age limit by [pento](https://github.com/pento)
+* [vidio] Fix login error detection by [MinePlayersPE](https://github.com/MinePlayersPE)
+* [vimeo] Better extraction of original file by [Ashish0804](https://github.com/Ashish0804)
+* [generic] Support KVS player (replaces ThisVidIE) by [rigstot](https://github.com/rigstot)
+* Add compat-option `no-clean-infojson`
+* Remove `asr` appearing twice in `-F`
+* Set `home:` as the default key for `-P`
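A sketch of the `-P` path types this default applies to; the directories are illustrative:

```sh
# Final files go to the "home" path; intermediate files are kept in "temp"
yt-dlp -P "home:~/Videos" -P "temp:/tmp/yt-dlp" URL
```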
+* [utils] Fix slicing of reversed `LazyList`
+* [FormatSort] Fix bug for audio with unknown codec
+* [test:download] Support testing with `ignore_no_formats_error`
+* [cleanup] Refactor some code
+
+
+### 2021.07.24
+
+* [youtube:tab] Extract video duration early
+* [downloader] Pass `info_dict` to `progress_hook`s
+* [youtube] Fix age-gated videos for API clients when cookies are supplied by [coletdjnz](https://github.com/coletdjnz)
+* [youtube] Disable `get_video_info` age-gate workaround - This endpoint seems to be completely dead
+* [youtube] Try all clients even if age-gated
+* [youtube] Fix subtitles only being extracted from the first client
+* [youtube] Simplify `_get_text`
+* [cookies] bugfix for microsoft edge on macOS
+* [cookies] Handle `sqlite` `ImportError` gracefully by [mbway](https://github.com/mbway)
+* [cookies] Handle errors when importing `keyring`
+
+
+### 2021.07.21
+
+* **Add option `--cookies-from-browser`** to load cookies from a browser by [mbway](https://github.com/mbway)
+    * Usage: `--cookies-from-browser BROWSER[:PROFILE_NAME_OR_PATH]`
+    * Also added `--no-cookies-from-browser`
+    * To decrypt chromium cookies, `keyring` is needed for UNIX and `pycryptodome` for Windows
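Following the usage line above, two illustrative invocations; the browser and profile names are examples:

```sh
yt-dlp --cookies-from-browser firefox URL
yt-dlp --cookies-from-browser "chrome:Profile 1" URL
```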
+* Add option `--exec-before-download`
+* Add field `live_status`
+* [FFmpegMetadata] Add language of each stream and some refactoring
+* [douyin] Add extractor by [pukkandan](https://github.com/pukkandan), [pyx](https://github.com/pyx)
+* [pornflip] Add extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
+* **[youtube] Extract data from multiple clients** by [pukkandan](https://github.com/pukkandan), [coletdjnz](https://github.com/coletdjnz)
+    * `player_client` now accepts multiple clients
+    * Default `player_client` = `android,web`
+        * This uses twice as many requests, but avoids throttling for most videos while also not losing any formats
+    * Music clients can be specifically requested and is enabled by default if `music.youtube.com`
+    * Added `player_client=ios` (Known issue: formats from ios are not sorted correctly)
+    * Add age-gate bypass for android and ios clients
+* [youtube] Extract more thumbnails
+    * The thumbnail URLs are hard-coded and their actual existence is tested lazily
+    * Added option `--no-check-formats` to not test them
+* [youtube] Misc fixes
+    * Improve extraction of livestream metadata by [pukkandan](https://github.com/pukkandan), [krichbanana](https://github.com/krichbanana)
+    * Hide live dash formats since they can't be downloaded anyway
+    * Fix authentication when using multiple accounts by [coletdjnz](https://github.com/coletdjnz)
+    * Fix controversial videos when requested via API by [coletdjnz](https://github.com/coletdjnz)
+    * Fix session index extraction and headers for non-web player clients by [coletdjnz](https://github.com/coletdjnz)
+    * Make `--extractor-retries` work for more errors
+    * Fix sorting of 3gp format
+    * Sanity check `chapters` (and refactor related code)
+    * Make `parse_time_text` and `_extract_chapters` non-fatal
+    * Misc cleanup and bug fixes by [coletdjnz](https://github.com/coletdjnz)
+* [youtube:tab] Fix channels tab
+* [youtube:tab] Extract playlist availability by [coletdjnz](https://github.com/coletdjnz)
+* **[youtube:comments] Move comment extraction to new API** by [coletdjnz](https://github.com/coletdjnz)
+    * Adds extractor-args `comment_sort` (`top`/`new`), `max_comments`, `max_comment_depth`
+* [youtube:comments] Fix `is_favorited`, improve `like_count` parsing by [coletdjnz](https://github.com/coletdjnz)
+* [BravoTV] Improve metadata extraction by [kevinoconnor7](https://github.com/kevinoconnor7)
+* [crunchyroll:playlist] Force http
+* [yahoo:gyao:player] Relax `_VALID_URL` by [nao20010128nao](https://github.com/nao20010128nao)
+* [nebula] Authentication via tokens from cookie jar by [hheimbuerger](https://github.com/hheimbuerger), [TpmKranz](https://github.com/TpmKranz)
+* [RTP] Fix extraction and add subtitles by [fstirlitz](https://github.com/fstirlitz)
+* [viki] Rewrite extractors and add extractor-arg `video_types` to `vikichannel` by [zackmark29](https://github.com/zackmark29), [pukkandan](https://github.com/pukkandan)
+* [vlive] Extract thumbnail directly in addition to the one from Naver
+* [generic] Extract previously missed subtitles by [fstirlitz](https://github.com/fstirlitz)
+* [generic] Extract everything in the SMIL manifest and detect discarded subtitles by [fstirlitz](https://github.com/fstirlitz)
+* [embedthumbnail] Fix `_get_thumbnail_resolution`
+* [metadatafromfield] Do not detect numbers as field names
+* Fix selectors `all`, `mergeall` and add tests
+* Errors in playlist extraction should obey `--ignore-errors`
+* Fix bug where `original_url` was not propagated when `_type`=`url`
+* Revert "Merge webm formats into mkv if thumbnails are to be embedded (#173)"
+    * This was wrongly checking for `write_thumbnail`
+* Improve `extractor_args` parsing
+* Rename `NOTE` in `-F` to `MORE INFO` since it's often confused to be the same as `format_note`
+* Add `only_once` param for `write_debug` and `report_warning`
+* [extractor] Allow extracting multiple groups in `_search_regex` by [fstirlitz](https://github.com/fstirlitz)
+* [utils] Improve `traverse_obj`
+* [utils] Add `variadic`
+* [utils] Improve `js_to_json` comment regex by [fstirlitz](https://github.com/fstirlitz)
+* [webtt] Fix timestamps
+* [compat] Remove unnecessary code
+* [docs] fix default of multistreams
+
+
+### 2021.07.07
+
+* Merge youtube-dl: Upto [commit/a803582](https://github.com/ytdl-org/youtube-dl/commit/a8035827177d6b59aca03bd717acb6a9bdd75ada)
+* Add `--extractor-args` to pass some extractor-specific arguments. See [readme](https://github.com/yt-dlp/yt-dlp#extractor-arguments)
+    * Add extractor option `skip` for `youtube`. Eg: `--extractor-args youtube:skip=hls,dash`
+    * Deprecates `--youtube-skip-dash-manifest`, `--youtube-skip-hls-manifest`, `--youtube-include-dash-manifest`, `--youtube-include-hls-manifest`
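The two youtube arguments shown in this changelog can be combined in one invocation using semicolon-separated keys; the URL is illustrative:

```sh
yt-dlp --extractor-args "youtube:player_client=android;skip=hls,dash" URL
```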
|
||||
* Allow `--list...` options to work with `--print`, `--quiet` and other `--list...` options
|
||||
* [youtube] Use `player` API for additional video extraction requests by [coletdjnz](https://github.com/coletdjnz)
|
||||
* **Fixes youtube premium music** (format 141) extraction
|
||||
* Adds extractor option `player_client` = `web`/`android`
|
||||
* **`--extractor-args youtube:player_client=android` works around the throttling** for the time-being
|
||||
* Adds extractor option `player_skip=config`
|
||||
* Adds age-gate fallback using embedded client
|
||||
* [youtube] Choose correct Live chat API for upcoming streams by [krichbanana](https://github.com/krichbanana)
|
||||
* [youtube] Fix subtitle names for age-gated videos
|
||||
* [youtube:comments] Fix error handling and add `itct` to params by [coletdjnz](https://github.com/coletdjnz)
|
||||
* [youtube_live_chat] Fix download with cookies by [siikamiika](https://github.com/siikamiika)
|
||||
* [youtube_live_chat] use `clickTrackingParams` by [siikamiika](https://github.com/siikamiika)
|
||||
* [Funimation] Rewrite extractor
|
||||
* Add `FunimationShowIE` by [Mevious](https://github.com/Mevious)
|
||||
* **Treat the different versions of an episode as different formats of a single video**
|
||||
* This changes the video `id` and will break break existing archives
|
||||
* Compat option `seperate-video-versions` to fall back to old behavior including using the old video ids
|
||||
* Support direct `/player/` URL
|
||||
* Extractor options `language` and `version` to pre-select them during extraction
|
||||
* These options may be removed in the future if we can extract all formats without additional network requests
|
||||
* Do not rely on these for format selection and use `-f` filters instead
|
||||
* [AdobePass] Add Spectrum MSO by [kevinoconnor7](https://github.com/kevinoconnor7), [ohmybahgosh](https://github.com/ohmybahgosh)
|
||||
* [facebook] Extract description and fix title
|
||||
* [fancode] Fix extraction, support live and allow login with refresh token by [zenerdi0de](https://github.com/zenerdi0de)
|
||||
* [plutotv] Improve `_VALID_URL`
|
||||
* [RCTIPlus] Add extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
|
||||
* [Soundcloud] Allow login using oauth token by [blackjack4494](https://github.com/blackjack4494)
|
||||
* [TBS] Support livestreams by [llacb47](https://github.com/llacb47)
|
||||
* [videa] Fix extraction by [nyuszika7h](https://github.com/nyuszika7h)
|
||||
* [yahoo] Fix extraction by [llacb47](https://github.com/llacb47), [pukkandan](https://github.com/pukkandan)
|
||||
* Process videos when using `--ignore-no-formats-error` by [krichbanana](https://github.com/krichbanana)
|
||||
* Fix `--throttled-rate` when using `--load-info-json`
|
||||
* Fix `--flat-playlist` when entry has no `ie_key`
|
||||
* Fix `check_formats` catching `ExtractorError` instead of `DownloadError`
|
||||
* Fix deprecated option `--list-formats-old`
|
||||
* [downloader/ffmpeg] Fix `--ppa` when using simultaneous download
|
||||
* [extractor] Prevent unnecessary download of hls manifests and refactor `hls_split_discontinuity`
|
||||
* [fragment] Handle status of download and errors in threads correctly; and minor refactoring
|
||||
* [thumbnailsconvertor] Treat `jpeg` as `jpg`
|
||||
* [utils] Fix issues with `LazyList` reversal
|
||||
* [extractor] Allow extractors to set their own login hint
|
||||
* [cleanup] Simplify format selector code with `LazyList` and `yield from`
|
||||
* [cleanup] Clean `extractor.common._merge_subtitles` signature
|
||||
* [cleanup] Fix some typos
|
||||
|
||||
|
||||
### 2021.06.23
|
||||
|
||||
* Merge youtube-dl: Upto [commit/379f52a](https://github.com/ytdl-org/youtube-dl/commit/379f52a4954013767219d25099cce9e0f9401961)
|
||||
* **Add option `--throttled-rate`** below which video data is re-extracted
|
||||
* [fragment] **Merge during download for `-N`**, and refactor `hls`/`dash`
|
||||
* [websockets] Add `WebSocketFragmentFD`by [nao20010128nao](https://github.com/nao20010128nao), [pukkandan](https://github.com/pukkandan)
|
||||
* [websockets] Add `WebSocketFragmentFD` by [nao20010128nao](https://github.com/nao20010128nao), [pukkandan](https://github.com/pukkandan)
|
||||
* Allow `images` formats in addition to video/audio
|
||||
* [downloader/mhtml] Add new downloader for slideshows/storyboards by [fstirlitz](https://github.com/fstirlitz)
|
||||
* [youtube] Temporary **fix for age-gate**
|
||||
* [youtube] Support ongoing live chat by [siikamiika](https://github.com/siikamiika)
|
||||
* [youtube] Improve SAPISID cookie handling by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Improve SAPISID cookie handling by [coletdjnz](https://github.com/coletdjnz)
|
||||
* [youtube] Login is not needed for `:ytrec`
|
||||
* [youtube] Non-fatal alert reporting for unavailable videos page by [colethedj](https://github.com/colethedj)
|
||||
* [youtube] Non-fatal alert reporting for unavailable videos page by [coletdjnz](https://github.com/coletdjnz)
|
||||
* [twitcasting] Websocket support by [nao20010128nao](https://github.com/nao20010128nao)
|
||||
* [mediasite] Extract slides by [fstirlitz](https://github.com/fstirlitz)
|
||||
* [funimation] Extract subtitles
|
||||
@@ -89,7 +425,7 @@
|
||||
* Merge youtube-dl: Upto [commit/d495292](https://github.com/ytdl-org/youtube-dl/commit/d495292852b6c2f1bd58bc2141ff2b0265c952cf)
* Pre-check archive and filters during playlist extraction
* Handle Basic Auth `user:pass` in URLs by [hhirtz](https://github.com/hhirtz) and [pukkandan](https://github.com/pukkandan)
* [archiveorg] Add YoutubeWebArchiveIE by [coletdjnz](https://github.com/coletdjnz) and [alex-gedeon](https://github.com/alex-gedeon)
* [fancode] Add extractor by [rhsmachine](https://github.com/rhsmachine)
* [patreon] Support vimeo embeds by [rhsmachine](https://github.com/rhsmachine)
* [Saitosan] Add new extractor by [llacb47](https://github.com/llacb47)

* **Youtube improvements**:
    * Support youtube music `MP`, `VL` and `browse` pages
    * Extract more formats for youtube music by [craftingmod](https://github.com/craftingmod), [coletdjnz](https://github.com/coletdjnz) and [pukkandan](https://github.com/pukkandan)
    * Extract multiple subtitles in same language by [pukkandan](https://github.com/pukkandan) and [tpikonen](https://github.com/tpikonen)
    * Redirect channels that don't have a `videos` tab to their `UU` playlists
    * Support in-channel search
    * Extract audio language
    * Add subtitle language names by [nixxo](https://github.com/nixxo) and [tpikonen](https://github.com/tpikonen)
    * Show alerts only from the final webpage
    * Add `html5=1` param to `get_video_info` page requests by [coletdjnz](https://github.com/coletdjnz)
    * Better message when login required
* **Add option `--print`**: to print any field/template
    * Makes redundant: `--get-description`, `--get-duration`, `--get-filename`, `--get-format`, `--get-id`, `--get-thumbnail`, `--get-title`, `--get-url`
* Field `additional_urls` to download additional videos from metadata using [`--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata) (see the example below)
* Merge youtube-dl: Upto [commit/dfbbe29](https://github.com/ytdl-org/youtube-dl/commit/dfbbe2902fc67f0f93ee47a8077c148055c67a9b)
* Write thumbnail of playlist and add `pl_thumbnail` outtmpl key
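For instance, the new `additional_urls` field can be fed from another field with `--parse-metadata` — a small sketch (pattern borrowed from the README's metadata examples; `URL` stands in for any supported link) that queues vimeo links found in a video's description:

```
yt-dlp --parse-metadata "description:(?P<additional_urls>https?://www\.vimeo\.com/\d+)" URL
```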
* [TubiTv] Add TubiTvShowIE by [Ashish0804](https://github.com/Ashish0804)
* [twitcasting] Fix extractor
* [viu:ott] Fix extractor and support series by [lkho](https://github.com/lkho) and [pukkandan](https://github.com/pukkandan)
* [youtube:tab] Show unavailable videos in playlists by [coletdjnz](https://github.com/coletdjnz)
* [youtube:tab] Reload with unavailable videos for all playlists
* [youtube] Ignore invalid stretch ratio
* [youtube] Improve channel syncid extraction to support ytcfg by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Standardize API calls for tabs, mixes and search by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Bugfix in `_extract_ytcfg`
* [mildom:user:vod] Download only necessary amount of pages
* [mildom] Remove proxy completely by [fstirlitz](https://github.com/fstirlitz)
* Improve the yt-dlp.sh script by [fstirlitz](https://github.com/fstirlitz)
* [lazy_extractor] Do not load plugins
* [ci] Disable fail-fast
* [docs] Clarify which deprecated options still work
* [docs] Fix typos

### 2021.04.11

* [nitter] Fix extraction of reply tweets and update instance list by [B0pol](https://github.com/B0pol)
* [nitter] Fix thumbnails by [B0pol](https://github.com/B0pol)
* [youtube] Fix thumbnail URL
* [youtube] Parse API parameters from initial webpage by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Extract comments' approximate timestamp by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix alert extraction
* [bilibili] Fix uploader
* [utils] Add `datetime_from_str` and `datetime_add_months` by [coletdjnz](https://github.com/coletdjnz)
* Run some `postprocessors` before actual download
* Improve argument parsing for `-P`, `-o`, `-S`
* Fix some `m3u8` not obeying `--allow-unplayable-formats`
* Fix default of `dynamic_mpd`
* Deprecate `--all-formats`, `--include-ads`, `--hls-prefer-native`, `--hls-prefer-ffmpeg`
* [docs] Improvements

### 2021.04.03
* Merge youtube-dl: Upto [commit/654b4f4](https://github.com/ytdl-org/youtube-dl/commit/654b4f4ff2718f38b3182c1188c5d569c14cc70a)
* [mildom] Update extractor with current proxy by [nao20010128nao](https://github.com/nao20010128nao)
* [ard:mediathek] Fix video id extraction
* [generic] Detect Invidious' link element
* [youtube] Show premium state in `availability` by [coletdjnz](https://github.com/coletdjnz)
* [viewsource] Add extractor to handle `view-source:`
* [sponskrub] Run before embedding thumbnail
* [docs] Improve `--parse-metadata` documentation

### 2021.03.24.1
* Use headers and cookies when downloading subtitles by [damianoamatruda](https://github.com/damianoamatruda)
* Parse resolution in info dictionary by [damianoamatruda](https://github.com/damianoamatruda)
* More consistent warning messages by [damianoamatruda](https://github.com/damianoamatruda) and [pukkandan](https://github.com/pukkandan)
* [docs] Add deprecated options and aliases in readme
* [docs] Fix some minor mistakes

* [niconico] Partial fix adapted from [animelover1984/youtube-dl@b5eff52](https://github.com/animelover1984/youtube-dl/commit/b5eff52dd9ed5565672ea1694b38c9296db3fade) (login and smile formats still don't work)
* [niconico] Add user extractor by [animelover1984](https://github.com/animelover1984)
* [stitcher] Merge from youtube-dl by [nixxo](https://github.com/nixxo)
* [rcs] Improved extraction by [nixxo](https://github.com/nixxo)
* [linuxacadamy] Improve regex
* [youtube] Show if video is `private`, `unlisted` etc in info (`availability`) by [coletdjnz](https://github.com/coletdjnz) and [pukkandan](https://github.com/pukkandan)
* [youtube] Bugfix for channel playlist extraction
* [nbc] Improve metadata extraction by [2ShedsJackson](https://github.com/2ShedsJackson)

* [wimtv] Add extractor by [nixxo](https://github.com/nixxo)
* [mtv] Add mtv.it and extract series metadata by [nixxo](https://github.com/nixxo)
* [pluto.tv] Add extractor by [kevinoconnor7](https://github.com/kevinoconnor7)
* [youtube] Rewrite comment extraction by [coletdjnz](https://github.com/coletdjnz)
* [embedthumbnail] Set mtime correctly
* Refactor some postprocessor/downloader code by [pukkandan](https://github.com/pukkandan) and [shirt](https://github.com/shirt-dev)

### 2021.03.07
* [youtube] Fix history, mixes, community pages and trending by [pukkandan](https://github.com/pukkandan) and [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix private feeds/playlists on multi-channel accounts by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Extract alerts from continuation by [coletdjnz](https://github.com/coletdjnz)
* [cbs] Add support for ParamountPlus by [shirt](https://github.com/shirt-dev)
* [mxplayer] Rewrite extractor with show support by [pukkandan](https://github.com/pukkandan) and [Ashish0804](https://github.com/Ashish0804)
* [gedi] Improvements from youtube-dl by [nixxo](https://github.com/nixxo)
* [downloader] Fix bug for `ffmpeg`/`httpie`
* [update] Fix updater removing the executable bit on some UNIX distros
* [update] Fix current build hash for UNIX
* [docs] Include wget/curl/aria2c install instructions for Unix by [Ashish0804](https://github.com/Ashish0804)
* Fix some videos downloading with `m3u8` extension
* Remove "fixup is ignored" warning when fixup wasn't passed by user

* [build] Fix bug

### 2021.03.03
* [youtube] Use new browse API for continuation page extraction by [coletdjnz](https://github.com/coletdjnz) and [pukkandan](https://github.com/pukkandan)
* Fix HLS playlist downloading by [shirt](https://github.com/shirt-dev)
* Merge youtube-dl: Upto [2021.03.03](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.03.03)
* [mtv] Fix extractor
* [ffmpeg] Allow passing custom arguments before -i using `--ppa "ffmpeg_i1:ARGS"` syntax
* Fix `--windows-filenames` removing `/` from UNIX paths
* [hls] Show warning if pycryptodome is not found
* [docs] Improvements
* Fix documentation of `Extractor Options`
* Document `all` in format selection
* Document `playable_in_embed` in output templates

* Exclude `vcruntime140.dll` from UPX by [jbruchon](https://github.com/jbruchon)
* Set version number based on UTC time, not local time
* Publish on PyPi only if token is set
* [docs] Better document `--prefer-free-formats` and add `--no-prefer-free-format`

### 2021.02.15
* [movefiles] Fix compatibility with python2
* [remuxvideo] Fix validation of conditional remux
* [sponskrub] Don't raise error when the video does not exist
* [docs] Crypto is an optional dependency

### 2021.02.04
* Merge youtube-dl: Upto [2021.01.24](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.16)
* Plugin support ([documentation](https://github.com/yt-dlp/yt-dlp#plugins))
* **Multiple paths**: New option `-P`/`--paths` to give different paths for different types of files (see the example below)
    * The syntax is `-P "type:path" -P "type:path"`
    * Valid types are: home, temp, description, annotation, subtitle, infojson, thumbnail
    * Additionally, configuration file is taken from home directory or current directory
* Allow passing different arguments to different external downloaders
* [mildom] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
* Warn when using old style `--external-downloader-args` and `--post-processor-args`
* Fix `--no-overwrite` when using `--write-link`
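A quick sketch of the multiple-paths syntax (illustrative paths; `URL` stands in for any supported link): intermediary files go to the temp path and finished files are moved to the home path.

```
yt-dlp -P "home:~/Videos" -P "temp:/tmp/yt-dlp" URL
```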
* [roosterteeth.com] Fix for bonus episodes by [Zocker1999NET](https://github.com/Zocker1999NET)
* [tiktok] Fix for when share_info is empty
* [EmbedThumbnail] Fix bug due to incorrect function name
* [docs] Changed sponskrub links to point to [yt-dlp/SponSkrub](https://github.com/yt-dlp/SponSkrub) since I am now providing both linux and windows releases
* [docs] Change all links to correctly point to new fork URL
* [docs] Fixes typos

### 2021.01.12
* Redirect channel home to /video
* Print youtube's warning message
* Handle multiple pages for feeds better
* [youtube] Fix ytsearch not returning results sometimes due to promoted content by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Temporary fix for automatic captions - disable json3 by [blackjack4494](https://github.com/blackjack4494)
* Add --break-on-existing by [gergesh](https://github.com/gergesh)
* Pre-check video IDs in the archive before downloading by [pukkandan](https://github.com/pukkandan)

Collaborators.md (new file, 39 lines)

# Collaborators

This is a list of the collaborators of the project and their major contributions. See the [Changelog](Changelog.md) for more details.

You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [authors of youtube-dl](https://github.com/ytdl-org/youtube-dl/blob/master/AUTHORS)


## [pukkandan](https://github.com/pukkandan)

[Ko-fi](https://ko-fi.com/pukkandan)

* Owner of the fork


## [shirt](https://github.com/shirt-dev)

[Ko-fi](https://ko-fi.com/shirt)

* Multithreading (`-N`) and aria2c support for fragment downloads
* Support for media initialization and discontinuity in HLS
* The self-updater (`-U`)


## [coletdjnz](https://github.com/coletdjnz)

[GitHub Sponsors](https://github.com/sponsors/coletdjnz)

* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements


## [Ashish0804](https://github.com/Ashish0804)

[Ko-fi](https://ko-fi.com/ashish0804)

* Added support for new websites Zee5, MXPlayer, DiscoveryPlusIndia, ShemarooMe, Utreon etc
* Added playlist/series downloads for TubiTv, SonyLIV, Voot, HotStar etc

Makefile (22 changed lines)

```
.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites

clean-test:
	rm -rf *.dump *.part* *.ytdl *.info.json *.mp4 *.m4a *.flv *.mp3 *.avi *.mkv *.webm *.3gp *.wav *.ape *.swf *.jpg *.png *.frag *.frag.urls *.frag.aria2 test/testdata/player-*.js *.opus *.webp *.ttml *.vtt *.jpeg
clean-dist:
	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
clean-cache:
```

```
codetest:
	flake8 .

test:
	$(PYTHON) -m pytest
	$(MAKE) codetest

offlinetest: codetest
	$(PYTHON) -m pytest -k "not download"

yt-dlp: yt_dlp/*.py yt_dlp/*/*.py
	mkdir -p zip
```

```
yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
	$(PYTHON) devscripts/make_lazy_extractors.py $@

yt-dlp.tar.gz: yt-dlp README.md supportedsites.md yt-dlp.1 completions Changelog.md AUTHORS
	@tar -czf $(DESTDIR)/yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
		--exclude '*.DS_Store' \
		--exclude '*.kate-swp' \
```

```
		devscripts test \
		Changelog.md AUTHORS LICENSE README.md supportedsites.md \
		Makefile MANIFEST.in yt-dlp.1 completions \
		setup.py setup.cfg yt-dlp yt_dlp

AUTHORS: .mailmap
	git shortlog -s -n | cut -f2 | sort > AUTHORS
```

README.md (482 changed lines)

<div align="center">

# YT-DLP
A command-line program to download videos from YouTube and many other [video platforms](supportedsites.md)

[Release](https://github.com/yt-dlp/yt-dlp/releases/latest) · [CI Status](https://github.com/yt-dlp/yt-dlp/actions) · [License](LICENSE) · [Collaborators](Collaborators.md#collaborators) · [Supported Sites](supportedsites.md) · [Discord](https://discord.gg/H5MNcFW63r) · [Documentation](https://yt-dlp.readthedocs.io) · [Commits](https://github.com/yt-dlp/yt-dlp/commits) · [PyPI](https://pypi.org/project/yt-dlp)

* [Subtitle Options](#subtitle-options)
* [Authentication Options](#authentication-options)
* [Post-processing Options](#post-processing-options)
* [SponsorBlock Options](#sponsorblock-options)
* [Extractor Options](#extractor-options)
* [CONFIGURATION](#configuration)
    * [Authentication with .netrc file](#authentication-with-netrc-file)
    * [Format Selection examples](#format-selection-examples)
* [MODIFYING METADATA](#modifying-metadata)
    * [Modifying metadata examples](#modifying-metadata-examples)
* [EXTRACTOR ARGUMENTS](#extractor-arguments)
* [PLUGINS](#plugins)
* [DEPRECATED OPTIONS](#deprecated-options)
* [MORE](#more)

# NEW FEATURES
The major new features from the latest release of [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc) are:

* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in youtube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API

* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples); a short sketch also follows this list)

* **Merged with youtube-dl [commit/379f52a](https://github.com/ytdl-org/youtube-dl/commit/379f52a4954013767219d25099cce9e0f9401961)**: (v2021.06.06) You get all the latest features and patches of [youtube-dl](https://github.com/ytdl-org/youtube-dl) in addition to all the features of [youtube-dlc](https://github.com/blackjack4494/yt-dlc)

* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that the NicoNico improvements are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.

* **Youtube improvements**:
    * All Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`) and private playlists support downloading multiple pages of content
    * Search (`ytsearch:`, `ytsearchdate:`), search URLs and in-channel search work
    * Mixes support downloading multiple pages of content
    * Most (but not all) age-gated content can be downloaded without cookies
    * Partial workaround for throttling issue
    * Redirect channel's home URL automatically to `/video` to preserve the old behaviour
    * `255kbps` audio is extracted from youtube music if premium cookies are given
    * Youtube music Albums, channels etc can be downloaded ([except self-uploaded music](https://github.com/yt-dlp/yt-dlp/issues/723))

* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[:PROFILE]`

* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
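As a rough illustration of `-S` (the sort keys here are illustrative; `URL` stands in for any supported link), this prefers formats up to 1080p, breaking ties by higher fps:

```
yt-dlp -S "res:1080,fps" URL
```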
* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats (see the sketch after this list)

* **New extractors**: AnimeLab, Philo MSO, Spectrum MSO, SlingTV MSO, Cablevision MSO, Rcs, Gedi, bitwave.tv, mildom, audius, zee5, mtv.it, wimtv, pluto.tv, niconico users, discoveryplus.in, mediathek, NFHSNetwork, nebula, ukcolumn, whowatch, MxplayerShow, parlview (au), YoutubeWebArchive, fancode, Saitosan, ShemarooMe, telemundo, VootSeries, SonyLIVSeries, HotstarSeries, VidioPremier, VidioLive, RCTIPlus, TBS Live, douyin, pornflip, ParamountPlusSeries, ScienceChannel, Utreon, OpenRec, BandcampMusic, blackboardcollaborate, eroprofile albums, mirrativ, BannedVideo, bilibili categories, Epicon, filmmodu, GabTV, HungamaAlbum, ManotoTV, Niconico search, Patreon User, peloton, ProjectVeritas, radiko, StarTV, tiktok user, Tokentube, voicy, TV2HuSeries

* **Fixed/improved extractors**: archive.org, roosterteeth.com, skyit, instagram, itv, SouthparkDe, spreaker, Vlive, akamai, ina, rumble, tennistv, amcnetworks, la7 podcasts, linuxacadamy, nitter, twitcasting, viu, crackle, curiositystream, mediasite, rmcdecouverte, sonyliv, tubi, tenplay, patreon, videa, yahoo, BravoTV, crunchyroll playlist, RTP, viki, Hotstar, vidio, vimeo, mediaset, Mxplayer, nbcolympics, ParamountPlus, Newgrounds, SAML Verizon login, Hungama, afreecatv, aljazeera, ATV, bitchute, camtube, CDA, eroprofile, facebook, HearThisAtIE, iwara, kakao, Motherless, Nova, peertube, pornhub, reddit, tiktok, TV2, TV2Hu, tv5mondeplus, VH1, Viafree, XHamster

* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details

* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [configuration](#configuration) for details

* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`

* **Other new options**: `--print`, `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc

* **Improvements**: Regex and other operators in `--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection) etc

* **Plugin extractors**: Extractors can be loaded from an external file. See [plugins](#plugins) for details
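A minimal sketch of using aria2c as the external downloader (`URL` stands in for an HLS or DASH link):

```
yt-dlp --downloader aria2c URL
```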
Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:

* The options `--id`, `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
* `avconv` is not supported as an alternative to `ffmpeg`
* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s.%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
* `--ignore-errors` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-infojson`. Use `--compat-options no-attach-info-json` to revert this
* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
* All *experiences* of a funimation episode are considered as a single video. This behavior breaks existing archives. Use `--compat-options seperate-video-versions` to extract information from only the default player
* Youtube live chat (if available) is considered as a subtitle. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent live chat from downloading
* Youtube channel URLs are automatically redirected to `/video`. Append a `/featured` to the URL to download only the videos in the home page. If the channel does not have a videos tab, we try to download the equivalent `UU` playlist instead. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
* Unavailable videos are also listed for youtube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
* Some private fields such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this.

For ease of use, a few more compat options are available:
* `--compat-options all`: Use all compat options

You can install yt-dlp using one of the following methods:
* Download the binary from the [latest release](https://github.com/yt-dlp/yt-dlp/releases/latest) (recommended method)
* With Homebrew, `brew install yt-dlp/taps/yt-dlp`
* Use [PyPI package](https://pypi.org/project/yt-dlp): `python3 -m pip install --upgrade yt-dlp`
* Use pip+git: `python3 -m pip install --upgrade git+https://github.com/yt-dlp/yt-dlp.git@release`
* Install master branch: `python3 -m pip install --upgrade git+https://github.com/yt-dlp/yt-dlp`

```
sudo aria2c https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o
sudo chmod a+rx /usr/local/bin/yt-dlp
```

macOS or Linux users that are using Homebrew (formerly known as Linuxbrew for Linux users) can also install it by:

```
brew install yt-dlp/taps/yt-dlp
```

### UPDATE
You can use `yt-dlp -U` to update if you are using the provided release.
If you are using `pip`, simply re-run the same command that was used to install the program.
If you have installed using Homebrew, run `brew upgrade yt-dlp/taps/yt-dlp`

### DEPENDENCIES
Python versions 3.6+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.

While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly recommended
* [**ffmpeg** and **ffprobe**](https://www.ffmpeg.org) - Required for [merging separate video and audio files](#format-selection) as well as for various [post-processing](#post-processing-options) tasks. Licence [depends on the build](https://www.ffmpeg.org/legal.html)
* [**mutagen**](https://github.com/quodlibet/mutagen) - For embedding thumbnail in certain formats. Licenced under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING)
* [**pycryptodome**](https://github.com/Legrandin/pycryptodome) - For decrypting various data. Licenced under [BSD2](https://github.com/Legrandin/pycryptodome/blob/master/LICENSE.rst)
* [**websockets**](https://github.com/aaugustin/websockets) - For downloading over websocket. Licenced under [BSD3](https://github.com/aaugustin/websockets/blob/main/LICENSE)
* [**keyring**](https://github.com/jaraco/keyring) - For decrypting cookies of chromium-based browsers on Linux. Licenced under [MIT](https://github.com/jaraco/keyring/blob/main/LICENSE)
* [**AtomicParsley**](https://github.com/wez/atomicparsley) - For embedding thumbnail in mp4/m4a if mutagen is not present. Licenced under [GPLv2+](https://github.com/wez/atomicparsley/blob/master/COPYING)
* [**rtmpdump**](http://rtmpdump.mplayerhq.hu) - For downloading `rtmp` streams. ffmpeg will be used as a fallback. Licenced under [GPLv2+](http://rtmpdump.mplayerhq.hu)
* [**mplayer**](http://mplayerhq.hu/design7/info.html) or [**mpv**](https://mpv.io) - For downloading `rtsp` streams. ffmpeg will be used as a fallback. Licenced under [GPLv2+](https://github.com/mpv-player/mpv/blob/master/Copyright)
* [**phantomjs**](https://github.com/ariya/phantomjs) - Used in extractors where javascript needs to be run. Licenced under [BSD3](https://github.com/ariya/phantomjs/blob/master/LICENSE.BSD)
* [**sponskrub**](https://github.com/faissaloo/SponSkrub) - For using the now **deprecated** [sponskrub options](#sponskrub-options). Licenced under [GPLv3+](https://github.com/faissaloo/SponSkrub/blob/master/LICENCE.md)
* Any external downloader that you want to use with `--downloader`

To use or redistribute the dependencies, you must agree to their respective licensing terms.

Once you have all the necessary dependencies installed, just run `py pyinst.py`.
You can also build the executable without any version info or metadata by using:

```
pyinstaller.exe yt_dlp\__main__.py --onefile --name yt-dlp
```

Note that pyinstaller [does not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment

**For Unix**:
You will need the required build tools: `python`, `make` (GNU), `pandoc`, `zip`, `pytest`
Then simply run `make`. You can also run `make yt-dlp` instead to compile only the binary without updating any of the additional files

**Note**: In either platform, `devscripts\update-version.py` can be used to automatically update the version number

```
    (default) (Alias: --no-abort-on-error)
--abort-on-error             Abort downloading of further videos if an error occurs (Alias: --no-ignore-errors)
--dump-user-agent            Display the current user-agent and exit
--list-extractors            List all supported extractors and exit
--extractor-descriptions     Output descriptions of all supported extractors and exit
--force-generic-extractor    Force extraction to use the generic extractor
--default-search PREFIX      Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for the search term "large apple". Use the value "auto" to let yt-dlp guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if
```

```
--no-mark-watched            Do not mark videos watched (default)
--no-colors                  Do not emit color codes in output
--compat-options OPTS        Options that can help keep compatibility with youtube-dl or youtube-dlc configurations by reverting some of the changes made in yt-dlp. See "Differences in default behavior" for details
```

```
    specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13
--max-downloads NUMBER       Abort after downloading NUMBER files
--min-filesize SIZE          Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
```

```
--dateafter DATE             Download only videos uploaded on or after this date. The date formats accepted are the same as --date
--min-views COUNT            Do not download any videos with less than COUNT views
--max-views COUNT            Do not download any videos with more than COUNT views
--match-filter FILTER        Generic video filter. Any field (see "OUTPUT TEMPLATE") can be compared with a number or a string using the operators defined in "Filtering formats". You can also simply specify a field to match if the field is present and "!field" to check if the field is not present. In addition, Python style regular expression matching can be done using "~=", and multiple filters can be checked with "&". Use a "\" to escape "&" or quotes if needed. Eg: --match-filter "!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'" matches only videos that are not live, have a like count more than 100 (or the like field is not available), and also have a description that contains the phrase "cats & dogs" (ignoring case)
--no-match-filter            Do not use generic video filter (default)
--no-playlist                Download only the video, if the URL refers to a video and a playlist
```
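For instance (a sketch with `URL` standing in for any supported link; `is_live` and `duration` are standard output-template fields), to skip livestreams and anything under 10 minutes unless the duration is unknown:

```
yt-dlp --match-filter "!is_live & duration>?600" URL
```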
@@ -432,8 +439,12 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
(Alias: --external-downloader)
|
||||
--downloader-args NAME:ARGS Give these arguments to the external
|
||||
downloader. Specify the downloader name and
|
||||
the arguments separated by a colon ":". You
|
||||
can use this option multiple times
|
||||
the arguments separated by a colon ":". For
|
||||
ffmpeg, arguments can be passed to
|
||||
different positions using the same syntax
|
||||
as --postprocessor-args. You can use this
|
||||
option multiple times to give different
|
||||
arguments to different downloaders
|
||||
(Alias: --external-downloader-args)
|
||||
|
||||
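A sketch of per-downloader arguments (the aria2c flags here are illustrative; `URL` stands in for any supported link):

```
yt-dlp --downloader aria2c --downloader-args "aria2c:-x 8 -s 8" URL
```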
## Filesystem Options:
```
    stdin), one URL per line. Lines starting with '#', ';' or ']' are considered as comments and ignored
-P, --paths [TYPES:]PATH     The paths where the files should be downloaded. Specify the type of file and the path separated by a colon ":". All the same types as --output are supported. Additionally, you can also provide "home" (default) and "temp" paths. All intermediary files are first downloaded to the temp path and then the final files are moved over to the home path after download is finished. This option is ignored if --output is an absolute path
-o, --output [TYPES:]TEMPLATE Output filename template; see "OUTPUT TEMPLATE" for details
--output-na-placeholder TEXT Placeholder value for unavailable meta
```

```
--write-info-json            Write video metadata to a .info.json file (this may contain personal information)
--no-write-info-json         Do not write video metadata (default)
--write-playlist-metafiles   Write playlist metadata in addition to the video metadata when using --write-info-json, --write-description etc. (default)
```

```
    option)
--cookies FILE               File to read cookies from and dump cookie jar in
--no-cookies                 Do not read/dump cookies from/to file (default)
--cookies-from-browser BROWSER[:PROFILE]
                             Load cookies from a user profile of the given web browser. Currently supported browsers are: brave|chrome|chromium|edge|firefox|opera|safari|vivaldi. You can specify the user profile name or directory using "BROWSER:PROFILE_NAME" or "BROWSER:PROFILE_PATH". If no profile is given, the most recently accessed one is used
--no-cookies-from-browser    Do not load cookies from browser (default)
--cache-dir DIR              Location in the filesystem where youtube-dl can store some downloaded information (such as client ids and signatures) permanently. By default $XDG_CACHE_HOME/yt-dlp or ~/.cache/yt-dlp
--no-cache-dir               Disable filesystem caching
--rm-cache-dir               Delete all filesystem cache files
```
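For example (a minimal sketch; `firefox` can be any of the supported browsers listed above, and `URL` stands in for any supported link):

```
yt-dlp --cookies-from-browser firefox URL
```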
```
--no-write-thumbnail         Do not write thumbnail image to disk (default)
--write-all-thumbnails       Write all thumbnail image formats to disk
--list-thumbnails            List available thumbnails of each video. Simulate unless --no-simulate is used
```

## Internet Shortcut Options:
```
--write-link                 Write an internet shortcut file, depending
--write-desktop-link         Write a .desktop Linux internet shortcut
```

## Verbosity and Simulation Options:
```
-q, --quiet                  Activate quiet mode. If used with --verbose, print the log to stderr
--no-warnings                Ignore warnings
-s, --simulate               Do not download the video and do not write anything to disk
--no-simulate                Download the video even if printing/listing options are used
--ignore-no-formats-error    Ignore "No video formats" error. Useful for extracting metadata even if the videos are not actually available for download (experimental)
--no-ignore-no-formats-error Throw error when no downloadable video formats are found (default)
--skip-download              Do not download the video but write all related files (Alias: --no-download)
-O, --print TEMPLATE         Quiet, but print the given fields for each video. Simulate unless --no-simulate is used. Either a field name or same syntax as the output template can be used
-j, --dump-json              Quiet, but print JSON information for each video. Simulate unless --no-simulate is used. See "OUTPUT TEMPLATE" for a description of available keys
-J, --dump-single-json       Quiet, but print JSON information for each url or infojson passed. Simulate unless --no-simulate is used. If the URL refers to a playlist, the whole playlist information is dumped in a single line
--force-write-archive        Force download archive entries to be written as far as no errors occur, even if -s or another simulation option is used
```
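A sketch of the reworked printing behaviour (`URL` stands in for any supported link): the first command only prints, while the second prints and still downloads:

```
yt-dlp -O "%(uploader)s: %(title)s" URL
yt-dlp -O "%(uploader)s: %(title)s" --no-simulate URL
```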
```
--no-prefer-free-formats     Don't give any special preference to free containers (default)
--check-formats              Check that the formats selected are actually downloadable
--no-check-formats           Do not check that the formats selected are actually downloadable
-F, --list-formats           List available formats of each video. Simulate unless --no-simulate is used
--merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no merge is required
--allow-unplayable-formats   Allow unplayable formats to be listed and downloaded. All video post-processing will also be turned off
--no-allow-unplayable-formats Do not allow unplayable formats to be listed or downloaded (default)
```

## Subtitle Options:
```
--write-subs                 Write subtitle file
    (Alias: --write-automatic-subs)
--no-write-auto-subs         Do not write auto-generated subtitles (default) (Alias: --no-write-automatic-subs)
--list-subs                  List available subtitles of each video. Simulate unless --no-simulate is used
--sub-format FORMAT          Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"
```

```
--audio-format FORMAT        Specify audio format to convert the audio to when -x is used. Currently supported formats are: best (default) or one of best|aac|flac|mp3|m4a|opus|vorbis|wav
--audio-quality QUALITY      Specify ffmpeg audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K
```
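For instance, a minimal audio-extraction sketch (`URL` stands in for any supported link):

```
yt-dlp -x --audio-format mp3 --audio-quality 0 URL
```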
```
    and the arguments separated by a colon ":" to give the argument to the specified postprocessor/executable. Supported PP are: Merger, ModifyChapters, SplitChapters, ExtractAudio, VideoRemuxer, VideoConvertor, Metadata, EmbedSubtitle, EmbedThumbnail, SubtitlesConvertor, ThumbnailsConvertor, FixupStretched, FixupM4a, FixupM3u8, FixupTimestamp and FixupDuration. The supported executables are: AtomicParsley, FFmpeg and FFprobe. You can also specify "PP+EXE:ARGS" to give the arguments to the specified executable only when being used by the specified postprocessor. Additionally, for ffmpeg/ffprobe, "_i"/"_o" can be appended to the prefix optionally followed by a number to pass the argument before the specified input/output file. Eg: --ppa "Merger+ffmpeg_i1:-v quiet". You can use this option multiple times to give different arguments to different postprocessors. (Alias: --ppa)
-k, --keep-video             Keep the intermediate video file on disk
```
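Borrowing the Eg from the help text above (`URL` stands in for any supported link), this passes `-v quiet` to ffmpeg before its first input file, but only when ffmpeg is invoked by the Merger:

```
yt-dlp --ppa "Merger+ffmpeg_i1:-v quiet" URL
```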
```
--no-embed-subs              Do not embed subtitles (default)
--embed-thumbnail            Embed thumbnail in the video as cover art
--no-embed-thumbnail         Do not embed thumbnail (default)
--embed-metadata             Embed metadata to the video file. Also adds chapters to file unless --no-add-chapters is used (Alias: --add-metadata)
--no-embed-metadata          Do not add metadata to file (default) (Alias: --no-add-metadata)
--embed-chapters             Add chapter markers to the video file (Alias: --add-chapters)
--no-embed-chapters          Do not add chapter markers (default) (Alias: --no-add-chapters)
--parse-metadata FROM:TO     Parse additional metadata like title/artist from other fields; see "MODIFYING METADATA" for details
--replace-in-metadata FIELDS REGEX REPLACE
                             Replace text in a metadata field using the given regex. This option can be used multiple times
--xattrs                     Write metadata to the video file's xattrs (using dublin core and xdg standards)
--fixup POLICY               Automatically correct known faults of the
```

```
    path to the binary or its containing directory
--exec CMD                   Execute a command on the file after downloading and post-processing. Same syntax as the output template can be used to pass any field as arguments to the command. An additional field "filepath" that contains the final path of the downloaded file is also available. If no fields are passed, %(filepath)q is appended to the end of the command. This option can be used multiple times
--no-exec                    Remove any previously defined --exec
--exec-before-download CMD   Execute a command before the actual download. The syntax is the same as --exec but "filepath" is not available. This option can be used multiple times
--no-exec-before-download    Remove any previously defined --exec-before-download
--convert-subs FORMAT        Convert the subtitles to another format (currently supported: srt|vtt|ass|lrc) (Alias: --convert-subtitles)
```
@@ -782,27 +819,51 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
|
||||
files. See "OUTPUT TEMPLATE" for details
|
||||
--no-split-chapters Do not split video based on chapters
|
||||
(default)
|
||||
--remove-chapters REGEX Remove chapters whose title matches the
|
||||
given regular expression. This option can
|
||||
be used multiple times
|
||||
--no-remove-chapters Do not remove any chapters from the file
|
||||
(default)
|
||||
--force-keyframes-at-cuts Force keyframes around the chapters before
|
||||
removing/splitting them. Requires a
|
||||
reencode and thus is very slow, but the
|
||||
resulting video may have fewer artifacts
|
||||
around the cuts
|
||||
--no-force-keyframes-at-cuts Do not force keyframes around the chapters
|
||||
when cutting/splitting (default)
|
||||
|
||||
## SponSkrub (SponsorBlock) Options:
|
||||
[SponSkrub](https://github.com/yt-dlp/SponSkrub) is a utility to
|
||||
mark/remove sponsor segments from downloaded YouTube videos using
|
||||
## SponsorBlock Options:
|
||||
Make chapter entries for, or remove various segments (sponsor,
|
||||
introductions, etc.) from downloaded YouTube videos using the
|
||||
[SponsorBlock API](https://sponsor.ajay.app)
|
||||
|
||||
--sponskrub Use sponskrub to mark sponsored sections.
|
||||
This is enabled by default if the sponskrub
|
||||
binary exists (Youtube only)
|
||||
--no-sponskrub Do not use sponskrub
|
||||
--sponskrub-cut Cut out the sponsor sections instead of
|
||||
simply marking them
|
||||
--no-sponskrub-cut Simply mark the sponsor sections, not cut
|
||||
them out (default)
|
||||
--sponskrub-force Run sponskrub even if the video was already
|
||||
downloaded
|
||||
--no-sponskrub-force Do not cut out the sponsor sections if the
|
||||
video was already downloaded (default)
|
||||
--sponskrub-location PATH Location of the sponskrub binary; either
|
||||
the path to the binary or its containing
|
||||
directory
|
||||
--sponsorblock-mark CATS SponsorBlock categories to create chapters
for, separated by commas. Available
categories are all, sponsor, intro, outro,
selfpromo, interaction, preview,
music_offtopic. You can prefix the category
with a "-" to exempt it. See
https://wiki.sponsor.ajay.app/index.php/Segment_Categories
for description of the categories. Eg:
--sponsorblock-mark all,-preview
--sponsorblock-remove CATS SponsorBlock categories to be removed from
the video file, separated by commas. If a
category is present in both mark and
remove, remove takes precedence. The syntax
and available categories are the same as
for --sponsorblock-mark
--sponsorblock-chapter-title TEMPLATE
The title template for SponsorBlock
chapters created by --sponsorblock-mark.
The same syntax as the output template is
used, but the only available fields are
start_time, end_time, category, categories,
name, category_names. Defaults to
"[SponsorBlock]: %(category_names)l"
--no-sponsorblock Disable both --sponsorblock-mark and
--sponsorblock-remove
--sponsorblock-api URL SponsorBlock API location, defaults to
https://sponsor.ajay.app
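
For instance, to mark every known segment type as a chapter while cutting out only the sponsor segments themselves, an invocation along these lines should work (URL is a placeholder):

```
$ yt-dlp --sponsorblock-mark all --sponsorblock-remove sponsor URL
```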

## Extractor Options:
--extractor-retries RETRIES Number of retries for known extractor
@@ -816,18 +877,10 @@ Then simply run `make`. You can also run `make yt-dlp` instead to compile only t
--no-hls-split-discontinuity Do not split HLS playlists to different
formats at discontinuities such as ad
breaks (default)
--youtube-include-dash-manifest Download the DASH manifests and related
data on YouTube videos (default)
(Alias: --no-youtube-skip-dash-manifest)
--youtube-skip-dash-manifest Do not download the DASH manifests and
related data on YouTube videos
(Alias: --no-youtube-include-dash-manifest)
--youtube-include-hls-manifest Download the HLS manifests and related data
on YouTube videos (default)
(Alias: --no-youtube-skip-hls-manifest)
--youtube-skip-hls-manifest Do not download the HLS manifests and
related data on YouTube videos
(Alias: --no-youtube-include-hls-manifest)
--extractor-args KEY:ARGS Pass these arguments to the extractor. See
"EXTRACTOR ARGUMENTS" for details. You can
use this option multiple times to give
arguments for different extractors

# CONFIGURATION

@@ -902,10 +955,11 @@ The simplest usage of `-o` is not to set any template arguments when downloading
It may however also contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [python string formatting operations](https://docs.python.org/2/library/stdtypes.html#string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by formatting operations.

The field names themselves (the part inside the parentheses) can also have some special formatting:
1. **Object traversal**: The dictionaries and lists available in metadata can be traversed by using a `.` (dot) separator. You can also do python slicing using `:`. Eg: `%(tags.0)s`, `%(subtitles.en.-1.ext)`, `%(id.3:7:-1)s`. Note that the fields that become available using this method are not listed below. Use `-j` to see such fields
1. **Object traversal**: The dictionaries and lists available in metadata can be traversed by using a `.` (dot) separator. You can also do python slicing using `:`. Eg: `%(tags.0)s`, `%(subtitles.en.-1.ext)s`, `%(id.3:7:-1)s`, `%(formats.:.format_id)s`. `%()s` refers to the entire infodict. Note that not all of the fields that become available using this method are listed below. Use `-j` to see such fields
1. **Addition**: Addition and subtraction of numeric fields can be done using `+` and `-` respectively. Eg: `%(playlist_index+10)03d`, `%(n_entries+1-playlist_index)d`
1. **Date/time Formatting**: Date/time fields can be formatted according to [strftime formatting](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) by specifying it separated from the field name using a `>`. Eg: `%(duration>%H-%M-%S)s`, `%(upload_date>%Y-%m-%d)s`, `%(epoch-3600>%H-%M-%S)s`
1. **Default**: A default value can be specified for when the field is empty, using a `|` separator. This overrides `--output-na-placeholder`. Eg: `%(uploader|Unknown)s`
1. **More Conversions**: In addition to the normal format types `diouxXeEfFgGcrs`, `j`, `l`, `q` can be used for converting to **j**son, a comma-separated **l**ist and a string **q**uoted for the terminal, respectively
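
A few illustrative templates combining the above features (URL is a placeholder):

```
# Name the file by the first tag and the upload year
$ yt-dlp -o "%(tags.0)s - %(upload_date>%Y)s - %(title)s.%(ext)s" URL

# Offset the playlist index by 10 and fall back to "Unknown" for a missing uploader
$ yt-dlp -o "%(playlist_index+10)03d - %(uploader|Unknown)s.%(ext)s" URL
```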

To summarize, the general syntax for a field is:
```
@@ -942,6 +996,7 @@ The available fields are:
- `average_rating` (numeric): Average rating given by users; the scale used depends on the webpage
- `comment_count` (numeric): Number of comments on the video (For some extractors, comments are only downloaded at the end, and so this field cannot be used)
- `age_limit` (numeric): Age restriction for the video (years)
- `live_status` (string): One of 'is_live', 'was_live', 'is_upcoming', 'not_live'
- `is_live` (boolean): Whether this video is a live stream or a fixed-length video
- `was_live` (boolean): Whether this video was originally a live stream
- `playable_in_embed` (string): Whether this video is allowed to play in embedded players on other sites
@@ -1018,10 +1073,19 @@ Available only when used in `--print`:

- `urls` (string): The URLs of all requested formats, one on each line
- `filename` (string): Name of the video file. Note that the actual filename may be different due to post-processing. Use `--exec echo` to get the name after all postprocessing is complete

Available only in `--sponsorblock-chapter-title`:

- `start_time` (numeric): Start time of the chapter in seconds
- `end_time` (numeric): End time of the chapter in seconds
- `categories` (list): The SponsorBlock categories the chapter belongs to
- `category` (string): The smallest SponsorBlock category the chapter belongs to
- `category_names` (list): Friendly names of the categories
- `name` (string): Friendly name of the smallest category
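
As a sketch, a custom chapter title built from these fields might look like:

```
$ yt-dlp --sponsorblock-mark all --sponsorblock-chapter-title "[%(category_names)l] %(start_time)ds to %(end_time)ds" URL
```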

Each aforementioned sequence when referenced in an output template will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with the placeholder value provided with `--output-na-placeholder` (`NA` by default).

For example for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `yt-dlp test video` and id `BaW_jenozKcj`, this will result in a `yt-dlp test video-BaW_jenozKcj.mp4` file created in the current directory.
For example for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `yt-dlp test video` and id `BaW_jenozKc`, this will result in a `yt-dlp test video-BaW_jenozKc.mp4` file created in the current directory.

For numeric sequences you can use numeric related formatting, for example, `%(view_count)05d` will result in a string with view count padded with zeros up to 5 characters, like in `00042`.

@@ -1105,7 +1169,7 @@ If you want to download multiple videos and they don't have the same formats ava

If you want to download several formats of the same video use a comma as a separator, e.g. `-f 22,17,18` will download all these three formats, of course if they are available. Or a more sophisticated example combined with the precedence feature: `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`.

You can merge the video and audio of multiple formats into a single file using `-f <format1>+<format2>+...` (requires ffmpeg installed), for example `-f bestvideo+bestaudio` will download the best video-only format, the best audio-only format and mux them together with ffmpeg. If `--no-video-multistreams` is used, all formats with a video stream except the first one are ignored. Similarly, if `--no-audio-multistreams` is used, all formats with an audio stream except the first one are ignored. For example, `-f bestvideo+best+bestaudio` will download and merge all 3 given formats. The resulting file will have 2 video streams and 2 audio streams. But `-f bestvideo+best+bestaudio --no-video-multistreams` will download and merge only `bestvideo` and `bestaudio`. `best` is ignored since another format containing a video stream (`bestvideo`) has already been selected. The order of the formats is therefore important. `-f best+bestaudio --no-audio-multistreams` will download and merge both formats while `-f bestaudio+best --no-audio-multistreams` will ignore `best` and download only `bestaudio`.
You can merge the video and audio of multiple formats into a single file using `-f <format1>+<format2>+...` (requires ffmpeg installed), for example `-f bestvideo+bestaudio` will download the best video-only format, the best audio-only format and mux them together with ffmpeg. Unless `--video-multistreams` is used, all formats with a video stream except the first one are ignored. Similarly, unless `--audio-multistreams` is used, all formats with an audio stream except the first one are ignored. For example, `-f bestvideo+best+bestaudio --video-multistreams --audio-multistreams` will download and merge all 3 given formats. The resulting file will have 2 video streams and 2 audio streams. But `-f bestvideo+best+bestaudio --no-video-multistreams` will download and merge only `bestvideo` and `bestaudio`. `best` is ignored since another format containing a video stream (`bestvideo`) has already been selected. The order of the formats is therefore important. `-f best+bestaudio --no-audio-multistreams` will download and merge both formats while `-f bestaudio+best --no-audio-multistreams` will ignore `best` and download only `bestaudio`.
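
For example, the two common cases described above (URL is a placeholder):

```
# Download the best video-only and best audio-only formats and mux them
$ yt-dlp -f "bestvideo+bestaudio" URL

# Keep all the video and audio streams when merging the three formats
$ yt-dlp -f "bestvideo+best+bestaudio" --video-multistreams --audio-multistreams URL
```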

## Filtering Formats

@@ -1142,7 +1206,9 @@ Format selectors can also be grouped using parentheses, for example if you want

## Sorting Formats

You can change the criteria for being considered the `best` by using `-S` (`--format-sort`). The general format for this is `--format-sort field1,field2...`. The available fields are:
You can change the criteria for being considered the `best` by using `-S` (`--format-sort`). The general format for this is `--format-sort field1,field2...`.

The available fields are:

- `hasvid`: Gives priority to formats that have a video stream
- `hasaud`: Gives priority to formats that have an audio stream
@@ -1170,9 +1236,11 @@ You can change the criteria for being considered the `best` by using `-S` (`--fo
- `br`: Equivalent to using `tbr,vbr,abr`
- `asr`: Audio sample rate in Hz

Note that any other **numerical** field made available by the extractor can also be used. All fields, unless specified otherwise, are sorted in descending order. To reverse this, prefix the field with a `+`. Eg: `+res` prefers the format with the smallest resolution. Additionally, you can suffix a preferred value for the fields, separated by a `:`. Eg: `res:720` prefers larger videos, but no larger than 720p and the smallest video if there are no videos less than 720p. For `codec` and `ext`, you can provide two preferred values, the first for video and the second for audio. Eg: `+codec:avc:m4a` (equivalent to `+vcodec:avc,+acodec:m4a`) sets the video codec preference to `h264` > `h265` > `vp9` > `vp9.2` > `av01` > `vp8` > `h263` > `theora` and audio codec preference to `mp4a` > `aac` > `vorbis` > `opus` > `mp3` > `ac3` > `dts`. You can also make the sorting prefer the nearest values to the provided by using `~` as the delimiter. Eg: `filesize~1G` prefers the format with filesize closest to 1 GiB.
All fields, unless specified otherwise, are sorted in descending order. To reverse this, prefix the field with a `+`. Eg: `+res` prefers the format with the smallest resolution. Additionally, you can suffix a preferred value for the fields, separated by a `:`. Eg: `res:720` prefers larger videos, but no larger than 720p and the smallest video if there are no videos less than 720p. For `codec` and `ext`, you can provide two preferred values, the first for video and the second for audio. Eg: `+codec:avc:m4a` (equivalent to `+vcodec:avc,+acodec:m4a`) sets the video codec preference to `h264` > `h265` > `vp9` > `vp9.2` > `av01` > `vp8` > `h263` > `theora` and audio codec preference to `mp4a` > `aac` > `vorbis` > `opus` > `mp3` > `ac3` > `dts`. You can also make the sorting prefer the nearest values to the provided by using `~` as the delimiter. Eg: `filesize~1G` prefers the format with filesize closest to 1 GiB.
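
As an illustration of the suffix syntax, a sort that caps resolution at 480p and then prefers filesizes closest to 500 MiB (the values are arbitrary) could be written as:

```
$ yt-dlp -S "res:480,filesize~500M" URL
```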

The fields `hasvid`, `ie_pref`, `lang` are always given highest priority in sorting, irrespective of the user-defined order. This behaviour can be changed by using `--force-format-sort`. Apart from these, the default order used is: `quality,res,fps,codec:vp9.2,size,br,asr,proto,ext,hasaud,source,id`. Note that the extractors may override this default order, but they cannot override the user-provided order.
The fields `hasvid` and `ie_pref` are always given highest priority in sorting, irrespective of the user-defined order. This behaviour can be changed by using `--force-format-sort`. Apart from these, the default order used is: `lang,quality,res,fps,codec:vp9.2,size,br,asr,proto,ext,hasaud,source,id`. The extractors may override this default order, but they cannot override the user-provided order.

Note that the default has `codec:vp9.2`; i.e. `av1` is not preferred

If your format selector is `worst`, the last item is selected after sorting. This means it will select the format that is worst in all respects. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps`.

@@ -1304,13 +1372,39 @@ $ yt-dlp -S '+res:480,codec,br'

# MODIFYING METADATA

The metadata obtained from the extractors can be modified by using `--parse-metadata FROM:TO`. The general syntax is to give the name of a field or a template (with similar syntax to [output template](#output-template)) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
The metadata obtained from the extractors can be modified by using `--parse-metadata` and `--replace-in-metadata`

`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.

The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
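
For instance, a title of the form "Artist - Song" could be split into separate fields with (the pattern is illustrative):

```
$ yt-dlp --parse-metadata "title:(?P<artist>.+?) - (?P<title>.+)" URL
```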

Note that any field created by this can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--add-metadata`.

This option also has a few special uses:
* You can use this to change the metadata that is embedded in the media file. To do this, set the value of the corresponding field with a `meta_` prefix. For example, any value you set to the `meta_description` field will be added to the `description` field in the file. You can use this to set a different "description" and "synopsis", for example
* You can download an additional URL based on the metadata of the currently downloaded video. To do this, set the field `additional_urls` to the URL that you want to download. Eg: `--parse-metadata "description:(?P<additional_urls>https?://www\.vimeo\.com/\d+)"` will download the first vimeo video found in the description
* You can use this to change the metadata that is embedded in the media file. To do this, set the value of the corresponding field with a `meta_` prefix. For example, any value you set to the `meta_description` field will be added to the `description` field in the file. For example, you can use this to set a different "description" and "synopsis"

For reference, these are the fields yt-dlp adds by default to the file metadata:

Metadata fields|From
:---|:---
`title`|`track` or `title`
`date`|`upload_date`
`description`, `synopsis`|`description`
`purl`, `comment`|`webpage_url`
`track`|`track_number`
`artist`|`artist`, `creator`, `uploader` or `uploader_id`
`genre`|`genre`
`album`|`album`
`album_artist`|`album_artist`
`disc`|`disc_number`
`show`|`series`
`season_number`|`season_number`
`episode_id`|`episode` or `episode_id`
`episode_sort`|`episode_number`
`language` of each stream|From the format's `language`
**Note**: The file format may not support some of these fields


## Modifying metadata examples

@@ -1329,8 +1423,35 @@ $ yt-dlp --parse-metadata '%(series)s S%(season_number)02dE%(episode_number)02d:
# Set "comment" field in video metadata using description instead of webpage_url
$ yt-dlp --parse-metadata 'description:(?s)(?P<meta_comment>.+)' --add-metadata

# Replace all spaces and "_" in title and uploader with a `-`
$ yt-dlp --replace-in-metadata 'title,uploader' '[ _]' '-'

```

# EXTRACTOR ARGUMENTS

Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. Eg: `--extractor-args "youtube:player_client=android_agegate,web;include_live_dash" --extractor-args "funimation:version=uncut"`

The following extractors use this feature:
* **youtube**
    * `skip`: `hls` or `dash` (or both) to skip download of the respective manifests
    * `player_client`: Clients to extract video data from. The main clients are `web`, `android`, `ios`, `mweb`. These also have `_music`, `_embedded`, `_agegate`, and `_creator` variants (Eg: `web_embedded`) (`mweb` has only `_agegate`). By default, `android,web` is used, but the agegate and creator variants are added as required for age-gated videos. Similarly the music variants are added for `music.youtube.com` urls. You can also use `all` to use all the clients
    * `player_skip`: `configs` - skip any requests for client configs and use defaults
    * `include_live_dash`: Include live dash formats (these formats don't download properly)
    * `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
    * `max_comments`: Maximum number of comments to download (default all)
    * `max_comment_depth`: Maximum depth for nested comments. YouTube supports depths 1 or 2 (default)

* **funimation**
    * `language`: Languages to extract. Eg: `funimation:language=english,japanese`
    * `version`: The video version to extract - `uncut` or `simulcast`

* **vikiChannel**
    * `video_types`: Types of videos to download - one or more of `episodes`, `movies`, `clips`, `trailers`
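
Combining several of the youtube arguments above in a single run might look like this (the values are illustrative):

```
$ yt-dlp --extractor-args "youtube:player_client=android;skip=dash;comment_sort=top;max_comments=100" URL
```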

NOTE: These options may be changed/removed in the future without concern for backward compatibility


# PLUGINS

Plugins are loaded from `<root-dir>/ytdlp_plugins/<type>/__init__.py`. Currently only `extractor` plugins are supported. Support for `downloader` and `postprocessor` plugins may be added in the future. See [ytdlp_plugins](ytdlp_plugins) for an example.
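
As a sketch of what such a plugin could look like (the site, class name and URL pattern are hypothetical):

```python
# <root-dir>/ytdlp_plugins/extractor/__init__.py
from yt_dlp.extractor.common import InfoExtractor


class SamplePluginIE(InfoExtractor):
    # Hypothetical site; the named "id" group is what _match_id() extracts
    _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>\w+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': self._og_search_video_url(webpage),
        }
```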
@@ -1341,8 +1462,8 @@ Plugins are loaded from `<root-dir>/ytdlp_plugins/<type>/__init__.py`. Currently

These are all the deprecated options and the current alternative to achieve the same effect

#### Not recommended
While these options still work, their use is not recommended since there are other alternatives to achieve the same effect
#### Redundant options
While these options are redundant, they are still expected to be used due to their ease of use

--get-description --print description
--get-duration --print duration_string
@@ -1352,8 +1473,19 @@ While these options still work, their use is not recommended since there are oth
--get-thumbnail --print thumbnail
-e, --get-title --print title
-g, --get-url --print urls
-j, --dump-json --print "%()j"
--match-title REGEX --match-filter "title ~= (?i)REGEX"
--reject-title REGEX --match-filter "title !~= (?i)REGEX"
--min-views COUNT --match-filter "view_count >=? COUNT"
--max-views COUNT --match-filter "view_count <=? COUNT"


#### Not recommended
While these options still work, their use is not recommended since there are other alternatives to achieve the same effect

--all-formats -f all
--all-subs --sub-langs all --write-subs
--print-json -j --no-simulate
--autonumber-size NUMBER Use string formatting. Eg: %(autonumber)03d
--autonumber-start NUMBER Use internal field formatting like %(autonumber+NUMBER)s
--metadata-from-title FORMAT --parse-metadata "%(title)s:FORMAT"
@@ -1361,9 +1493,19 @@ While these options still work, their use is not recommended since there are oth
--hls-prefer-ffmpeg --downloader "m3u8:ffmpeg"
--list-formats-old --compat-options list-formats (Alias: --no-list-formats-as-table)
--list-formats-as-table --compat-options -list-formats [Default] (Alias: --no-list-formats-old)
--sponskrub-args ARGS --ppa "sponskrub:ARGS"
--test Used by developers for testing extractors. Not intended for the end user
--youtube-print-sig-code Used for testing youtube signatures
--youtube-skip-dash-manifest --extractor-args "youtube:skip=dash" (Alias: --no-youtube-include-dash-manifest)
--youtube-skip-hls-manifest --extractor-args "youtube:skip=hls" (Alias: --no-youtube-include-hls-manifest)
--youtube-include-dash-manifest Default (Alias: --no-youtube-skip-dash-manifest)
--youtube-include-hls-manifest Default (Alias: --no-youtube-skip-hls-manifest)


#### Developer options
These options are not intended to be used by the end-user

--test Download only part of video for testing extractors
--youtube-print-sig-code For testing youtube signatures
--allow-unplayable-formats List unplayable formats also
--no-allow-unplayable-formats Default


#### Old aliases
@@ -1385,6 +1527,18 @@ These are aliases that are no longer documented for various reasons
--write-srt --write-subs
--yes-overwrites --force-overwrites

#### Sponskrub Options
Support for [SponSkrub](https://github.com/faissaloo/SponSkrub) has been deprecated in favor of `--sponsorblock`

--sponskrub --sponsorblock-mark all
--no-sponskrub --no-sponsorblock
--sponskrub-cut --sponsorblock-remove all
--no-sponskrub-cut --sponsorblock-remove -all
--sponskrub-force Not applicable
--no-sponskrub-force Not applicable
--sponskrub-location Not applicable
--sponskrub-args Not applicable

#### No longer supported
These options may no longer work as intended

@@ -1394,6 +1548,8 @@ These options may no longer work as intended
--no-call-home Default
--include-ads No longer supported
--no-include-ads Default
--write-annotations No supported site has annotations now
--no-write-annotations Default

#### Removed
These options were deprecated since 2014 and have now been entirely removed

Binary file not shown. (Before: 4.2 KiB)
@@ -1,20 +1,25 @@
#!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals

import re


class LazyLoadExtractor(object):
class LazyLoadMetaClass(type):
    def __getattr__(cls, name):
        return getattr(cls._get_real_class(), name)


class LazyLoadExtractor(metaclass=LazyLoadMetaClass):
    _module = None
    _WORKING = True

    @classmethod
    def ie_key(cls):
        return cls.__name__[:-2]
    def _get_real_class(cls):
        if '__real_class' not in cls.__dict__:
            mod = __import__(cls._module, fromlist=(cls.__name__,))
            cls.__real_class = getattr(mod, cls.__name__)
        return cls.__real_class

    def __new__(cls, *args, **kwargs):
        mod = __import__(cls._module, fromlist=(cls.__name__,))
        real_cls = getattr(mod, cls.__name__)
        real_cls = cls._get_real_class()
        instance = real_cls.__new__(real_cls)
        instance.__init__(*args, **kwargs)
        return instance

devscripts/logo.ico (new binary file, not shown. After: 40 KiB)
@@ -16,23 +16,28 @@ if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)

# Block plugins from loading
os.rename('ytdlp_plugins', 'ytdlp_plugins_blocked')
plugins_dirname = 'ytdlp_plugins'
plugins_blocked_dirname = 'ytdlp_plugins_blocked'
if os.path.exists(plugins_dirname):
    os.rename(plugins_dirname, plugins_blocked_dirname)

from yt_dlp.extractor import _ALL_CLASSES
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor

os.rename('ytdlp_plugins_blocked', 'ytdlp_plugins')
if os.path.exists(plugins_blocked_dirname):
    os.rename(plugins_blocked_dirname, plugins_dirname)

with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()

CLASS_PROPERTIES = ['ie_key', 'working', '_match_valid_url', 'suitable', '_match_id', 'get_temp_id']
module_contents = [
    module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
    'class LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']
    module_template,
    *[getsource(getattr(InfoExtractor, k)) for k in CLASS_PROPERTIES],
    '\nclass LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']

ie_template = '''
class {name}({bases}):
    _VALID_URL = {valid_url!r}
    _module = '{module}'
'''

@@ -53,14 +58,17 @@ def get_base_name(base):


def build_lazy_ie(ie, name):
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        valid_url=valid_url,
        module=ie.__module__)
    valid_url = getattr(ie, '_VALID_URL', None)
    if valid_url:
        s += f'    _VALID_URL = {valid_url!r}\n'
    if not ie._WORKING:
        s += '    _WORKING = False\n'
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += '\n' + getsource(ie.suitable)
        s += f'\n{getsource(ie.suitable)}'
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())
@@ -98,7 +106,7 @@ for ie in ordered_cls:
    names.append(name)

module_contents.append(
    '_ALL_CLASSES = [{0}]'.format(', '.join(names)))
    '\n_ALL_CLASSES = [{0}]'.format(', '.join(names)))

module_src = '\n'.join(module_contents) + '\n'


@@ -1,17 +1,16 @@
@setlocal
@echo off
cd /d %~dp0..

rem Keep this list in sync with the `offlinetest` target in Makefile
set DOWNLOAD_TESTS="age_restriction^|download^|iqiyi_sdk_interpreter^|socks^|subtitles^|write_annotations^|youtube_lists^|youtube_signature^|post_hooks"

if "%YTDL_TEST_SET%" == "core" (
    set test_set="-I test_("%DOWNLOAD_TESTS%")\.py"
    set multiprocess_args=""
) else if "%YTDL_TEST_SET%" == "download" (
    set test_set="-I test_(?!"%DOWNLOAD_TESTS%").+\.py"
    set multiprocess_args="--processes=4 --process-timeout=540"
if ["%~1"]==[""] (
    set "test_set="
) else if ["%~1"]==["core"] (
    set "test_set=-k "not download""
) else if ["%~1"]==["download"] (
    set "test_set=-k download"
) else (
    echo YTDL_TEST_SET is not set or invalid
    echo.Invalid test type "%~1". Use "core" ^| "download"
    exit /b 1
)

nosetests test --verbose %test_set:"=% %multiprocess_args:"=%
pytest %test_set%

@@ -1,22 +1,14 @@
#!/bin/bash
#!/bin/sh

# Keep this list in sync with the `offlinetest` target in Makefile
DOWNLOAD_TESTS="age_restriction|download|iqiyi_sdk_interpreter|overwrites|socks|subtitles|write_annotations|youtube_lists|youtube_signature|post_hooks"
if [ -z $1 ]; then
    test_set='test'
elif [ $1 = 'core' ]; then
    test_set='not download'
elif [ $1 = 'download' ]; then
    test_set='download'
else
    echo 'Invalid test type "'$1'". Use "core" | "download"'
    exit 1
fi

test_set=""
multiprocess_args=""

case "$YTDL_TEST_SET" in
    core)
        test_set="-I test_($DOWNLOAD_TESTS)\.py"
        ;;
    download)
        test_set="-I test_(?!$DOWNLOAD_TESTS).+\.py"
        multiprocess_args="--processes=4 --process-timeout=540"
        ;;
    *)
        break
        ;;
esac

nosetests test --verbose $test_set $multiprocess_args
python3 -m pytest -k "$test_set"

devscripts/update-formulae.py (new file, 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env python3
from __future__ import unicode_literals

import json
import os
import re
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from yt_dlp.compat import compat_urllib_request


# usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
# version can be either 0-aligned (yt-dlp version) or normalized (PyPI version)

filename, version = sys.argv[1:]

normalized_version = '.'.join(str(int(x)) for x in version.split('.'))

pypi_release = json.loads(compat_urllib_request.urlopen(
    'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
).read().decode('utf-8'))

tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.tar.gz'))

sha256sum = tarball_file['digests']['sha256']
url = tarball_file['url']

with open(filename, 'r') as r:
    formulae_text = r.read()

formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text)
formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text)

with open(filename, 'w') as w:
    w.write(formulae_text)
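
A hypothetical invocation, matching the usage comment at the top of the script (the formula path and version are placeholders):

```
$ python3 ./devscripts/update-formulae.py path/to/yt-dlp.rb 2021.09.02
```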

docs/Collaborators.md (new file, 5 lines)
@@ -0,0 +1,5 @@
---
orphan: true
---
```{include} ../Collaborators.md
```
@@ -73,7 +73,7 @@ excluded_modules = ['test', 'ytdlp_plugins', 'youtube-dl', 'youtube-dlc']
PyInstaller.__main__.run([
    '--name=yt-dlp%s' % _x86,
    '--onefile',
    '--icon=devscripts/cloud.ico',
    '--icon=devscripts/logo.ico',
    *[f'--exclude-module={module}' for module in excluded_modules],
    *[f'--hidden-import={module}' for module in dependancies],
    '--upx-exclude=vcruntime140.dll',

pytest.ini (new file, 4 lines)
@@ -0,0 +1,4 @@
[pytest]
addopts = -ra -v --strict-markers
markers =
    download
@@ -95,7 +95,9 @@
- **Bandcamp**
- **Bandcamp:album**
- **Bandcamp:weekly**
- **BandcampMusic**
- **bangumi.bilibili.com**: BiliBili番剧
- **BannedVideo**
- **bbc**: BBC
- **bbc.co.uk**: BBC iPlayer
- **bbc.co.uk:article**: BBC articles
@@ -117,6 +119,7 @@
- **Bigflix**
- **Bild**: Bild.de
- **BiliBili**
- **Bilibili category extractor**
- **BilibiliAudio**
- **BilibiliAudioAlbum**
- **BilibiliChannel**
@@ -129,6 +132,7 @@
- **BitChuteChannel**
- **bitwave:replay**
- **bitwave:stream**
- **BlackboardCollaborate**
- **BleacherReport**
- **BleacherReportCMS**
- **Bloomberg**
@@ -151,7 +155,6 @@
- **Camdemy**
- **CamdemyFolder**
- **CamModels**
- **CamTube**
- **CamWithHer**
- **canalc2.tv**
- **Canalplus**: mycanal.fr and piwiplus.fr
@@ -259,6 +262,7 @@
- **dlive:vod**
- **DoodStream**
- **Dotsub**
- **Douyin**
- **DouyuShow**
- **DouyuTV**: 斗鱼
- **DPlay**
@@ -292,8 +296,11 @@
- **Embedly**
- **EMPFlix**
- **Engadget**
- **Epicon**
- **EpiconSeries**
- **Eporner**
- **EroProfile**
- **EroProfile:album**
- **Escapist**
- **ESPN**
- **ESPNArticle**
@@ -306,11 +313,13 @@
- **EyedoTV**
- **facebook**
- **FacebookPluginsVideo**
- **fancode:live**
- **fancode:vod**
- **faz.net**
- **fc2**
- **fc2:embed**
- **Fczenit**
- **Filmmodu**
- **filmon**
- **filmon:channel**
- **Filmweb**
@@ -343,9 +352,12 @@
- **FrontendMastersLesson**
- **FujiTVFODPlus7**
- **Funimation**
- **funimation:page**
- **funimation:show**
- **Funk**
- **Fusion**
- **Fux**
- **GabTV**
- **Gaia**
- **GameInformer**
- **GameSpot**
@@ -401,6 +413,7 @@
- **Huajiao**: 花椒直播
- **HuffPost**: Huffington Post
- **Hungama**
- **HungamaAlbumPlaylist**
- **HungamaSong**
- **Hypem**
- **ign.com**
@@ -513,6 +526,9 @@
- **MallTV**
- **mangomolo:live**
- **mangomolo:video**
- **ManotoTV**: Manoto TV (Episode)
- **ManotoTVLive**: Manoto TV (Live)
- **ManotoTVShow**: Manoto TV (Show)
- **ManyVids**
- **MaoriTV**
- **Markiza**
@@ -548,6 +564,8 @@
- **MinistryGrid**
- **Minoto**
- **miomio.tv**
- **mirrativ**
- **mirrativ:user**
- **MiTele**: mitele.es
- **mixcloud**
- **mixcloud:playlist**
@@ -649,6 +667,9 @@
- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
- **NiconicoUser**
- **nicovideo:search**: Nico video searches
- **nicovideo:search:date**: Nico video searches, newest first
- **nicovideo:search_url**: Nico video search URLs
- **Nintendo**
- **Nitter**
- **njoy**: N-JOY
@@ -699,6 +720,8 @@
- **OnionStudios**
- **Ooyala**
- **OoyalaExternal**
- **openrec**
- **openrec:capture**
- **OraTV**
- **orf:burgenland**: Radio Burgenland
- **orf:fm4**: radio FM4
@@ -724,12 +747,17 @@
- **PalcoMP3:video**
- **pandora.tv**: 판도라TV
- **ParamountNetwork**
- **ParamountPlus**
- **ParamountPlusSeries**
- **parliamentlive.tv**: UK parliament videos
- **Parlview**
- **Patreon**
- **PatreonUser**
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
- **PearVideo**
- **PeerTube**
- **peloton**
- **peloton:live**: Peloton Live
- **People**
- **PerformGroup**
- **periscope**: Periscope
@@ -766,9 +794,11 @@
- **PopcornTV**
- **PornCom**
- **PornerBros**
- **PornFlip**
- **PornHd**
- **PornHub**: PornHub and Thumbzilla
- **PornHubPagedVideoList**
- **PornHubPlaylist**
- **PornHubUser**
- **PornHubUserVideosUpload**
- **Pornotube**
@@ -776,6 +806,7 @@
- **PornoXO**
- **PornTube**
- **PressTV**
- **ProjectVeritas**
- **prosiebensat1**: ProSiebenSat.1 Digital
- **puhutv**
- **puhutv:serie**
@@ -792,6 +823,8 @@
- **QuicklineLive**
- **R7**
- **R7Article**
- **Radiko**
- **RadikoRadio**
- **radio.de**
- **radiobremen**
- **radiocanada**
@@ -808,6 +841,9 @@
- **RCS**
- **RCSEmbeds**
- **RCSVarious**
- **RCTIPlus**
- **RCTIPlusSeries**
- **RCTIPlusTV**
- **RDS**: RDS.ca
- **RedBull**
- **RedBullEmbed**
@@ -866,6 +902,7 @@
- **savefrom.net**
- **SBS**: sbs.com.au
- **schooltv**
- **ScienceChannel**
- **screen.yahoo:search**: Yahoo screen search
- **Screencast**
- **ScreencastOMatic**
@@ -938,6 +975,7 @@
- **SRGSSR**
- **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
- **stanfordoc**: Stanford Open ClassRoom
- **startv**
- **Steam**
- **Stitcher**
- **StitcherShow**
@@ -1004,13 +1042,15 @@
- **ThisAmericanLife**
- **ThisAV**
- **ThisOldHouse**
- **ThisVid**
- **TikTok**
- **tiktok:user**
- **tinypic**: tinypic.com videos
- **TMZ**
- **TNAFlix**
- **TNAFlixNetworkEmbed**
- **toggle**
- **Tokentube**
- **Tokentube:channel**
- **ToonGoggles**
- **tou.tv**
- **Toypics**: Toypics video
@@ -1033,10 +1073,11 @@
- **Turbo**
- **tv.dfb.de**
- **TV2**
- **tv2.hu**
- **TV2Article**
- **TV2DK**
- **TV2DKBornholmPlay**
- **tv2play.hu**
- **tv2playseries.hu**
- **TV4**: tv4.se and tv4play.se
- **TV5MondePlus**: TV5MONDE+
- **tv5unis**
@@ -1101,6 +1142,7 @@
- **ustream:channel**
- **ustudio**
- **ustudio:embed**
- **Utreon**
- **Varzesh3**
- **Vbox7**
- **VeeHD**
@@ -1169,6 +1211,8 @@
- **VODPl**
- **VODPlatform**
- **VoiceRepublic**
- **voicy**
- **voicy:channel**
- **Voot**
- **VootSeries**
- **VoxMedia**

@@ -22,6 +22,14 @@ from yt_dlp.utils import (
)


if "pytest" in sys.modules:
    import pytest
    is_download_test = pytest.mark.download
else:
    def is_download_test(testClass):
        return testClass
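
A hypothetical test module would then opt into the marker like this (the class and test names are invented for illustration):

```python
import unittest

from test.helper import is_download_test


@is_download_test
class TestExampleDownload(unittest.TestCase):
    # Deselected by the `-k "not download"` filter used in the test scripts above;
    # under plain unittest the decorator is a no-op and the class runs normally
    def test_example(self):
        self.assertTrue(True)
```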
|
||||
|
||||
|
||||
def get_params(override=None):
|
||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||
"parameters.json")
|
||||
@@ -190,7 +198,10 @@ def expect_info_dict(self, got_dict, expected_dict):
|
||||
expect_dict(self, got_dict, expected_dict)
|
||||
# Check for the presence of mandatory fields
|
||||
if got_dict.get('_type') not in ('playlist', 'multi_video'):
|
||||
for key in ('id', 'url', 'title', 'ext'):
|
||||
mandatory_fields = ['id', 'title']
|
||||
if expected_dict.get('ext'):
|
||||
mandatory_fields.extend(('url', 'ext'))
|
||||
for key in mandatory_fields:
|
||||
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
|
||||
# Check for mandatory fields that are automatically set by YoutubeDL
|
||||
for key in ['webpage_url', 'extractor', 'extractor_key']:
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
{
|
||||
"check_formats": false,
|
||||
"consoletitle": false,
|
||||
"continuedl": true,
|
||||
"forcedescription": false,
|
||||
|
||||
@@ -35,13 +35,13 @@ class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler)
|
||||
assert False
|
||||
|
||||
|
||||
class TestIE(InfoExtractor):
|
||||
class DummyIE(InfoExtractor):
|
||||
pass
|
||||
|
||||
|
||||
class TestInfoExtractor(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.ie = TestIE(FakeYDL())
|
||||
self.ie = DummyIE(FakeYDL())
|
||||
|
||||
def test_ie_key(self):
|
||||
self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)
|
||||
|
||||
@@ -10,14 +10,15 @@ import unittest
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
import copy
|
||||
import json
|
||||
|
||||
from test.helper import FakeYDL, assertRegexpMatches
|
||||
from yt_dlp import YoutubeDL
|
||||
from yt_dlp.compat import compat_str, compat_urllib_error
|
||||
from yt_dlp.compat import compat_os_name, compat_setenv, compat_str, compat_urllib_error
|
||||
from yt_dlp.extractor import YoutubeIE
|
||||
from yt_dlp.extractor.common import InfoExtractor
|
||||
from yt_dlp.postprocessor.common import PostProcessor
|
||||
from yt_dlp.utils import ExtractorError, int_or_none, match_filter_func
|
||||
from yt_dlp.utils import ExtractorError, int_or_none, match_filter_func, LazyList
|
||||
|
||||
TEST_URL = 'http://localhost/sample.mp4'
|
||||
|
||||
@@ -35,6 +36,9 @@ class YDL(FakeYDL):
|
||||
def to_screen(self, msg):
|
||||
self.msgs.append(msg)
|
||||
|
||||
def dl(self, *args, **kwargs):
|
||||
assert False, 'Downloader must not be invoked for test_YoutubeDL'
|
||||
|
||||
|
||||
def _make_result(formats, **kwargs):
|
||||
res = {
|
||||
@@ -117,35 +121,24 @@ class TestFormatSelection(unittest.TestCase):
|
||||
]
|
||||
info_dict = _make_result(formats)
|
||||
|
||||
ydl = YDL({'format': '20/47'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], '47')
|
||||
def test(inp, *expected, multi=False):
|
||||
ydl = YDL({
|
||||
'format': inp,
|
||||
'allow_multiple_video_streams': multi,
|
||||
'allow_multiple_audio_streams': multi,
|
||||
})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = map(lambda x: x['format_id'], ydl.downloaded_info_dicts)
|
||||
self.assertEqual(list(downloaded), list(expected))
|
||||
|
||||
ydl = YDL({'format': '20/71/worst'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], '35')
|
||||
|
||||
ydl = YDL()
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], '2')
|
||||
|
||||
ydl = YDL({'format': 'webm/mp4'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], '47')
|
||||
|
||||
ydl = YDL({'format': '3gp/40/mp4'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], '35')
|
||||
|
||||
ydl = YDL({'format': 'example-with-dashes'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], 'example-with-dashes')
|
||||
test('20/47', '47')
|
||||
test('20/71/worst', '35')
|
||||
test(None, '2')
|
||||
test('webm/mp4', '47')
|
||||
test('3gp/40/mp4', '35')
|
||||
test('example-with-dashes', 'example-with-dashes')
|
||||
test('all', '35', 'example-with-dashes', '45', '47', '2') # Order doesn't actually matter for this
|
||||
test('mergeall', '2+47+45+example-with-dashes+35', multi=True)
|
||||
|
||||
def test_format_selection_audio(self):
|
||||
formats = [
|
||||
@@ -655,6 +648,7 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
'title1': '$PATH',
|
||||
'title2': '%PATH%',
|
||||
'title3': 'foo/bar\\test',
|
||||
'title4': 'foo "bar" test',
|
||||
'timestamp': 1618488000,
|
||||
'duration': 100000,
|
||||
'playlist_index': 1,
|
||||
@@ -671,21 +665,28 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
self.assertEqual(ydl.validate_outtmpl(tmpl), None)
|
||||
|
||||
outtmpl, tmpl_dict = ydl.prepare_outtmpl(tmpl, info or self.outtmpl_info)
|
||||
out = outtmpl % tmpl_dict
|
||||
out = ydl.escape_outtmpl(outtmpl) % tmpl_dict
|
||||
fname = ydl.prepare_filename(info or self.outtmpl_info)
|
||||
|
||||
if callable(expected):
|
||||
self.assertTrue(expected(out))
|
||||
self.assertTrue(expected(fname))
|
||||
elif isinstance(expected, compat_str):
|
||||
self.assertEqual((out, fname), (expected, expected))
|
||||
else:
|
||||
self.assertEqual((out, fname), expected)
|
||||
if not isinstance(expected, (list, tuple)):
|
||||
expected = (expected, expected)
|
||||
for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected):
|
||||
if callable(expect):
|
||||
self.assertTrue(expect(got), f'Wrong {name} from {tmpl}')
|
||||
else:
|
||||
self.assertEqual(got, expect, f'Wrong {name} from {tmpl}')
|
||||
|
||||
# Side-effects
|
||||
original_infodict = dict(self.outtmpl_info)
|
||||
test('foo.bar', 'foo.bar')
|
||||
original_infodict['epoch'] = self.outtmpl_info.get('epoch')
|
||||
self.assertTrue(isinstance(original_infodict['epoch'], int))
|
||||
test('%(epoch)d', int_or_none)
|
||||
self.assertEqual(original_infodict, self.outtmpl_info)
|
||||
|
||||
# Auto-generated fields
|
||||
test('%(id)s.%(ext)s', '1234.mp4')
|
||||
test('%(duration_string)s', ('27:46:40', '27-46-40'))
|
||||
test('%(epoch)d', int_or_none)
|
||||
test('%(resolution)s', '1080p')
|
||||
test('%(playlist_index)s', '001')
|
||||
test('%(autonumber)s', '00001')
|
||||
@@ -693,9 +694,15 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(autonumber)s', '001', autonumber_size=3)
|
||||
|
||||
# Escaping %
|
||||
test('%', '%')
|
||||
test('%%', '%')
|
||||
test('%%%%', '%%')
|
||||
test('%s', '%s')
|
||||
test('%%%s', '%%s')
|
||||
test('%d', '%d')
|
||||
test('%abc%', '%abc%')
|
||||
test('%%(width)06d.%(ext)s', '%(width)06d.mp4')
|
||||
test('%%%(height)s', '%1080')
|
||||
test('%(width)06d.%(ext)s', 'NA.mp4')
|
||||
test('%(width)06d.%%(ext)s', 'NA.%(ext)s')
|
||||
test('%%(width)06d.%(ext)s', '%(width)06d.mp4')
|
||||
@@ -710,12 +717,18 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(id)s', ('ab:cd', 'ab -cd'), info={'id': 'ab:cd'})
|
||||
|
||||
# Invalid templates
|
||||
self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%'), ValueError))
|
||||
self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
|
||||
test('%(invalid@tmpl|def)s', 'none', outtmpl_na_placeholder='none')
|
||||
test('%()s', 'NA')
|
||||
test('%s', '%s')
|
||||
test('%d', '%d')
|
||||
test('%(..)s', 'NA')
|
||||
|
||||
# Entire info_dict
|
||||
def expect_same_infodict(out):
|
||||
got_dict = json.loads(out)
|
||||
for info_field, expected in self.outtmpl_info.items():
|
||||
self.assertEqual(got_dict.get(info_field), expected, info_field)
|
||||
return True
|
||||
|
||||
test('%()j', (expect_same_infodict, str))
|
||||
|
||||
# NA placeholder
|
||||
NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(x|def)s-%(id)s.%(ext)s'
|
||||
@@ -746,13 +759,26 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(width|0)04d', '0000')
|
||||
test('a%(width|)d', 'a', outtmpl_na_placeholder='none')
|
||||
|
||||
# Internal formatting
|
||||
FORMATS = self.outtmpl_info['formats']
|
||||
sanitize = lambda x: x.replace(':', ' -').replace('"', "'")
|
||||
|
||||
# Custom type casting
|
||||
test('%(formats.:.id)l', 'id1, id2, id3')
|
||||
test('%(ext)l', 'mp4')
|
||||
test('%(formats.:.id) 15l', ' id1, id2, id3')
|
||||
test('%(formats)j', (json.dumps(FORMATS), sanitize(json.dumps(FORMATS))))
|
||||
if compat_os_name == 'nt':
|
||||
test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
|
||||
else:
|
||||
test('%(title4)q', ('\'foo "bar" test\'', "'foo 'bar' test'"))
|
||||
|
||||
# Internal formatting
|
||||
test('%(timestamp-1000>%H-%M-%S)s', '11-43-20')
|
||||
test('%(title|%)s %(title|%%)s', '% %%')
|
||||
test('%(id+1-height+3)05d', '00158')
|
||||
test('%(width+100)05d', 'NA')
|
||||
test('%(formats.0) 15s', ('% 15s' % FORMATS[0], '% 15s' % str(FORMATS[0]).replace(':', ' -')))
|
||||
test('%(formats.0)r', (repr(FORMATS[0]), repr(FORMATS[0]).replace(':', ' -')))
|
||||
test('%(formats.0) 15s', ('% 15s' % FORMATS[0], '% 15s' % sanitize(str(FORMATS[0]))))
|
||||
test('%(formats.0)r', (repr(FORMATS[0]), sanitize(repr(FORMATS[0]))))
|
||||
test('%(height.0)03d', '001')
|
||||
test('%(-height.0)04d', '-001')
|
||||
test('%(formats.-1.id)s', FORMATS[-1]['id'])
|
||||
@@ -762,11 +788,22 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(formats.0.id.-1+id)f', '1235.000000')
|
||||
test('%(formats.0.id.-1+formats.1.id.-1)d', '3')
|
||||
|
||||
# Laziness
|
||||
def gen():
|
||||
yield from range(5)
|
||||
raise self.assertTrue(False, 'LazyList should not be evaluated till here')
|
||||
test('%(key.4)s', '4', info={'key': LazyList(gen())})
|
||||
|
||||
# Empty filename
|
||||
test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
|
||||
# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
|
||||
# test('%(foo|)s', ('', '_')) # fixme
|
||||
|
||||
# Environment variable expansion for prepare_filename
|
||||
compat_setenv('__yt_dlp_var', 'expanded')
|
||||
envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
|
||||
test(envvar, (envvar, 'expanded'))
|
||||
|
||||
# Path expansion and escaping
|
||||
test('Hello %(title1)s', 'Hello $PATH')
|
||||
test('Hello %(title2)s', 'Hello %PATH%')
|
||||
@@ -941,54 +978,31 @@ class TestYoutubeDL(unittest.TestCase):
             ydl.process_ie_result(copy.deepcopy(playlist))
             return ydl.downloaded_info_dicts

-        def get_ids(params):
-            return [int(v['id']) for v in get_downloaded_info_dicts(params)]
+        def test_selection(params, expected_ids):
+            results = [
+                (v['playlist_autonumber'] - 1, (int(v['id']), v['playlist_index']))
+                for v in get_downloaded_info_dicts(params)]
+            self.assertEqual(results, list(enumerate(zip(expected_ids, expected_ids))))

-        result = get_ids({})
-        self.assertEqual(result, [1, 2, 3, 4])
-
-        result = get_ids({'playlistend': 10})
-        self.assertEqual(result, [1, 2, 3, 4])
-
-        result = get_ids({'playlistend': 2})
-        self.assertEqual(result, [1, 2])
-
-        result = get_ids({'playliststart': 10})
-        self.assertEqual(result, [])
-
-        result = get_ids({'playliststart': 2})
-        self.assertEqual(result, [2, 3, 4])
-
-        result = get_ids({'playlist_items': '2-4'})
-        self.assertEqual(result, [2, 3, 4])
-
-        result = get_ids({'playlist_items': '2,4'})
-        self.assertEqual(result, [2, 4])
-
-        result = get_ids({'playlist_items': '10'})
-        self.assertEqual(result, [])
-
-        result = get_ids({'playlist_items': '3-10'})
-        self.assertEqual(result, [3, 4])
-
-        result = get_ids({'playlist_items': '2-4,3-4,3'})
-        self.assertEqual(result, [2, 3, 4])
+        test_selection({}, [1, 2, 3, 4])
+        test_selection({'playlistend': 10}, [1, 2, 3, 4])
+        test_selection({'playlistend': 2}, [1, 2])
+        test_selection({'playliststart': 10}, [])
+        test_selection({'playliststart': 2}, [2, 3, 4])
+        test_selection({'playlist_items': '2-4'}, [2, 3, 4])
+        test_selection({'playlist_items': '2,4'}, [2, 4])
+        test_selection({'playlist_items': '10'}, [])

         # Tests for https://github.com/ytdl-org/youtube-dl/issues/10591
-        # @{
-        result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
-        self.assertEqual(result[0]['playlist_index'], 2)
-        self.assertEqual(result[1]['playlist_index'], 3)
+        test_selection({'playlist_items': '2-4,3-4,3'}, [2, 3, 4])
+        test_selection({'playlist_items': '4,2'}, [4, 2])

-        result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
-        self.assertEqual(result[0]['playlist_index'], 2)
-        self.assertEqual(result[1]['playlist_index'], 3)
-        self.assertEqual(result[2]['playlist_index'], 4)
-
-        result = get_downloaded_info_dicts({'playlist_items': '4,2'})
-        self.assertEqual(result[0]['playlist_index'], 4)
-        self.assertEqual(result[1]['playlist_index'], 2)
-        # @}
+        # Tests for https://github.com/yt-dlp/yt-dlp/issues/720
+        # https://github.com/yt-dlp/yt-dlp/issues/302
+        test_selection({'playlistreverse': True}, [4, 3, 2, 1])
+        test_selection({'playliststart': 2, 'playlistreverse': True}, [4, 3, 2])
+        test_selection({'playlist_items': '2,4', 'playlistreverse': True}, [4, 2])
+        test_selection({'playlist_items': '4,2'}, [4, 2])
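A short sketch of the user-facing options these selection tests cover (values are illustrative):

    from yt_dlp import YoutubeDL

    # 'playlist_items' accepts comma-separated indices and ranges; per the
    # tests above, overlapping ranges like '2-4,3-4,3' yield each item once
    ydl = YoutubeDL({
        'playlist_items': '2-4,3-4,3',
        'playlistreverse': True,  # reverses the resulting download order
    })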
     def test_urlopen_no_file_protocol(self):
         # see https://github.com/ytdl-org/youtube-dl/issues/8227

@@ -7,8 +7,7 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import try_rm
+from test.helper import try_rm, is_download_test

 from yt_dlp import YoutubeDL

@@ -32,6 +31,7 @@ def _download_restricted(url, filename, age):
     return res


+@is_download_test
 class TestAgeRestriction(unittest.TestCase):
     def _assert_restricted(self, url, filename, age, old_age=None):
         self.assertTrue(_download_restricted(url, filename, old_age))
96  test/test_cookies.py  Normal file
@@ -0,0 +1,96 @@
import unittest
from datetime import datetime, timezone

from yt_dlp import cookies
from yt_dlp.cookies import (
    CRYPTO_AVAILABLE,
    LinuxChromeCookieDecryptor,
    MacChromeCookieDecryptor,
    WindowsChromeCookieDecryptor,
    YDLLogger,
    parse_safari_cookies,
    pbkdf2_sha1,
)


class MonkeyPatch:
    def __init__(self, module, temporary_values):
        self._module = module
        self._temporary_values = temporary_values
        self._backup_values = {}

    def __enter__(self):
        for name, temp_value in self._temporary_values.items():
            self._backup_values[name] = getattr(self._module, name)
            setattr(self._module, name, temp_value)

    def __exit__(self, exc_type, exc_val, exc_tb):
        for name, backup_value in self._backup_values.items():
            setattr(self._module, name, backup_value)


class TestCookies(unittest.TestCase):
    def test_chrome_cookie_decryptor_linux_derive_key(self):
        key = LinuxChromeCookieDecryptor.derive_key(b'abc')
        self.assertEqual(key, b'7\xa1\xec\xd4m\xfcA\xc7\xb19Z\xd0\x19\xdcM\x17')

    def test_chrome_cookie_decryptor_mac_derive_key(self):
        key = MacChromeCookieDecryptor.derive_key(b'abc')
        self.assertEqual(key, b'Y\xe2\xc0\xd0P\xf6\xf4\xe1l\xc1\x8cQ\xcb|\xcdY')

    def test_chrome_cookie_decryptor_linux_v10(self):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
            encrypted_value = b'v10\xccW%\xcd\xe6\xe6\x9fM" \xa7\xb0\xca\xe4\x07\xd6'
            value = 'USD'
            decryptor = LinuxChromeCookieDecryptor('Chrome', YDLLogger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_linux_v11(self):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b'',
                                   'KEYRING_AVAILABLE': True}):
            encrypted_value = b'v11#\x81\x10>`w\x8f)\xc0\xb2\xc1\r\xf4\x1al\xdd\x93\xfd\xf8\xf8N\xf2\xa9\x83\xf1\xe9o\x0elVQd'
            value = 'tz=Europe.London'
            decryptor = LinuxChromeCookieDecryptor('Chrome', YDLLogger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    @unittest.skipIf(not CRYPTO_AVAILABLE, 'cryptography library not available')
    def test_chrome_cookie_decryptor_windows_v10(self):
        with MonkeyPatch(cookies, {
            '_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&'
        }):
            encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
            value = '32101439'
            decryptor = WindowsChromeCookieDecryptor('', YDLLogger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_mac_v10(self):
        with MonkeyPatch(cookies, {'_get_mac_keyring_password': lambda *args, **kwargs: b'6eIDUdtKAacvlHwBVwvg/Q=='}):
            encrypted_value = b'v10\xb3\xbe\xad\xa1[\x9fC\xa1\x98\xe0\x9a\x01\xd9\xcf\xbfc'
            value = '2021-06-01-22'
            decryptor = MacChromeCookieDecryptor('', YDLLogger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_safari_cookie_parsing(self):
        cookies = \
            b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y' \
            b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A' \
            b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01' \
            b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00' \
            b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00('

        jar = parse_safari_cookies(cookies)
        self.assertEqual(len(jar), 1)
        cookie = list(jar)[0]
        self.assertEqual(cookie.domain, 'localhost')
        self.assertEqual(cookie.port, None)
        self.assertEqual(cookie.path, '/')
        self.assertEqual(cookie.name, 'foo')
        self.assertEqual(cookie.value, 'test%20%3Bcookie')
        self.assertFalse(cookie.secure)
        expected_expiration = datetime(2021, 6, 18, 21, 39, 19, tzinfo=timezone.utc)
        self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))

    def test_pbkdf2_sha1(self):
        key = pbkdf2_sha1(b'peanuts', b' ' * 16, 1, 16)
        self.assertEqual(key, b'g\xe1\x8e\x0fQ\x1c\x9b\xf3\xc9`!\xaa\x90\xd9\xd34')
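The pbkdf2_sha1 helper should agree with the standard library, so the fixture above can be cross-checked independently (a sketch, assuming plain PBKDF2-HMAC-SHA1 semantics):

    import hashlib

    # 1 iteration, a 16-byte salt of spaces, 16-byte derived key, as in the test
    key = hashlib.pbkdf2_hmac('sha1', b'peanuts', b' ' * 16, 1, 16)
    assert key == b'g\xe1\x8e\x0fQ\x1c\x9b\xf3\xc9`!\xaa\x90\xd9\xd34'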
51  test/test_download.py  Normal file → Executable file
@@ -10,12 +10,13 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from test.helper import (
     assertGreaterEqual,
+    expect_info_dict,
     expect_warnings,
     get_params,
     gettestcases,
-    expect_info_dict,
-    try_rm,
+    is_download_test,
     report_warning,
+    try_rm,
 )

@@ -64,6 +65,7 @@ def _file_md5(fn):
 defs = gettestcases()


+@is_download_test
 class TestDownload(unittest.TestCase):
     # Parallel testing in nosetests. See
     # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
@@ -71,6 +73,8 @@ class TestDownload(unittest.TestCase):

     maxDiff = None

+    COMPLETED_TESTS = {}
+
     def __str__(self):
         """Identify each test with the `add_ie` attribute, if available."""

@@ -92,6 +96,9 @@ def generator(test_case, tname):

     def test_template(self):
+        if self.COMPLETED_TESTS.get(tname):
+            return
+        self.COMPLETED_TESTS[tname] = True
         ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
         other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
         is_playlist = any(k.startswith('playlist') for k in test_case)
@@ -106,8 +113,13 @@ def generator(test_case, tname):

         for tc in test_cases:
             info_dict = tc.get('info_dict', {})
-            if not (info_dict.get('id') and info_dict.get('ext')):
-                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+            params = tc.get('params', {})
+            if not info_dict.get('id'):
+                raise Exception('Test definition incorrect. \'id\' key is not present')
+            elif not info_dict.get('ext'):
+                if params.get('skip_download') and params.get('ignore_no_formats_error'):
+                    continue
+                raise Exception('Test definition incorrect. The output file cannot be known. \'ext\' key is not present')

         if 'skip' in test_case:
             print_skipping(test_case['skip'])
@@ -135,7 +147,7 @@ def generator(test_case, tname):
         expect_warnings(ydl, test_case.get('expected_warnings', []))

         def get_tc_filename(tc):
-            return ydl.prepare_filename(tc.get('info_dict', {}))
+            return ydl.prepare_filename(dict(tc.get('info_dict', {})))

         res_dict = None

@@ -248,12 +260,12 @@ def generator(test_case, tname):


 # And add them to TestDownload
-for n, test_case in enumerate(defs):
-    tname = 'test_' + str(test_case['name'])
-    i = 1
-    while hasattr(TestDownload, tname):
-        tname = 'test_%s_%d' % (test_case['name'], i)
-        i += 1
+tests_counter = {}
+for test_case in defs:
+    name = test_case['name']
+    i = tests_counter.get(name, 0)
+    tests_counter[name] = i + 1
+    tname = f'test_{name}_{i}' if i else f'test_{name}'
     test_method = generator(test_case, tname)
     test_method.__name__ = str(tname)
     ie_list = test_case.get('add_ie')
@@ -262,5 +274,22 @@ for n, test_case in enumerate(defs):
     del test_method


+def batch_generator(name, num_tests):
+
+    def test_template(self):
+        for i in range(num_tests):
+            getattr(self, f'test_{name}_{i}' if i else f'test_{name}')()
+
+    return test_template
+
+
+for name, num_tests in tests_counter.items():
+    test_method = batch_generator(name, num_tests)
+    test_method.__name__ = f'test_{name}_all'
+    test_method.add_ie = ''
+    setattr(TestDownload, test_method.__name__, test_method)
+    del test_method
+
+
 if __name__ == '__main__':
     unittest.main()
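The generator/setattr pattern above is standard dynamic test creation with unittest; a self-contained sketch of the same idea:

    import unittest

    def make_test(value):
        def test(self):
            self.assertIsInstance(value, str)
        return test

    class TestGenerated(unittest.TestCase):
        pass

    # attach one uniquely-named test method per case, as the loops above do
    for i, case in enumerate(['foo', 'bar']):
        method = make_test(case)
        method.__name__ = f'test_case_{i}' if i else 'test_case'
        setattr(TestGenerated, method.__name__, method)
        del method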
@@ -8,7 +8,7 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import FakeYDL
+from test.helper import FakeYDL, is_download_test
 from yt_dlp.extractor import IqiyiIE

@@ -31,6 +31,7 @@ class WarningLogger(object):
     pass


+@is_download_test
 class TestIqiyiSDKInterpreter(unittest.TestCase):
     def test_iqiyi_sdk_interpreter(self):
         '''

@@ -8,13 +8,14 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import try_rm
+from test.helper import is_download_test, try_rm


 root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 download_file = join(root_dir, 'test.webm')


+@is_download_test
 class TestOverwrites(unittest.TestCase):
     def setUp(self):
         # create an empty file

@@ -7,7 +7,7 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import get_params, try_rm
+from test.helper import get_params, try_rm, is_download_test
 import yt_dlp.YoutubeDL
 from yt_dlp.utils import DownloadError

@@ -22,6 +22,7 @@ TEST_ID = 'gr51aVj-mLg'
 EXPECTED_NAME = 'gr51aVj-mLg'


+@is_download_test
 class TestPostHooks(unittest.TestCase):
     def setUp(self):
         self.stored_name_1 = None
@@ -6,37 +6,38 @@ from __future__ import unicode_literals
 import os
 import sys
 import unittest

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from yt_dlp import YoutubeDL
 from yt_dlp.compat import compat_shlex_quote
 from yt_dlp.postprocessor import (
-    ExecAfterDownloadPP,
+    ExecPP,
     FFmpegThumbnailsConvertorPP,
     MetadataFromFieldPP,
-    MetadataFromTitlePP,
+    MetadataParserPP,
+    ModifyChaptersPP
 )


 class TestMetadataFromField(unittest.TestCase):

     def test_format_to_regex(self):
-        pp = MetadataFromFieldPP(None, ['title:%(title)s - %(artist)s'])
-        self.assertEqual(pp._data[0]['regex'], r'(?P<title>.+)\ \-\ (?P<artist>.+)')
+        self.assertEqual(
+            MetadataParserPP.format_to_regex('%(title)s - %(artist)s'),
+            r'(?P<title>.+)\ \-\ (?P<artist>.+)')
+        self.assertEqual(MetadataParserPP.format_to_regex(r'(?P<x>.+)'), r'(?P<x>.+)')

-    def test_field_to_outtmpl(self):
-        pp = MetadataFromFieldPP(None, ['title:%(title)s : %(artist)s'])
-        self.assertEqual(pp._data[0]['tmpl'], '%(title)s')
+    def test_field_to_template(self):
+        self.assertEqual(MetadataParserPP.field_to_template('title'), '%(title)s')
+        self.assertEqual(MetadataParserPP.field_to_template('1'), '1')
+        self.assertEqual(MetadataParserPP.field_to_template('foo bar'), 'foo bar')
+        self.assertEqual(MetadataParserPP.field_to_template(' literal'), ' literal')

-    def test_in_out_seperation(self):
-        pp = MetadataFromFieldPP(None, ['%(title)s \\: %(artist)s:%(title)s : %(artist)s'])
-        self.assertEqual(pp._data[0]['in'], '%(title)s : %(artist)s')
-        self.assertEqual(pp._data[0]['out'], '%(title)s : %(artist)s')
-
-
-class TestMetadataFromTitle(unittest.TestCase):
-    def test_format_to_regex(self):
-        pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
-        self.assertEqual(pp._titleregex, r'(?P<title>.+)\ \-\ (?P<artist>.+)')
+    def test_metadatafromfield(self):
+        self.assertEqual(
+            MetadataFromFieldPP.to_action('%(title)s \\: %(artist)s:%(title)s : %(artist)s'),
+            (MetadataParserPP.Actions.INTERPRET, '%(title)s : %(artist)s', '%(title)s : %(artist)s'))
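A simplified re-implementation of the idea behind format_to_regex (a sketch, not the actual MetadataParserPP code): each %(field)s becomes a named capture group and everything else is regex-escaped.

    import re

    def format_to_regex(fmt):
        # replace each %(field)s with a named group; escape the literal parts
        regex, last = '', 0
        for match in re.finditer(r'%\((\w+)\)s', fmt):
            regex += re.escape(fmt[last:match.start()])
            regex += rf'(?P<{match.group(1)}>.+)'
            last = match.end()
        return regex + re.escape(fmt[last:])

    assert format_to_regex('%(title)s - %(artist)s') == r'(?P<title>.+)\ \-\ (?P<artist>.+)'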
@@ -60,12 +61,470 @@ class TestConvertThumbnail(unittest.TestCase):
         os.remove(file.format(out))


-class TestExecAfterDownload(unittest.TestCase):
+class TestExec(unittest.TestCase):
     def test_parse_cmd(self):
-        pp = ExecAfterDownloadPP(YoutubeDL(), '')
+        pp = ExecPP(YoutubeDL(), '')
         info = {'filepath': 'file name'}
-        quoted_filepath = compat_shlex_quote(info['filepath'])
+        cmd = 'echo %s' % compat_shlex_quote(info['filepath'])

-        self.assertEqual(pp.parse_cmd('echo', info), 'echo %s' % quoted_filepath)
-        self.assertEqual(pp.parse_cmd('echo.{}', info), 'echo.%s' % quoted_filepath)
-        self.assertEqual(pp.parse_cmd('echo "%(filepath)s"', info), 'echo "%s"' % info['filepath'])
+        self.assertEqual(pp.parse_cmd('echo', info), cmd)
+        self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
+        self.assertEqual(pp.parse_cmd('echo %(filepath)q', info), cmd)
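A sketch of the quoting behaviour these assertions pin down (compat_shlex_quote behaves like shlex.quote here): the bare command, the '{}' placeholder, and %(filepath)q all expand to the shell-quoted path.

    import shlex

    filepath = 'file name'
    cmd = 'echo {}'.format(shlex.quote(filepath))
    assert cmd == "echo 'file name'"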
class TestModifyChaptersPP(unittest.TestCase):
    def setUp(self):
        self._pp = ModifyChaptersPP(YoutubeDL())

    @staticmethod
    def _sponsor_chapter(start, end, cat, remove=False):
        c = {'start_time': start, 'end_time': end, '_categories': [(cat, start, end)]}
        if remove:
            c['remove'] = True
        return c

    @staticmethod
    def _chapter(start, end, title=None, remove=False):
        c = {'start_time': start, 'end_time': end}
        if title is not None:
            c['title'] = title
        if remove:
            c['remove'] = True
        return c

    def _chapters(self, ends, titles):
        self.assertEqual(len(ends), len(titles))
        start = 0
        chapters = []
        for e, t in zip(ends, titles):
            chapters.append(self._chapter(start, e, t))
            start = e
        return chapters

    def _remove_marked_arrange_sponsors_test_impl(
            self, chapters, expected_chapters, expected_removed):
        actual_chapters, actual_removed = (
            self._pp._remove_marked_arrange_sponsors(chapters))
        for c in actual_removed:
            c.pop('title', None)
            c.pop('_categories', None)
        actual_chapters = [{
            'start_time': c['start_time'],
            'end_time': c['end_time'],
            'title': c['title'],
        } for c in actual_chapters]
        self.assertSequenceEqual(expected_chapters, actual_chapters)
        self.assertSequenceEqual(expected_removed, actual_removed)
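The cut handling asserted below reduces, at its core, to merging overlapping removed ranges; a standalone sketch of that step (not the actual _remove_marked_arrange_sponsors code):

    def merge_cuts(ranges):
        # collapse overlapping or touching (start, end) pairs into single cuts
        merged = []
        for start, end in sorted(ranges):
            if merged and start <= merged[-1][1]:
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return [tuple(c) for c in merged]

    # mirrors ChapterWithOverlappingCuts below: three cuts collapse into one
    assert merge_cuts([(10, 30), (20, 50), (40, 60)]) == [(10, 60)]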
    def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])

    def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'preview'),
            self._sponsor_chapter(50, 60, 'sponsor')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Preview/Recap',
             'c', '[SponsorBlock]: Sponsor', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
        chapters = self._chapters([120], ['c']) + [
            self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
            self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'),
            self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')]
        expected = self._chapters(
            [10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
             '[SponsorBlock]: Sponsor',
             'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion',
             'c', '[SponsorBlock]: Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion, Sponsor',
             '[SponsorBlock]: Unpaid/Self Promotion'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
        cuts = [self._chapter(10, 20, remove=True),
                self._sponsor_chapter(30, 40, 'sponsor', remove=True),
                self._chapter(50, 60, remove=True)]
        chapters = self._chapters([70], ['c']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([40], ['c']), cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
            self._sponsor_chapter(50, 60, 'interaction')]
        expected = self._chapters([10, 20, 40, 50, 60],
                                  ['c', '[SponsorBlock]: Sponsor', 'c',
                                   '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 40, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
        cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
                self._chapter(40, 50, remove=True)]
        chapters = self._chapters([70], ['c']) + [self._sponsor_chapter(10, 60, 'sponsor')] + cuts
        expected = self._chapters(
            [10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
        cuts = [self._sponsor_chapter(20, 50, 'selpromo', remove=True)]
        chapters = self._chapters([60], ['c']) + [
            self._sponsor_chapter(10, 20, 'intro'),
            self._sponsor_chapter(30, 40, 'sponsor'),
            self._sponsor_chapter(50, 60, 'outro'),
        ] + cuts
        expected = self._chapters(
            [10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(20, 30, 'selfpromo'),
            self._sponsor_chapter(30, 40, 'interaction')]
        expected = self._chapters(
            [10, 20, 30, 40, 70],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(20, 30, 'interaction', remove=True),
            self._chapter(30, 40, remove=True),
            self._sponsor_chapter(40, 50, 'selpromo', remove=True),
            self._sponsor_chapter(50, 60, 'interaction')]
        expected = self._chapters([10, 20, 30, 40],
                                  ['c', '[SponsorBlock]: Sponsor',
                                   '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(20, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 30, 'sponsor'),
            self._sponsor_chapter(20, 50, 'selfpromo'),
            self._sponsor_chapter(40, 60, 'interaction')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 30, 'sponsor', remove=True),
            self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(40, 60, 'interaction', remove=True)]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
        chapters = self._chapters([170], ['c']) + [
            self._sponsor_chapter(0, 30, 'intro'),
            self._sponsor_chapter(20, 50, 'sponsor'),
            self._sponsor_chapter(40, 60, 'selfpromo'),
            self._sponsor_chapter(70, 90, 'sponsor'),
            self._sponsor_chapter(80, 100, 'sponsor'),
            self._sponsor_chapter(90, 110, 'sponsor'),
            self._sponsor_chapter(120, 140, 'selfpromo'),
            self._sponsor_chapter(130, 160, 'interaction'),
            self._sponsor_chapter(150, 170, 'outro')]
        expected = self._chapters(
            [20, 30, 40, 50, 60, 70, 110, 120, 130, 140, 150, 160, 170],
            ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Intermission/Intro Animation, Sponsor', '[SponsorBlock]: Sponsor',
             '[SponsorBlock]: Sponsor, Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion', 'c',
             '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder, Endcards/Credits', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
        chapters = self._chapters([170], ['c']) + [
            self._chapter(0, 30, remove=True),
            self._sponsor_chapter(20, 50, 'sponsor', remove=True),
            self._chapter(40, 60, remove=True),
            self._sponsor_chapter(70, 90, 'sponsor', remove=True),
            self._chapter(80, 100, remove=True),
            self._chapter(90, 110, remove=True),
            self._sponsor_chapter(120, 140, 'sponsor', remove=True),
            self._sponsor_chapter(130, 160, 'selfpromo', remove=True),
            self._chapter(150, 170, remove=True)]
        expected_cuts = [self._chapter(0, 60, remove=True),
                         self._chapter(70, 110, remove=True),
                         self._chapter(120, 170, remove=True)]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([20], ['c']), expected_cuts)

    def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
        chapters = self._chapters([60], ['c']) + [
            self._sponsor_chapter(10, 60, 'sponsor'),
            self._sponsor_chapter(10, 40, 'intro'),
            self._sponsor_chapter(30, 50, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(40, 50, 'interaction'),
            self._sponsor_chapter(50, 60, 'outro')]
        expected = self._chapters(
            [10, 30, 40], ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation', '[SponsorBlock]: Sponsor, Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 30, 'sponsor'),
            self._sponsor_chapter(20, 50, 'interaction'),
            self._sponsor_chapter(30, 50, 'selpromo', remove=True),
            self._sponsor_chapter(40, 60, 'sponsor'),
            self._sponsor_chapter(50, 60, 'interaction')]
        expected = self._chapters(
            [10, 20, 40, 50], ['c', '[SponsorBlock]: Sponsor',
                               '[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 60, 'sponsor'),
            self._sponsor_chapter(20, 60, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
        expected = self._chapters(
            [10, 20, 40, 50], ['c', '[SponsorBlock]: Sponsor',
                               '[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
        chapters = self._chapters([200], ['c']) + [
            self._sponsor_chapter(10, 40, 'sponsor'),
            self._sponsor_chapter(10, 30, 'intro'),
            self._chapter(20, 30, remove=True),
            self._sponsor_chapter(30, 40, 'selfpromo'),
            self._sponsor_chapter(50, 70, 'sponsor'),
            self._sponsor_chapter(60, 80, 'interaction'),
            self._chapter(70, 80, remove=True),
            self._sponsor_chapter(70, 90, 'sponsor'),
            self._sponsor_chapter(80, 100, 'interaction'),
            self._sponsor_chapter(120, 170, 'selfpromo'),
            self._sponsor_chapter(130, 180, 'outro'),
            self._chapter(140, 150, remove=True),
            self._chapter(150, 160, remove=True)]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 70, 80, 100, 110, 130, 140, 160],
            ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
             'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Interaction Reminder',
             '[SponsorBlock]: Interaction Reminder', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
             '[SponsorBlock]: Unpaid/Self Promotion, Endcards/Credits', '[SponsorBlock]: Endcards/Credits', 'c'])
        expected_cuts = [self._chapter(20, 30, remove=True),
                         self._chapter(70, 80, remove=True),
                         self._chapter(140, 160, remove=True)]
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)

    def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
        chapters = (self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5'])
                    + [self._sponsor_chapter(10, 90, 'sponsor')])
        expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
        cuts = [self._chapter(10, 90, remove=True)]
        chapters = self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']) + cuts
        expected = self._chapters([10, 20], ['c1', 'c5'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
        chapters = (self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4'])
                    + [self._sponsor_chapter(20, 30, 'sponsor'),
                       self._sponsor_chapter(50, 70, 'selfpromo')])
        expected = self._chapters([10, 20, 30, 40, 50, 70, 80],
                                  ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
                                   '[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthers(self):
        cuts = [self._chapter(20, 30, remove=True), self._chapter(50, 70, remove=True)]
        chapters = self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']) + cuts
        expected = self._chapters([10, 30, 40, 50], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
        chapters = (self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4'])
                    + [self._sponsor_chapter(10, 30, 'music_offtopic')])
        expected = self._chapters(
            [10, 30, 40, 50, 60],
            ['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
        cuts = [self._chapter(10, 30, remove=True)]
        chapters = self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']) + cuts
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
        chapters = (self._chapters([10, 20, 40], ['c1', 'c2', 'c3'])
                    + [self._sponsor_chapter(20, 30, 'sponsor')])
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
        cuts = [self._chapter(20, 30, remove=True)]
        chapters = self._chapters([10, 20, 40], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
        chapters = (self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
                    + [self._sponsor_chapter(20, 30, 'sponsor')])
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
        cuts = [self._chapter(20, 30, remove=True)]
        chapters = self._chapters([10, 30, 40], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
                    + [self._sponsor_chapter(10, 30, 'sponsor')])
        expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
        cuts = [self._chapter(10, 30, remove=True)]
        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
        expected = self._chapters([10, 20], ['c1', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
        chapters = (self._chapters([20, 40, 60], ['c1', 'c2', 'c3'])
                    + [self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')])
        expected = self._chapters(
            [10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
        cuts = [self._chapter(0, 10, remove=True), self._chapter(50, 60, remove=True)]
        chapters = self._chapters([20, 40, 60], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
        chapters = (self._chapters([10, 40, 50], ['c1', 'c2', 'c3'])
                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')])
        expected = self._chapters(
            [20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(self):
        cuts = [self._chapter(0, 20, remove=True), self._chapter(30, 50, remove=True)]
        chapters = self._chapters([10, 40, 50], ['c1', 'c2', 'c3']) + cuts
        expected = self._chapters([10], ['c2'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')])
        expected = self._chapters([20, 40], ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_EverythingCut(self):
        cuts = [self._chapter(0, 20, remove=True), self._chapter(20, 40, remove=True)]
        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, [], [self._chapter(0, 40, remove=True)])

    def test_remove_marked_arrange_sponsors_TinyChaptersInTheOriginalArePreserved(self):
        chapters = self._chapters([0.1, 0.2, 0.3, 0.4], ['c1', 'c2', 'c3', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])

    def test_remove_marked_arrange_sponsors_TinySponsorsAreIgnored(self):
        chapters = [self._sponsor_chapter(0, 0.1, 'intro'), self._chapter(0.1, 0.2, 'c1'),
                    self._sponsor_chapter(0.2, 0.3, 'sponsor'), self._chapter(0.3, 0.4, 'c2'),
                    self._sponsor_chapter(0.4, 0.5, 'outro')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([0.3, 0.5], ['c1', 'c2']), [])

    def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromCutsAreIgnored(self):
        cuts = [self._chapter(1.5, 2.5, remove=True)]
        chapters = self._chapters([2, 3, 3.5], ['c1', 'c2', 'c3']) + cuts
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([2, 2.5], ['c1', 'c3']), cuts)

    def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
        chapters = self._chapters([1, 3, 4], ['c1', 'c2', 'c3']) + [
            self._sponsor_chapter(1.5, 2.5, 'sponsor')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1.5, 3, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])

    def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
        chapters = self._chapters([2, 3, 5], ['c1', 'c2', 'c3']) + [
            self._sponsor_chapter(1, 3, 'sponsor'),
            self._sponsor_chapter(2.5, 4, 'selfpromo')
        ]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1, 3, 4, 5], [
                'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])
    def test_make_concat_opts_CommonCase(self):
        sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
outpoint 1.000000
file 'file:test'
inpoint 2.000000
outpoint 10.000000
file 'file:test'
inpoint 20.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 30)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_make_concat_opts_NoZeroDurationChunkAtVideoStart(self):
        sponsor_chapters = [self._chapter(0, 1, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
inpoint 1.000000
outpoint 10.000000
file 'file:test'
inpoint 20.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 30)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_make_concat_opts_NoZeroDurationChunkAtVideoEnd(self):
        sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
outpoint 1.000000
file 'file:test'
inpoint 2.000000
outpoint 10.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 20)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_quote_for_concat_RunsOfQuotes(self):
        self.assertEqual(
            r"'special '\'' '\'\''characters'\'\'\''galore'",
            self._pp._quote_for_ffmpeg("special ' ''characters'''galore"))

    def test_quote_for_concat_QuotesAtStart(self):
        self.assertEqual(
            r"\'\'\''special '\'' characters '\'' galore'",
            self._pp._quote_for_ffmpeg("'''special ' characters ' galore"))

    def test_quote_for_concat_QuotesAtEnd(self):
        self.assertEqual(
            r"'special '\'' characters '\'' galore'\'\'\'",
            self._pp._quote_for_ffmpeg("special ' characters ' galore'''"))
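The expected strings in the concat tests follow ffmpeg's concat demuxer syntax; a sketch of emitting such a spec from kept (inpoint, outpoint) chunks (simplified relative to the actual _concat_spec):

    def concat_spec(filename, chunks):
        lines = ['ffconcat version 1.0\n']
        for inpoint, outpoint in chunks:
            lines.append(f"file 'file:{filename}'\n")
            if inpoint:  # omitted when the chunk starts at 0
                lines.append(f'inpoint {inpoint:.6f}\n')
            if outpoint is not None:  # omitted when the chunk runs to the end
                lines.append(f'outpoint {outpoint:.6f}\n')
        return ''.join(lines)

    # reproduces the CommonCase expectation: keep [0,1], [2,10] and [20,end]
    print(concat_spec('test', [(0, 1), (2, 10), (20, None)]))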
@@ -14,6 +14,7 @@ import subprocess
 from test.helper import (
     FakeYDL,
     get_params,
+    is_download_test,
 )
 from yt_dlp.compat import (
     compat_str,
@@ -21,6 +22,7 @@ from yt_dlp.compat import (
 )


+@is_download_test
 class TestMultipleSocks(unittest.TestCase):
     @staticmethod
     def _check_params(attrs):
@@ -76,6 +78,7 @@ class TestMultipleSocks(unittest.TestCase):
             params['secondary_server_ip'])


+@is_download_test
 class TestSocks(unittest.TestCase):
     _SKIP_SOCKS_TEST = True
@@ -7,7 +7,7 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import FakeYDL, md5
+from test.helper import FakeYDL, md5, is_download_test

 from yt_dlp.extractor import (
@@ -30,6 +30,7 @@ from yt_dlp.extractor import (
 )


+@is_download_test
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
@@ -55,6 +56,7 @@ class BaseTestSubtitles(unittest.TestCase):
         return dict((l, sub_info['data']) for l, sub_info in subtitles.items())


+@is_download_test
 class TestYoutubeSubtitles(BaseTestSubtitles):
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE
@@ -111,6 +113,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)


+@is_download_test
 class TestDailymotionSubtitles(BaseTestSubtitles):
     url = 'http://www.dailymotion.com/video/xczg00'
     IE = DailymotionIE
@@ -134,6 +137,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)


+@is_download_test
 class TestTedSubtitles(BaseTestSubtitles):
     url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
     IE = TEDIE
@@ -149,6 +153,7 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)


+@is_download_test
 class TestVimeoSubtitles(BaseTestSubtitles):
     url = 'http://vimeo.com/76979871'
     IE = VimeoIE
@@ -170,6 +175,7 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)


+@is_download_test
 class TestWallaSubtitles(BaseTestSubtitles):
     url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
     IE = WallaIE
@@ -191,6 +197,7 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)


+@is_download_test
 class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
     url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
     IE = CeskaTelevizeIE
@@ -212,6 +219,7 @@ class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
         self.assertFalse(subtitles)


+@is_download_test
 class TestLyndaSubtitles(BaseTestSubtitles):
     url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
     IE = LyndaIE
@@ -224,6 +232,7 @@ class TestLyndaSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')


+@is_download_test
 class TestNPOSubtitles(BaseTestSubtitles):
     url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
     IE = NPOIE
@@ -236,6 +245,7 @@ class TestNPOSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')


+@is_download_test
 class TestMTVSubtitles(BaseTestSubtitles):
     url = 'http://www.cc.com/video-clips/p63lk0/adam-devine-s-house-party-chasing-white-swans'
     IE = ComedyCentralIE
@@ -251,6 +261,7 @@ class TestMTVSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '78206b8d8a0cfa9da64dc026eea48961')


+@is_download_test
 class TestNRKSubtitles(BaseTestSubtitles):
     url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
     IE = NRKTVIE
@@ -263,6 +274,7 @@ class TestNRKSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')


+@is_download_test
 class TestRaiPlaySubtitles(BaseTestSubtitles):
     IE = RaiPlayIE

@@ -283,6 +295,7 @@ class TestRaiPlaySubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['it']), '4b3264186fbb103508abe5311cfcb9cd')


+@is_download_test
 class TestVikiSubtitles(BaseTestSubtitles):
     url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
     IE = VikiIE
@@ -295,6 +308,7 @@ class TestVikiSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a')


+@is_download_test
 class TestThePlatformSubtitles(BaseTestSubtitles):
     # from http://www.3playmedia.com/services-features/tools/integrations/theplatform/
     # (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/)
@@ -309,6 +323,7 @@ class TestThePlatformSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')


+@is_download_test
 class TestThePlatformFeedSubtitles(BaseTestSubtitles):
     url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
     IE = ThePlatformFeedIE
@@ -321,6 +336,7 @@ class TestThePlatformFeedSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')


+@is_download_test
 class TestRtveSubtitles(BaseTestSubtitles):
     url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
     IE = RTVEALaCartaIE
@@ -335,6 +351,7 @@ class TestRtveSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')


+@is_download_test
 class TestDemocracynowSubtitles(BaseTestSubtitles):
     url = 'http://www.democracynow.org/shows/2015/7/3'
     IE = DemocracynowIE
@@ -62,6 +62,7 @@ from yt_dlp.utils import (
     parse_iso8601,
     parse_resolution,
     parse_bitrate,
+    parse_qs,
     pkcs1pad,
     read_batch_urls,
     sanitize_filename,
@@ -117,8 +118,6 @@ from yt_dlp.compat import (
     compat_getenv,
     compat_os_name,
     compat_setenv,
-    compat_urlparse,
-    compat_parse_qs,
 )

@@ -688,38 +687,36 @@ class TestUtil(unittest.TestCase):
         self.assertTrue(isinstance(data, bytes))

     def test_update_url_query(self):
-        def query_dict(url):
-            return compat_parse_qs(compat_urlparse.urlparse(url).query)
-        self.assertEqual(query_dict(update_url_query(
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
-            query_dict('http://example.com/path?quality=HD&format=mp4'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?quality=HD&format=mp4'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
-            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': 'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?manifest=f4m', {'manifest': []})),
-            query_dict('http://example.com/path'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
-            query_dict('http://example.com/path?system=LINUX'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?system=LINUX'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'fields': b'id,formats,subtitles'})),
-            query_dict('http://example.com/path?fields=id,formats,subtitles'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'width': 1080, 'height': 720})),
-            query_dict('http://example.com/path?width=1080&height=720'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?width=1080&height=720'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'bitrate': 5020.43})),
-            query_dict('http://example.com/path?bitrate=5020.43'))
-        self.assertEqual(query_dict(update_url_query(
+            parse_qs('http://example.com/path?bitrate=5020.43'))
+        self.assertEqual(parse_qs(update_url_query(
             'http://example.com/path', {'test': '第二行тест'})),
-            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
+            parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
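A usage sketch of the two helpers this hunk switches between (parse_qs is the yt_dlp.utils wrapper added in the import hunk above):

    from yt_dlp.utils import parse_qs, update_url_query

    url = update_url_query('http://example.com/path', {'quality': 'HD'})
    # like urllib.parse.parse_qs, every value comes back as a list
    assert parse_qs(url) == {'quality': ['HD']}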
def test_multipart_encode(self):
|
||||
self.assertEqual(
|
||||
@@ -1054,6 +1051,9 @@ class TestUtil(unittest.TestCase):
|
||||
on = js_to_json('{ "040": "040" }')
|
||||
self.assertEqual(json.loads(on), {'040': '040'})
|
||||
|
||||
on = js_to_json('[1,//{},\n2]')
|
||||
self.assertEqual(json.loads(on), [1, 2])
|
||||
|
||||
    def test_js_to_json_malformed(self):
        self.assertEqual(js_to_json('42a1'), '42"a1"')
        self.assertEqual(js_to_json('42a-1'), '42"a"-1')

@@ -1204,35 +1204,12 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
            '9999 51')

    def test_match_str(self):
        self.assertRaises(ValueError, match_str, 'xy>foobar', {})
        # Unary
        self.assertFalse(match_str('xy', {'x': 1200}))
        self.assertTrue(match_str('!xy', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 1200}))
        self.assertFalse(match_str('!x', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 0}))
        self.assertFalse(match_str('x>0', {'x': 0}))
        self.assertFalse(match_str('x>0', {}))
        self.assertTrue(match_str('x>?0', {}))
        self.assertTrue(match_str('x>1K', {'x': 1200}))
        self.assertFalse(match_str('x>2K', {'x': 1200}))
        self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
        self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
        self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
        self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 90, 'description': 'foo'}))
        self.assertTrue(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 10}))
        self.assertTrue(match_str('is_live', {'is_live': True}))
        self.assertFalse(match_str('is_live', {'is_live': False}))
        self.assertFalse(match_str('is_live', {'is_live': None}))
@@ -1246,6 +1223,75 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
        self.assertFalse(match_str('!title', {'title': 'abc'}))
        self.assertFalse(match_str('!title', {'title': ''}))

        # Numeric
        self.assertFalse(match_str('x>0', {'x': 0}))
        self.assertFalse(match_str('x>0', {}))
        self.assertTrue(match_str('x>?0', {}))
        self.assertTrue(match_str('x>1K', {'x': 1200}))
        self.assertFalse(match_str('x>2K', {'x': 1200}))
        self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
        self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))

        # String
        self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
        self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
        self.assertTrue(match_str('y^=foo', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!^=foo', {'y': 'foobar42'}))
        self.assertFalse(match_str('y^=bar', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!^=bar', {'y': 'foobar42'}))
        self.assertRaises(ValueError, match_str, 'x^=42', {'x': 42})
        self.assertTrue(match_str('y*=bar', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!*=bar', {'y': 'foobar42'}))
        self.assertFalse(match_str('y*=baz', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!*=baz', {'y': 'foobar42'}))
        self.assertTrue(match_str('y$=42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y$=43', {'y': 'foobar42'}))

        # And
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 90, 'description': 'foo'}))
        self.assertTrue(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 10}))

        # Regex
        self.assertTrue(match_str(r'x~=\bbar', {'x': 'foo bar'}))
        self.assertFalse(match_str(r'x~=\bbar.+', {'x': 'foo bar'}))
        self.assertFalse(match_str(r'x~=^FOO', {'x': 'foo bar'}))
        self.assertTrue(match_str(r'x~=(?i)^FOO', {'x': 'foo bar'}))

        # Quotes
        self.assertTrue(match_str(r'x^="foo"', {'x': 'foo "bar"'}))
        self.assertFalse(match_str(r'x^="foo "', {'x': 'foo "bar"'}))
        self.assertFalse(match_str(r'x$="bar"', {'x': 'foo "bar"'}))
        self.assertTrue(match_str(r'x$=" \"bar\""', {'x': 'foo "bar"'}))

        # Escaping &
        self.assertFalse(match_str(r'x=foo & bar', {'x': 'foo & bar'}))
        self.assertTrue(match_str(r'x=foo \& bar', {'x': 'foo & bar'}))
        self.assertTrue(match_str(r'x=foo \& bar & x^=foo', {'x': 'foo & bar'}))
        self.assertTrue(match_str(r'x="foo \& bar" & x^=foo', {'x': 'foo & bar'}))

        # Example from docs
        self.assertTrue(match_str(
            r"!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'",
            {'description': 'Raining Cats & Dogs'}))

        # Incomplete
        self.assertFalse(match_str('id!=foo', {'id': 'foo'}, True))
        self.assertTrue(match_str('x', {'id': 'foo'}, True))
        self.assertTrue(match_str('!x', {'id': 'foo'}, True))
        self.assertFalse(match_str('x', {'id': 'foo'}, False))
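Aside (not part of the diff): the filter language exercised above is the same syntax that backs yt-dlp's --match-filter option, and it can be tried directly against match_str. A minimal sketch, assuming yt_dlp is importable:

from yt_dlp.utils import match_str

info = {'like_count': 190, 'description': 'Raining Cats & Dogs'}
# missing fields are falsy ('!is_live' passes), and '>?' also passes when the field is absent
assert match_str(r"!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'", info)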
    def test_parse_dfxp_time_expr(self):
        self.assertEqual(parse_dfxp_time_expr(None), None)
        self.assertEqual(parse_dfxp_time_expr(''), None)
@@ -1534,8 +1580,11 @@ Line 1
        self.assertEqual(LazyList(it).exhaust(), it)
        self.assertEqual(LazyList(it)[5], it[5])

        self.assertEqual(LazyList(it)[5:], it[5:])
        self.assertEqual(LazyList(it)[:5], it[:5])
        self.assertEqual(LazyList(it)[::2], it[::2])
        self.assertEqual(LazyList(it)[1::2], it[1::2])
        self.assertEqual(LazyList(it)[5::-1], it[5::-1])
        self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
        self.assertEqual(LazyList(it)[::-1], it[::-1])

@@ -1545,8 +1594,9 @@ Line 1
        self.assertEqual(repr(LazyList(it)), repr(it))
        self.assertEqual(str(LazyList(it)), str(it))

-        self.assertEqual(list(reversed(LazyList(it))), it[::-1])
-        self.assertEqual(list(reversed(LazyList(it))[1:3:7]), it[::-1][1:3:7])
+        self.assertEqual(list(LazyList(it).reverse()), it[::-1])
+        self.assertEqual(list(LazyList(it).reverse()[1:3:7]), it[::-1][1:3:7])
+        self.assertEqual(list(LazyList(it).reverse()[::-1]), it)

    def test_LazyList_laziness(self):

@@ -1559,13 +1609,13 @@ Line 1
        test(ll, 5, 5, range(6))
        test(ll, -3, 7, range(10))

-        ll = reversed(LazyList(range(10)))
+        ll = LazyList(range(10)).reverse()
        test(ll, -1, 0, range(1))
        test(ll, 3, 6, range(10))

        ll = LazyList(itertools.count())
        test(ll, 10, 10, range(11))
-        reversed(ll)
+        ll.reverse()
        test(ll, -15, 14, range(15))
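Aside (not part of the diff): the laziness test above pins down LazyList's contract, so a minimal sketch of it, assuming yt_dlp.utils.LazyList as exercised there:

import itertools
from yt_dlp.utils import LazyList

ll = LazyList(itertools.count())  # infinite iterator; nothing consumed yet
assert ll[10] == 10               # consumes exactly items 0..10
ll.reverse()                      # flips direction in place; nothing is exhausted
assert ll[-15] == 14              # negative index against the reversed view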
@@ -8,7 +8,7 @@ import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import get_params, try_rm
+from test.helper import get_params, try_rm, is_download_test


import io
@@ -38,6 +38,7 @@ ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']


+@is_download_test
class TestAnnotations(unittest.TestCase):
    def setUp(self):
        # Clear old files
@@ -7,7 +7,7 @@ import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import FakeYDL
+from test.helper import FakeYDL, is_download_test


from yt_dlp.extractor import (
@@ -17,6 +17,7 @@ from yt_dlp.extractor import (
)


+@is_download_test
class TestYoutubeLists(unittest.TestCase):
    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
@@ -12,7 +12,7 @@ import io
import re
import string

-from test.helper import FakeYDL
+from test.helper import FakeYDL, is_download_test
from yt_dlp.extractor import YoutubeIE
from yt_dlp.compat import compat_str, compat_urlretrieve

@@ -65,6 +65,7 @@ _TESTS = [
]


+@is_download_test
class TestPlayerInfo(unittest.TestCase):
    def test_youtube_extract_player_info(self):
        PLAYER_URLS = (
@@ -87,6 +88,7 @@ class TestPlayerInfo(unittest.TestCase):
        self.assertEqual(player_id, expected_player_id)


+@is_download_test
class TestSignature(unittest.TestCase):
    def setUp(self):
        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
tox.ini (2 lines changed)
@@ -1,5 +1,7 @@
[tox]
envlist = py26,py27,py33,py34,py35

# Needed?
[testenv]
deps =
    nose

File diff suppressed because it is too large
@@ -1,25 +1,27 @@
#!/usr/bin/env python3
# coding: utf-8

from __future__ import unicode_literals
f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp'  # noqa: F541

__license__ = 'Public Domain'

import codecs
import io
import itertools
import os
import random
import re
import sys


from .options import (
    parseOpts,
)
from .compat import (
    compat_getpass,
    compat_shlex_quote,
    workaround_optparse_bug9161,
)
+from .cookies import SUPPORTED_BROWSERS
from .utils import (
    DateRange,
    decodeOption,
@@ -45,14 +47,15 @@ from .downloader import (
from .extractor import gen_extractors, list_extractors
from .extractor.common import InfoExtractor
from .extractor.adobepass import MSO_INFO
-from .postprocessor.ffmpeg import (
+from .postprocessor import (
    FFmpegExtractAudioPP,
    FFmpegSubtitlesConvertorPP,
    FFmpegThumbnailsConvertorPP,
    FFmpegVideoConvertorPP,
    FFmpegVideoRemuxerPP,
    MetadataFromFieldPP,
+    MetadataParserPP,
)
-from .postprocessor.metadatafromfield import MetadataFromFieldPP
from .YoutubeDL import YoutubeDL


@@ -106,14 +109,14 @@ def _real_main(argv=None):

    if opts.list_extractors:
        for ie in list_extractors(opts.age_limit):
-            write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
+            write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n', out=sys.stdout)
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                write_string(' ' + mu + '\n', out=sys.stdout)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in list_extractors(opts.age_limit):
-            if not ie._WORKING:
+            if not ie.working():
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:
@@ -242,40 +245,18 @@ def _real_main(argv=None):
    if opts.convertthumbnails not in FFmpegThumbnailsConvertorPP.SUPPORTED_EXTS:
        parser.error('invalid thumbnail format specified')

+    if opts.cookiesfrombrowser is not None:
+        opts.cookiesfrombrowser = [
+            part.strip() or None for part in opts.cookiesfrombrowser.split(':', 1)]
+        if opts.cookiesfrombrowser[0] not in SUPPORTED_BROWSERS:
+            parser.error('unsupported browser specified for cookies')

    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)

-    def parse_compat_opts():
-        parsed_compat_opts, compat_opts = set(), opts.compat_opts[::-1]
-        while compat_opts:
-            actual_opt = opt = compat_opts.pop().lower()
-            if opt == 'youtube-dl':
-                compat_opts.extend(['-multistreams', 'all'])
-            elif opt == 'youtube-dlc':
-                compat_opts.extend(['-no-youtube-channel-redirect', '-no-live-chat', 'all'])
-            elif opt == 'all':
-                parsed_compat_opts.update(all_compat_opts)
-            elif opt == '-all':
-                parsed_compat_opts = set()
-            else:
-                if opt[0] == '-':
-                    opt = opt[1:]
-                    parsed_compat_opts.discard(opt)
-                else:
-                    parsed_compat_opts.update([opt])
-                if opt not in all_compat_opts:
-                    parser.error('Invalid compatibility option %s' % actual_opt)
-        return parsed_compat_opts
-
-    all_compat_opts = [
-        'filename', 'format-sort', 'abort-on-error', 'format-spec', 'no-playlist-metafiles',
-        'multistreams', 'no-live-chat', 'playlist-index', 'list-formats', 'no-direct-merge',
-        'no-youtube-channel-redirect', 'no-youtube-unavailable-videos', 'no-attach-info-json',
-        'embed-thumbnail-atomicparsley',
-    ]
-    compat_opts = parse_compat_opts()
+    compat_opts = opts.compat_opts

    def _unused_compat_opt(name):
        if name not in compat_opts:
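Aside (not part of the diff): the --cookies-from-browser value added above splits on the first ':' only, so a Windows profile path keeps its drive colon. A standalone sketch of that parsing:

def parse_browser_spec(value):
    # mirrors opts.cookiesfrombrowser.split(':', 1) from the hunk above
    return [part.strip() or None for part in value.split(':', 1)]

print(parse_browser_spec('chrome'))                       # ['chrome']
print(parse_browser_spec('firefox:default-release'))      # ['firefox', 'default-release']
print(parse_browser_spec(r'edge:C:\Users\me\Profile 1'))  # profile keeps its drive colon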
@@ -284,7 +265,7 @@ def _real_main(argv=None):
        compat_opts.update(['*%s' % name])
        return True

-    def set_default_compat(compat_name, opt_name, default=True, remove_compat=False):
+    def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
        attr = getattr(opts, opt_name)
        if compat_name in compat_opts:
            if attr is None:
@@ -300,6 +281,7 @@ def _real_main(argv=None):

    set_default_compat('abort-on-error', 'ignoreerrors')
    set_default_compat('no-playlist-metafiles', 'allow_playlist_files')
+    set_default_compat('no-clean-infojson', 'clean_infojson')
    if 'format-sort' in compat_opts:
        opts.format_sort.extend(InfoExtractor.FormatSort.ytdl_default)
    _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)
@@ -309,7 +291,7 @@ def _real_main(argv=None):
    outtmpl_default = opts.outtmpl.get('default')
    if 'filename' in compat_opts:
        if outtmpl_default is None:
-            outtmpl_default = '%(title)s.%(id)s.%(ext)s'
+            outtmpl_default = '%(title)s-%(id)s.%(ext)s'
            opts.outtmpl.update({'default': outtmpl_default})
        else:
            _unused_compat_opt('filename')
@@ -321,8 +303,10 @@ def _real_main(argv=None):

    for k, tmpl in opts.outtmpl.items():
        validate_outtmpl(tmpl, '%s output template' % k)
-    for tmpl in opts.forceprint:
+    opts.forceprint = opts.forceprint or []
+    for tmpl in opts.forceprint or []:
        validate_outtmpl(tmpl, 'print template')
+    validate_outtmpl(opts.sponsorblock_chapter_title, 'SponsorBlock chapter title')

    if opts.extractaudio and not opts.keepvideo and opts.format is None:
        opts.format = 'bestaudio/best'
@@ -336,13 +320,29 @@ def _real_main(argv=None):
        if re.match(InfoExtractor.FormatSort.regex, f) is None:
            parser.error('invalid format sort string "%s" specified' % f)

-    if opts.metafromfield is None:
-        opts.metafromfield = []
+    def metadataparser_actions(f):
+        if isinstance(f, str):
+            cmd = '--parse-metadata %s' % compat_shlex_quote(f)
+            try:
+                actions = [MetadataFromFieldPP.to_action(f)]
+            except Exception as err:
+                parser.error(f'{cmd} is invalid; {err}')
+        else:
+            cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
+            actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))
+
+        for action in actions:
+            try:
+                MetadataParserPP.validate_action(*action)
+            except Exception as err:
+                parser.error(f'{cmd} is invalid; {err}')
+            yield action
+
+    if opts.parse_metadata is None:
+        opts.parse_metadata = []
    if opts.metafromtitle is not None:
-        opts.metafromfield.append('title:%s' % opts.metafromtitle)
-    for f in opts.metafromfield:
-        if re.match(MetadataFromFieldPP.regex, f) is None:
-            parser.error('invalid format string "%s" specified for --parse-metadata' % f)
+        opts.parse_metadata.append('title:%s' % opts.metafromtitle)
+    opts.parse_metadata = list(itertools.chain(*map(metadataparser_actions, opts.parse_metadata)))

    any_getting = opts.forceprint or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
    any_printing = opts.print_json
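Aside (not part of the diff): metadataparser_actions above collapses both CLI forms into one action list. A rough, standalone sketch of the --replace-in-metadata expansion it performs (the real action constants live in MetadataParserPP; 'replace' here is a stand-in):

REPLACE = 'replace'  # stand-in for MetadataParserPP.Actions.REPLACE

def replace_actions(f):
    # f mirrors the parsed option value: [comma-separated fields, search, replacement]
    return [(REPLACE, field, *f[1:]) for field in f[0].split(',')]

print(replace_actions(['title,uploader', 'foo', 'bar']))
# [('replace', 'title', 'foo', 'bar'), ('replace', 'uploader', 'foo', 'bar')]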
@@ -353,15 +353,34 @@ def _real_main(argv=None):
    if opts.getcomments and not printing_json:
        opts.writeinfojson = True

+    if opts.no_sponsorblock:
+        opts.sponsorblock_mark = set()
+        opts.sponsorblock_remove = set()
+    sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
+
+    if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None:
+        opts.addchapters = True
+    opts.remove_chapters = opts.remove_chapters or []
+
    def report_conflict(arg1, arg2):
        warnings.append('%s is ignored since %s was given' % (arg2, arg1))

+    if (opts.remove_chapters or sponsorblock_query) and opts.sponskrub is not False:
+        if opts.sponskrub:
+            if opts.remove_chapters:
+                report_conflict('--remove-chapters', '--sponskrub')
+            if opts.sponsorblock_mark:
+                report_conflict('--sponsorblock-mark', '--sponskrub')
+            if opts.sponsorblock_remove:
+                report_conflict('--sponsorblock-remove', '--sponskrub')
+        opts.sponskrub = False
+    if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
+        report_conflict('--split-chapter', '--sponskrub-cut')
+        opts.sponskrub_cut = False
+
    if opts.remuxvideo and opts.recodevideo:
        report_conflict('--recode-video', '--remux-video')
        opts.remuxvideo = False
-    if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
-        report_conflict('--split-chapter', '--sponskrub-cut')
-        opts.sponskrub_cut = False

    if opts.allow_unplayable_formats:
        if opts.extractaudio:
@@ -388,16 +407,30 @@ def _real_main(argv=None):
        if opts.fixup and opts.fixup.lower() not in ('never', 'ignore'):
            report_conflict('--allow-unplayable-formats', '--fixup')
        opts.fixup = 'never'
+        if opts.remove_chapters:
+            report_conflict('--allow-unplayable-formats', '--remove-chapters')
+        opts.remove_chapters = []
+        if opts.sponsorblock_remove:
+            report_conflict('--allow-unplayable-formats', '--sponsorblock-remove')
+        opts.sponsorblock_remove = set()
        if opts.sponskrub:
            report_conflict('--allow-unplayable-formats', '--sponskrub')
        opts.sponskrub = False

    # PostProcessors
    postprocessors = []
-    if opts.metafromfield:
+    if sponsorblock_query:
        postprocessors.append({
-            'key': 'MetadataFromField',
-            'formats': opts.metafromfield,
+            'key': 'SponsorBlock',
+            'categories': sponsorblock_query,
+            'api': opts.sponsorblock_api,
+            # Run this immediately after extraction is complete
+            'when': 'pre_process'
+        })
+    if opts.parse_metadata:
+        postprocessors.append({
+            'key': 'MetadataParser',
+            'actions': opts.parse_metadata,
            # Run this immediately after extraction is complete
            'when': 'pre_process'
        })
@@ -415,6 +448,13 @@ def _real_main(argv=None):
            # Run this before the actual video download
            'when': 'before_dl'
        })
+    # Must be after all other before_dl
+    if opts.exec_before_dl_cmd:
+        postprocessors.append({
+            'key': 'Exec',
+            'exec_cmd': opts.exec_before_dl_cmd,
+            'when': 'before_dl'
+        })
    if opts.extractaudio:
        postprocessors.append({
            'key': 'FFmpegExtractAudio',
@@ -432,29 +472,47 @@ def _real_main(argv=None):
            'key': 'FFmpegVideoConvertor',
            'preferedformat': opts.recodevideo,
        })
-    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
-    # FFmpegExtractAudioPP as containers before conversion may not support
-    # metadata (3gp, webm, etc.)
-    # And this post-processor should be placed before other metadata
-    # manipulating post-processors (FFmpegEmbedSubtitle) to prevent loss of
-    # extra metadata. By default ffmpeg preserves metadata applicable for both
-    # source and target containers. From this point the container won't change,
-    # so metadata can be added here.
-    if opts.addmetadata:
-        postprocessors.append({'key': 'FFmpegMetadata'})
+    # If ModifyChapters is going to remove chapters, subtitles must already be in the container.
    if opts.embedsubtitles:
-        already_have_subtitle = opts.writesubtitles
+        already_have_subtitle = opts.writesubtitles and 'no-keep-subs' not in compat_opts
        postprocessors.append({
            'key': 'FFmpegEmbedSubtitle',
            # already_have_subtitle = True prevents the file from being deleted after embedding
            'already_have_subtitle': already_have_subtitle
        })
-        if not already_have_subtitle:
+        if not opts.writeautomaticsub and 'no-keep-subs' not in compat_opts:
            opts.writesubtitles = True
    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and not opts.writeautomaticsub:
        opts.writesubtitles = True
+    # ModifyChapters must run before FFmpegMetadataPP
+    remove_chapters_patterns = []
+    for regex in opts.remove_chapters:
+        try:
+            remove_chapters_patterns.append(re.compile(regex))
+        except re.error as err:
+            parser.error(f'invalid --remove-chapters regex {regex!r} - {err}')
+    if opts.remove_chapters or sponsorblock_query:
+        postprocessors.append({
+            'key': 'ModifyChapters',
+            'remove_chapters_patterns': remove_chapters_patterns,
+            'remove_sponsor_segments': opts.sponsorblock_remove,
+            'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
+            'force_keyframes': opts.force_keyframes_at_cuts
+        })
+    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
+    # FFmpegExtractAudioPP as containers before conversion may not support
+    # metadata (3gp, webm, etc.)
+    # By default ffmpeg preserves metadata applicable for both
+    # source and target containers. From this point the container won't change,
+    # so metadata can be added here.
+    if opts.addmetadata or opts.addchapters:
+        postprocessors.append({
+            'key': 'FFmpegMetadata',
+            'add_chapters': opts.addchapters,
+            'add_metadata': opts.addmetadata,
+        })
    # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
    # but must be below EmbedSubtitle and FFmpegMetadata
    # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
@@ -478,14 +536,17 @@ def _real_main(argv=None):
        if not already_have_thumbnail:
            opts.writethumbnail = True
    if opts.split_chapters:
-        postprocessors.append({'key': 'FFmpegSplitChapters'})
+        postprocessors.append({
+            'key': 'FFmpegSplitChapters',
+            'force_keyframes': opts.force_keyframes_at_cuts,
+        })
    # XAttrMetadataPP should be run after post-processors that may change file contents
    if opts.xattrs:
        postprocessors.append({'key': 'XAttrMetadata'})
-    # ExecAfterDownload must be the last PP
+    # Exec must be the last PP
    if opts.exec_cmd:
        postprocessors.append({
-            'key': 'ExecAfterDownload',
+            'key': 'Exec',
            'exec_cmd': opts.exec_cmd,
            # Run this only after the files have been moved to their final locations
            'when': 'after_move'
@@ -535,7 +596,7 @@ def _real_main(argv=None):
        'forcejson': opts.dumpjson or opts.print_json,
        'dump_single_json': opts.dump_single_json,
        'force_write_download_archive': opts.force_write_download_archive,
-        'simulate': opts.simulate or any_getting,
+        'simulate': (any_getting or None) if opts.simulate is None else opts.simulate,
        'skip_download': opts.skip_download,
        'format': opts.format,
        'allow_unplayable_formats': opts.allow_unplayable_formats,
@@ -621,6 +682,7 @@ def _real_main(argv=None):
        'break_on_reject': opts.break_on_reject,
        'skip_playlist_after_errors': opts.skip_playlist_after_errors,
        'cookiefile': opts.cookiefile,
+        'cookiesfrombrowser': opts.cookiesfrombrowser,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'proxy': opts.proxy,
@@ -631,6 +693,7 @@ def _real_main(argv=None):
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'dynamic_mpd': opts.dynamic_mpd,
+        'extractor_args': opts.extractor_args,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'youtube_include_hls_manifest': opts.youtube_include_hls_manifest,
        'encoding': opts.encoding,
@@ -717,6 +780,11 @@ def main(argv=None):
        sys.exit('ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit('\nERROR: Interrupted by user')
+    except BrokenPipeError as err:
+        # https://docs.python.org/3/library/signal.html#note-on-sigpipe
+        devnull = os.open(os.devnull, os.O_WRONLY)
+        os.dup2(devnull, sys.stdout.fileno())
+        sys.exit(f'\nERROR: {err}')


__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
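Aside (not part of the diff): the 'when' keys used by the post-processor entries above slot each PP into a stage of the pipeline; a summary sketch using only the stage names that appear in this file:

PIPELINE_STAGES = [
    'pre_process',  # SponsorBlock, MetadataParser: immediately after extraction
    'before_dl',    # PPs that must run before the actual video download, Exec (exec_before_dl_cmd) last among them
    'after_move',   # Exec (exec_cmd): only after files reach their final locations; must be the last PP
]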
yt_dlp/compat.py (3029 lines): File diff suppressed because it is too large

yt_dlp/cookies.py (755 lines, new file)
@@ -0,0 +1,755 @@
import ctypes
import json
import os
import shutil
import struct
import subprocess
import sys
import tempfile
from datetime import datetime, timedelta, timezone
from hashlib import pbkdf2_hmac

from yt_dlp.aes import aes_cbc_decrypt
from yt_dlp.compat import (
    compat_b64decode,
    compat_cookiejar_Cookie,
)
from yt_dlp.utils import (
    bug_reports_message,
    bytes_to_intlist,
    expand_path,
    intlist_to_bytes,
    process_communicate_or_kill,
    YoutubeDLCookieJar,
)

try:
    import sqlite3
    SQLITE_AVAILABLE = True
except ImportError:
    # although sqlite3 is part of the standard library, it is possible to compile python without
    # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
    SQLITE_AVAILABLE = False


try:
    from Crypto.Cipher import AES
    CRYPTO_AVAILABLE = True
except ImportError:
    CRYPTO_AVAILABLE = False

try:
    import keyring
    KEYRING_AVAILABLE = True
    KEYRING_UNAVAILABLE_REASON = f'due to unknown reasons{bug_reports_message()}'
except ImportError:
    KEYRING_AVAILABLE = False
    KEYRING_UNAVAILABLE_REASON = (
        'as the `keyring` module is not installed. '
        'Please install by running `python3 -m pip install keyring`. '
        'Depending on your platform, additional packages may be required '
        'to access the keyring; see https://pypi.org/project/keyring')
except Exception as _err:
    KEYRING_AVAILABLE = False
    KEYRING_UNAVAILABLE_REASON = 'as the `keyring` module could not be initialized: %s' % _err


CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}


class YDLLogger:
    def __init__(self, ydl=None):
        self._ydl = ydl

    def debug(self, message):
        if self._ydl:
            self._ydl.write_debug(message)

    def info(self, message):
        if self._ydl:
            self._ydl.to_screen(f'[Cookies] {message}')

    def warning(self, message, only_once=False):
        if self._ydl:
            self._ydl.report_warning(message, only_once)

    def error(self, message):
        if self._ydl:
            self._ydl.report_error(message)


def load_cookies(cookie_file, browser_specification, ydl):
    cookie_jars = []
    if browser_specification is not None:
        browser_name, profile = _parse_browser_specification(*browser_specification)
        cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl)))

    if cookie_file is not None:
        cookie_file = expand_path(cookie_file)
        jar = YoutubeDLCookieJar(cookie_file)
        if os.access(cookie_file, os.R_OK):
            jar.load(ignore_discard=True, ignore_expires=True)
        cookie_jars.append(jar)

    return _merge_cookie_jars(cookie_jars)


def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger()):
    if browser_name == 'firefox':
        return _extract_firefox_cookies(profile, logger)
    elif browser_name == 'safari':
        return _extract_safari_cookies(profile, logger)
    elif browser_name in CHROMIUM_BASED_BROWSERS:
        return _extract_chrome_cookies(browser_name, profile, logger)
    else:
        raise ValueError('unknown browser: {}'.format(browser_name))


def _extract_firefox_cookies(profile, logger):
    logger.info('Extracting cookies from firefox')
    if not SQLITE_AVAILABLE:
        logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
                       'Please use a python interpreter compiled with sqlite3 support')
        return YoutubeDLCookieJar()

    if profile is None:
        search_root = _firefox_browser_dir()
    elif _is_path(profile):
        search_root = profile
    else:
        search_root = os.path.join(_firefox_browser_dir(), profile)

    cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite')
    if cookie_database_path is None:
        raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
    logger.debug('extracting from: "{}"'.format(cookie_database_path))

    with tempfile.TemporaryDirectory(prefix='youtube_dl') as tmpdir:
        cursor = None
        try:
            cursor = _open_database_copy(cookie_database_path, tmpdir)
            cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
            jar = YoutubeDLCookieJar()
            for host, name, value, path, expiry, is_secure in cursor.fetchall():
                cookie = compat_cookiejar_Cookie(
                    version=0, name=name, value=value, port=None, port_specified=False,
                    domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
                    path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
                    comment=None, comment_url=None, rest={})
                jar.set_cookie(cookie)
            logger.info('Extracted {} cookies from firefox'.format(len(jar)))
            return jar
        finally:
            if cursor is not None:
                cursor.connection.close()


def _firefox_browser_dir():
    if sys.platform in ('linux', 'linux2'):
        return os.path.expanduser('~/.mozilla/firefox')
    elif sys.platform == 'win32':
        return os.path.expandvars(r'%APPDATA%\Mozilla\Firefox\Profiles')
    elif sys.platform == 'darwin':
        return os.path.expanduser('~/Library/Application Support/Firefox')
    else:
        raise ValueError('unsupported platform: {}'.format(sys.platform))


def _get_chromium_based_browser_settings(browser_name):
    # https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md
    if sys.platform in ('linux', 'linux2'):
        config = _config_home()
        browser_dir = {
            'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'),
            'chrome': os.path.join(config, 'google-chrome'),
            'chromium': os.path.join(config, 'chromium'),
            'edge': os.path.join(config, 'microsoft-edge'),
            'opera': os.path.join(config, 'opera'),
            'vivaldi': os.path.join(config, 'vivaldi'),
        }[browser_name]

    elif sys.platform == 'win32':
        appdata_local = os.path.expandvars('%LOCALAPPDATA%')
        appdata_roaming = os.path.expandvars('%APPDATA%')
        browser_dir = {
            'brave': os.path.join(appdata_local, r'BraveSoftware\Brave-Browser\User Data'),
            'chrome': os.path.join(appdata_local, r'Google\Chrome\User Data'),
            'chromium': os.path.join(appdata_local, r'Chromium\User Data'),
            'edge': os.path.join(appdata_local, r'Microsoft\Edge\User Data'),
            'opera': os.path.join(appdata_roaming, r'Opera Software\Opera Stable'),
            'vivaldi': os.path.join(appdata_local, r'Vivaldi\User Data'),
        }[browser_name]

    elif sys.platform == 'darwin':
        appdata = os.path.expanduser('~/Library/Application Support')
        browser_dir = {
            'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'),
            'chrome': os.path.join(appdata, 'Google/Chrome'),
            'chromium': os.path.join(appdata, 'Chromium'),
            'edge': os.path.join(appdata, 'Microsoft Edge'),
            'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
            'vivaldi': os.path.join(appdata, 'Vivaldi'),
        }[browser_name]

    else:
        raise ValueError('unsupported platform: {}'.format(sys.platform))

    # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
    # dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
    keyring_name = {
        'brave': 'Brave',
        'chrome': 'Chrome',
        'chromium': 'Chromium',
        'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
        'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
        'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
    }[browser_name]

    browsers_without_profiles = {'opera'}

    return {
        'browser_dir': browser_dir,
        'keyring_name': keyring_name,
        'supports_profiles': browser_name not in browsers_without_profiles
    }


def _extract_chrome_cookies(browser_name, profile, logger):
    logger.info('Extracting cookies from {}'.format(browser_name))

    if not SQLITE_AVAILABLE:
        logger.warning(('Cannot extract cookies from {} without sqlite3 support. '
                        'Please use a python interpreter compiled with sqlite3 support').format(browser_name))
        return YoutubeDLCookieJar()

    config = _get_chromium_based_browser_settings(browser_name)

    if profile is None:
        search_root = config['browser_dir']
    elif _is_path(profile):
        search_root = profile
        config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile
    else:
        if config['supports_profiles']:
            search_root = os.path.join(config['browser_dir'], profile)
        else:
            logger.error('{} does not support profiles'.format(browser_name))
            search_root = config['browser_dir']

    cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies')
    if cookie_database_path is None:
        raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
    logger.debug('extracting from: "{}"'.format(cookie_database_path))

    decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger)

    with tempfile.TemporaryDirectory(prefix='youtube_dl') as tmpdir:
        cursor = None
        try:
            cursor = _open_database_copy(cookie_database_path, tmpdir)
            cursor.connection.text_factory = bytes
            column_names = _get_column_names(cursor, 'cookies')
            secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
            cursor.execute('SELECT host_key, name, value, encrypted_value, path, '
                           'expires_utc, {} FROM cookies'.format(secure_column))
            jar = YoutubeDLCookieJar()
            failed_cookies = 0
            for host_key, name, value, encrypted_value, path, expires_utc, is_secure in cursor.fetchall():
                host_key = host_key.decode('utf-8')
                name = name.decode('utf-8')
                value = value.decode('utf-8')
                path = path.decode('utf-8')

                if not value and encrypted_value:
                    value = decryptor.decrypt(encrypted_value)
                    if value is None:
                        failed_cookies += 1
                        continue

                cookie = compat_cookiejar_Cookie(
                    version=0, name=name, value=value, port=None, port_specified=False,
                    domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
                    path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
                    comment=None, comment_url=None, rest={})
                jar.set_cookie(cookie)
            if failed_cookies > 0:
                failed_message = ' ({} could not be decrypted)'.format(failed_cookies)
            else:
                failed_message = ''
            logger.info('Extracted {} cookies from {}{}'.format(len(jar), browser_name, failed_message))
            return jar
        finally:
            if cursor is not None:
                cursor.connection.close()


class ChromeCookieDecryptor:
    """
    Overview:

        Linux:
        - cookies are either v10 or v11
        - v10: AES-CBC encrypted with a fixed key
        - v11: AES-CBC encrypted with an OS protected key (keyring)
        - v11 keys can be stored in various places depending on the active desktop environment [2]

        Mac:
        - cookies are either v10 or not v10
        - v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux
        - not v10: 'old data' stored as plaintext

        Windows:
        - cookies are either v10 or not v10
        - v10: AES-GCM encrypted with a key which is encrypted with DPAPI
        - not v10: encrypted with DPAPI

    Sources:
    - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/
    - [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc
        - KeyStorageLinux::CreateService
    """

    def decrypt(self, encrypted_value):
        raise NotImplementedError
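Aside (not part of the diff): before the platform-specific decryptors below, a minimal stdlib-only sketch of the Linux 'v10' scheme from the overview above; the constants are exactly the ones used in LinuxChromeCookieDecryptor.derive_key and _decrypt_aes_cbc further down:

from hashlib import pbkdf2_hmac

# fixed password, 1 PBKDF2-SHA1 iteration, 16-byte AES key
v10_key = pbkdf2_hmac('sha1', b'peanuts', b'saltysalt', 1, 16)
assert len(v10_key) == 16
# an encrypted value is then b'v10' + AES-CBC(plaintext, key=v10_key, iv=b' ' * 16)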
def get_cookie_decryptor(browser_root, browser_keyring_name, logger):
    if sys.platform in ('linux', 'linux2'):
        return LinuxChromeCookieDecryptor(browser_keyring_name, logger)
    elif sys.platform == 'darwin':
        return MacChromeCookieDecryptor(browser_keyring_name, logger)
    elif sys.platform == 'win32':
        return WindowsChromeCookieDecryptor(browser_root, logger)
    else:
        raise NotImplementedError('Chrome cookie decryption is not supported '
                                  'on this platform: {}'.format(sys.platform))


class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_keyring_name, logger):
        self._logger = logger
        self._v10_key = self.derive_key(b'peanuts')
        if KEYRING_AVAILABLE:
            self._v11_key = self.derive_key(_get_linux_keyring_password(browser_keyring_name))
        else:
            self._v11_key = None

    @staticmethod
    def derive_key(password):
        # values from
        # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
        return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16)

    def decrypt(self, encrypted_value):
        version = encrypted_value[:3]
        ciphertext = encrypted_value[3:]

        if version == b'v10':
            return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)

        elif version == b'v11':
            if self._v11_key is None:
                self._logger.warning(f'cannot decrypt cookie {KEYRING_UNAVAILABLE_REASON}', only_once=True)
                return None
            return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)

        else:
            return None


class MacChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_keyring_name, logger):
        self._logger = logger
        password = _get_mac_keyring_password(browser_keyring_name)
        self._v10_key = None if password is None else self.derive_key(password)

    @staticmethod
    def derive_key(password):
        # values from
        # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
        return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16)

    def decrypt(self, encrypted_value):
        version = encrypted_value[:3]
        ciphertext = encrypted_value[3:]

        if version == b'v10':
            if self._v10_key is None:
                self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
                return None

            return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)

        else:
            # other prefixes are considered 'old data' which were stored as plaintext
            # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
            return encrypted_value


class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_root, logger):
        self._logger = logger
        self._v10_key = _get_windows_v10_key(browser_root, logger)

    def decrypt(self, encrypted_value):
        version = encrypted_value[:3]
        ciphertext = encrypted_value[3:]

        if version == b'v10':
            if self._v10_key is None:
                self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
                return None
            elif not CRYPTO_AVAILABLE:
                self._logger.warning('cannot decrypt cookie as the `pycryptodome` module is not installed. '
                                     'Please install by running `python3 -m pip install pycryptodome`',
                                     only_once=True)
                return None

            # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
            #   kNonceLength
            nonce_length = 96 // 8
            # boringssl
            #   EVP_AEAD_AES_GCM_TAG_LEN
            authentication_tag_length = 16

            raw_ciphertext = ciphertext
            nonce = raw_ciphertext[:nonce_length]
            ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
            authentication_tag = raw_ciphertext[-authentication_tag_length:]

            return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)

        else:
            # any other prefix means the data is DPAPI encrypted
            # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
            return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8')
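Aside (not part of the diff): the nonce/tag arithmetic in the Windows branch above amounts to a fixed byte layout. A small helper sketch (hypothetical name, not part of the module):

def split_windows_v10_value(encrypted_value):
    # layout: b'v10' | 12-byte AES-GCM nonce | ciphertext | 16-byte authentication tag
    assert encrypted_value[:3] == b'v10'
    payload = encrypted_value[3:]
    nonce, tag = payload[:12], payload[-16:]
    ciphertext = payload[12:-16]
    return nonce, ciphertext, tag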
def _extract_safari_cookies(profile, logger):
    if profile is not None:
        logger.error('safari does not support profiles')
    if sys.platform != 'darwin':
        raise ValueError('unsupported platform: {}'.format(sys.platform))

    cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')

    if not os.path.isfile(cookies_path):
        raise FileNotFoundError('could not find safari cookies database')

    with open(cookies_path, 'rb') as f:
        cookies_data = f.read()

    jar = parse_safari_cookies(cookies_data, logger=logger)
    logger.info('Extracted {} cookies from safari'.format(len(jar)))
    return jar


class ParserError(Exception):
    pass


class DataParser:
    def __init__(self, data, logger):
        self._data = data
        self.cursor = 0
        self._logger = logger

    def read_bytes(self, num_bytes):
        if num_bytes < 0:
            raise ParserError('invalid read of {} bytes'.format(num_bytes))
        end = self.cursor + num_bytes
        if end > len(self._data):
            raise ParserError('reached end of input')
        data = self._data[self.cursor:end]
        self.cursor = end
        return data

    def expect_bytes(self, expected_value, message):
        value = self.read_bytes(len(expected_value))
        if value != expected_value:
            raise ParserError('unexpected value: {} != {} ({})'.format(value, expected_value, message))

    def read_uint(self, big_endian=False):
        data_format = '>I' if big_endian else '<I'
        return struct.unpack(data_format, self.read_bytes(4))[0]

    def read_double(self, big_endian=False):
        data_format = '>d' if big_endian else '<d'
        return struct.unpack(data_format, self.read_bytes(8))[0]

    def read_cstring(self):
        buffer = []
        while True:
            c = self.read_bytes(1)
            if c == b'\x00':
                return b''.join(buffer).decode('utf-8')
            else:
                buffer.append(c)

    def skip(self, num_bytes, description='unknown'):
        if num_bytes > 0:
            self._logger.debug('skipping {} bytes ({}): {}'.format(
                num_bytes, description, self.read_bytes(num_bytes)))
        elif num_bytes < 0:
            raise ParserError('invalid skip of {} bytes'.format(num_bytes))

    def skip_to(self, offset, description='unknown'):
        self.skip(offset - self.cursor, description)

    def skip_to_end(self, description='unknown'):
        self.skip_to(len(self._data), description)


def _mac_absolute_time_to_posix(timestamp):
    return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())


def _parse_safari_cookies_header(data, logger):
    p = DataParser(data, logger)
    p.expect_bytes(b'cook', 'database signature')
    number_of_pages = p.read_uint(big_endian=True)
    page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)]
    return page_sizes, p.cursor


def _parse_safari_cookies_page(data, jar, logger):
    p = DataParser(data, logger)
    p.expect_bytes(b'\x00\x00\x01\x00', 'page signature')
    number_of_cookies = p.read_uint()
    record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
    if number_of_cookies == 0:
        logger.debug('a cookies page of size {} has no cookies'.format(len(data)))
        return

    p.skip_to(record_offsets[0], 'unknown page header field')

    for record_offset in record_offsets:
        p.skip_to(record_offset, 'space between records')
        record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger)
        p.read_bytes(record_length)
    p.skip_to_end('space in between pages')


def _parse_safari_cookies_record(data, jar, logger):
    p = DataParser(data, logger)
    record_size = p.read_uint()
    p.skip(4, 'unknown record field 1')
    flags = p.read_uint()
    is_secure = bool(flags & 0x0001)
    p.skip(4, 'unknown record field 2')
    domain_offset = p.read_uint()
    name_offset = p.read_uint()
    path_offset = p.read_uint()
    value_offset = p.read_uint()
    p.skip(8, 'unknown record field 3')
    expiration_date = _mac_absolute_time_to_posix(p.read_double())
    _creation_date = _mac_absolute_time_to_posix(p.read_double())  # noqa: F841

    try:
        p.skip_to(domain_offset)
        domain = p.read_cstring()

        p.skip_to(name_offset)
        name = p.read_cstring()

        p.skip_to(path_offset)
        path = p.read_cstring()

        p.skip_to(value_offset)
        value = p.read_cstring()
    except UnicodeDecodeError:
        logger.warning('failed to parse cookie because UTF-8 decoding failed')
        return record_size

    p.skip_to(record_size, 'space at the end of the record')

    cookie = compat_cookiejar_Cookie(
        version=0, name=name, value=value, port=None, port_specified=False,
        domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
        path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
        comment=None, comment_url=None, rest={})
    jar.set_cookie(cookie)
    return record_size


def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
    """
    References:
        - https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
            - this data appears to be out of date but the important parts of the database structure are the same
            - there are a few bytes here and there which are skipped during parsing
    """
    if jar is None:
        jar = YoutubeDLCookieJar()
    page_sizes, body_start = _parse_safari_cookies_header(data, logger)
    p = DataParser(data[body_start:], logger)
    for page_size in page_sizes:
        _parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
    p.skip_to_end('footer')
    return jar
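Aside (not part of the diff): since parse_safari_cookies above takes raw bytes, the binarycookies store can be fed to it directly, mirroring what _extract_safari_cookies does (macOS only, and only if the file exists):

import os

with open(os.path.expanduser('~/Library/Cookies/Cookies.binarycookies'), 'rb') as f:
    jar = parse_safari_cookies(f.read())  # returns a YoutubeDLCookieJar
print(len(jar), 'cookies')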
def _get_linux_keyring_password(browser_keyring_name):
    password = keyring.get_password('{} Keys'.format(browser_keyring_name),
                                    '{} Safe Storage'.format(browser_keyring_name))
    if password is None:
        # this sometimes occurs in KDE because chrome does not check hasEntry and instead
        # just tries to read the value (which kwallet returns "") whereas keyring checks hasEntry
        # to verify this:
        # dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
        # while starting chrome.
        # this may be a bug as the intended behaviour is to generate a random password and store
        # it, but that doesn't matter here.
        password = ''
    return password.encode('utf-8')


def _get_mac_keyring_password(browser_keyring_name):
    if KEYRING_AVAILABLE:
        password = keyring.get_password('{} Safe Storage'.format(browser_keyring_name), browser_keyring_name)
        return password.encode('utf-8')
    else:
        proc = subprocess.Popen(['security', 'find-generic-password',
                                 '-w',  # write password to stdout
                                 '-a', browser_keyring_name,  # match 'account'
                                 '-s', '{} Safe Storage'.format(browser_keyring_name)],  # match 'service'
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL)
        try:
            stdout, stderr = process_communicate_or_kill(proc)
            return stdout
        except BaseException:
            return None


def _get_windows_v10_key(browser_root, logger):
    path = _find_most_recently_used_file(browser_root, 'Local State')
    if path is None:
        logger.error('could not find local state file')
        return None
    with open(path, 'r') as f:
        data = json.load(f)
    try:
        base64_key = data['os_crypt']['encrypted_key']
    except KeyError:
        logger.error('no encrypted key in Local State')
        return None
    encrypted_key = compat_b64decode(base64_key)
    prefix = b'DPAPI'
    if not encrypted_key.startswith(prefix):
        logger.error('invalid key')
        return None
    return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)


def pbkdf2_sha1(password, salt, iterations, key_length):
    return pbkdf2_hmac('sha1', password, salt, iterations, key_length)


def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
    plaintext = aes_cbc_decrypt(bytes_to_intlist(ciphertext),
                                bytes_to_intlist(key),
                                bytes_to_intlist(initialization_vector))
    padding_length = plaintext[-1]
    try:
        return intlist_to_bytes(plaintext[:-padding_length]).decode('utf-8')
    except UnicodeDecodeError:
        logger.warning('failed to decrypt cookie because UTF-8 decoding failed. Possibly the key is wrong?')
        return None


def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
    cipher = AES.new(key, AES.MODE_GCM, nonce)
    try:
        plaintext = cipher.decrypt_and_verify(ciphertext, authentication_tag)
    except ValueError:
        logger.warning('failed to decrypt cookie because the MAC check failed. Possibly the key is wrong?')
        return None

    try:
        return plaintext.decode('utf-8')
    except UnicodeDecodeError:
        logger.warning('failed to decrypt cookie because UTF-8 decoding failed. Possibly the key is wrong?')
        return None


def _decrypt_windows_dpapi(ciphertext, logger):
    """
    References:
        - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
    """
    from ctypes.wintypes import DWORD

    class DATA_BLOB(ctypes.Structure):
        _fields_ = [('cbData', DWORD),
                    ('pbData', ctypes.POINTER(ctypes.c_char))]

    buffer = ctypes.create_string_buffer(ciphertext)
    blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
    blob_out = DATA_BLOB()
    ret = ctypes.windll.crypt32.CryptUnprotectData(
        ctypes.byref(blob_in),  # pDataIn
        None,  # ppszDataDescr: human readable description of pDataIn
        None,  # pOptionalEntropy: salt?
        None,  # pvReserved: must be NULL
        None,  # pPromptStruct: information about prompts to display
        0,  # dwFlags
        ctypes.byref(blob_out)  # pDataOut
    )
    if not ret:
        logger.warning('failed to decrypt with DPAPI')
        return None

    result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
    ctypes.windll.kernel32.LocalFree(blob_out.pbData)
    return result


def _config_home():
    return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))


def _open_database_copy(database_path, tmpdir):
    # cannot open sqlite databases if they are already in use (e.g. by the browser)
    database_copy_path = os.path.join(tmpdir, 'temporary.sqlite')
    shutil.copy(database_path, database_copy_path)
    conn = sqlite3.connect(database_copy_path)
    return conn.cursor()


def _get_column_names(cursor, table_name):
    table_info = cursor.execute('PRAGMA table_info({})'.format(table_name)).fetchall()
    return [row[1].decode('utf-8') for row in table_info]


def _find_most_recently_used_file(root, filename):
    # if there are multiple browser profiles, take the most recently used one
    paths = []
    for root, dirs, files in os.walk(root):
        for file in files:
            if file == filename:
                paths.append(os.path.join(root, file))
    return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)


def _merge_cookie_jars(jars):
    output_jar = YoutubeDLCookieJar()
    for jar in jars:
        for cookie in jar:
            output_jar.set_cookie(cookie)
        if jar.filename is not None:
            output_jar.filename = jar.filename
    return output_jar


def _is_path(value):
    return os.path.sep in value


def _parse_browser_specification(browser_name, profile=None):
    if browser_name not in SUPPORTED_BROWSERS:
        raise ValueError(f'unsupported browser: "{browser_name}"')
    if profile is not None and _is_path(profile):
        profile = os.path.expanduser(profile)
    return browser_name, profile
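Aside (not part of the diff): a hedged usage sketch of the module's public helpers defined above (requires the browser data to exist on the local machine):

from yt_dlp.cookies import extract_cookies_from_browser

jar = extract_cookies_from_browser('firefox')              # most recently used profile
jar = extract_cookies_from_browser('chrome', 'Profile 1')  # a specific profile by name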
@@ -3,17 +3,20 @@ from __future__ import unicode_literals
from ..compat import compat_str
from ..utils import (
    determine_protocol,
+    NO_DEFAULT
)


-def _get_real_downloader(info_dict, protocol=None, *args, **kwargs):
+def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=None, to_stdout=False):
    info_dict['protocol'] = determine_protocol(info_dict)
    info_copy = info_dict.copy()
    if protocol:
        info_copy['protocol'] = protocol
-    return get_suitable_downloader(info_copy, *args, **kwargs)
+    info_copy['to_stdout'] = to_stdout
+    return _get_suitable_downloader(info_copy, params, default)


-# Some of these require _get_real_downloader
+# Some of these require get_suitable_downloader
from .common import FileDownloader
from .dash import DashSegmentsFD
from .f4m import F4mFD
@@ -69,32 +72,39 @@ def shorten_protocol_name(proto, simplify=False):
    return short_protocol_names.get(proto, proto)


-def get_suitable_downloader(info_dict, params={}, default=HttpFD):
+def _get_suitable_downloader(info_dict, params, default):
    """Get the downloader class that can handle the info dict."""
-    protocol = determine_protocol(info_dict)
-    info_dict['protocol'] = protocol
+    if default is NO_DEFAULT:
+        default = HttpFD

    # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
    #     return FFmpegFD

+    protocol = info_dict['protocol']
    downloaders = params.get('external_downloader')
    external_downloader = (
        downloaders if isinstance(downloaders, compat_str) or downloaders is None
        else downloaders.get(shorten_protocol_name(protocol, True), downloaders.get('default')))
-    if external_downloader and external_downloader.lower() == 'native':
-        external_downloader = 'native'

-    if external_downloader not in (None, 'native'):
+    if external_downloader is None:
+        if info_dict['to_stdout'] and FFmpegFD.can_merge_formats(info_dict, params):
+            return FFmpegFD
+    elif external_downloader.lower() != 'native':
        ed = get_external_downloader(external_downloader)
        if ed.can_download(info_dict, external_downloader):
            return ed

    if protocol == 'http_dash_segments':
        if info_dict.get('is_live') and (external_downloader or '').lower() != 'native':
            return FFmpegFD

    if protocol in ('m3u8', 'm3u8_native'):
        if info_dict.get('is_live'):
            return FFmpegFD
-        elif external_downloader == 'native':
+        elif (external_downloader or '').lower() == 'native':
            return HlsFD
-        elif _get_real_downloader(info_dict, 'm3u8_frag_urls', params, None):
+        elif get_suitable_downloader(
+                info_dict, params, None, protocol='m3u8_frag_urls', to_stdout=info_dict['to_stdout']):
            return HlsFD
        elif params.get('hls_prefer_native') is True:
            return HlsFD
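Aside (not part of the diff): the rework above folds the old _get_real_downloader shim into get_suitable_downloader itself. A hedged usage sketch of the new signature, with arguments as defined in this hunk:

from yt_dlp.downloader import get_suitable_downloader

info = {'url': 'https://example.com/video.m3u8', 'protocol': 'm3u8_native'}
fd_cls = get_suitable_downloader(info, params={}, to_stdout=False)  # e.g. HlsFD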
@@ -1,5 +1,6 @@
from __future__ import division, unicode_literals

import copy
import os
import re
import sys

@@ -46,8 +47,11 @@ class FileDownloader(object):
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
    external_downloader_args:  A list of additional command-line arguments for the
                        external downloader.
    external_downloader_args:  A dictionary of downloader keys (in lower case)
                        and a list of additional command-line arguments for the
                        executable. Use 'default' as the name for arguments to be
                        passed to all downloaders. For compatibility with youtube-dl,
                        a single list of args can also be used
    hls_use_mpegts:     Use the mpegts container for HLS videos.
    http_chunk_size:    Size of a chunk for chunk-based HTTP downloading. May be
                        useful for bypassing bandwidth throttling imposed by
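Per the new docstring, external_downloader_args accepts either the legacy flat list or a dict keyed by downloader name. An illustrative params fragment (the aria2c flag is only an example argument):

    params = {
        'external_downloader': 'aria2c',
        'external_downloader_args': {
            'aria2c': ['--min-split-size=1M'],  # applied to aria2c only
            'default': [],                      # applied to every external downloader
        },
    }
    # youtube-dl compatible form: one flat list for the selected downloader
    legacy_params = {'external_downloader_args': ['--min-split-size=1M']}
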
@@ -200,12 +204,12 @@ class FileDownloader(object):
        return filename + '.ytdl'

    def try_rename(self, old_filename, new_filename):
        if old_filename == new_filename:
            return
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
            os.replace(old_filename, new_filename)
        except (IOError, OSError) as err:
            self.report_error('unable to rename file: %s' % error_to_compat_str(err))
            self.report_error(f'unable to rename file: {err}')

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
@@ -319,12 +323,9 @@ class FileDownloader(object):
                '[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
                % (error_to_compat_str(err), count, self.format_retries(retries)))

    def report_file_already_downloaded(self, file_name):
    def report_file_already_downloaded(self, *args, **kwargs):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')
        return self.ydl.report_file_already_downloaded(*args, **kwargs)

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
@@ -342,7 +343,7 @@ class FileDownloader(object):
        """

        nooverwrites_and_exists = (
            not self.params.get('overwrites', subtitle)
            not self.params.get('overwrites', True)
            and os.path.exists(encodeFilename(filename))
        )

@@ -360,7 +361,7 @@ class FileDownloader(object):
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            }, info_dict)
            return True, False

        if subtitle is False:
@@ -388,7 +389,16 @@ class FileDownloader(object):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _hook_progress(self, status):
    def _hook_progress(self, status, info_dict):
        if not self._progress_hooks:
            return
        info_dict = dict(info_dict)
        for key in ('__original_infodict', '__postprocessors'):
            info_dict.pop(key, None)
        # youtube-dl passes the same status object to all the hooks.
        # Some third-party scripts seem to rely on this.
        # So keep this behavior if possible
        status['info_dict'] = copy.deepcopy(info_dict)
        for ph in self._progress_hooks:
            ph(status)

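Since every status dict now carries a sanitized deep copy of the info dict, a progress hook can inspect both. A small sketch (field names follow the status dicts built in this file):

    def progress_hook(status):
        info = status.get('info_dict') or {}
        if status['status'] == 'finished':
            print('finished %s via %s' % (status.get('filename'), info.get('protocol')))

    # registered through params as usual:
    # YoutubeDL({'progress_hooks': [progress_hook]})
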
@@ -1,6 +1,6 @@
from __future__ import unicode_literals

from ..downloader import _get_real_downloader
from ..downloader import get_suitable_downloader
from .fragment import FragmentFD

from ..utils import urljoin
@@ -15,11 +15,15 @@ class DashSegmentsFD(FragmentFD):
    FD_NAME = 'dashsegments'

    def real_download(self, filename, info_dict):
        if info_dict.get('is_live'):
            self.report_error('Live DASH videos are not supported')

        fragment_base_url = info_dict.get('fragment_base_url')
        fragments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']

        real_downloader = _get_real_downloader(info_dict, 'dash_frag_urls', self.params, None)
        real_downloader = get_suitable_downloader(
            info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

        ctx = {
            'filename': filename,
@@ -29,7 +33,7 @@ class DashSegmentsFD(FragmentFD):
        if real_downloader:
            self._prepare_external_frag_download(ctx)
        else:
            self._prepare_and_start_frag_download(ctx)
            self._prepare_and_start_frag_download(ctx, info_dict)

        fragments_to_download = []
        frag_index = 0
@@ -54,12 +58,6 @@ class DashSegmentsFD(FragmentFD):
            info_copy = info_dict.copy()
            info_copy['fragments'] = fragments_to_download
            fd = real_downloader(self.ydl, self.params)
            # TODO: Make progress updates work without hooking twice
            # for ph in self._progress_hooks:
            #     fd.add_progress_hook(ph)
            success = fd.real_download(filename, info_copy)
            if not success:
                return False
        else:
            self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
        return True
            return fd.real_download(filename, info_copy)

        return self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
@@ -22,7 +22,7 @@ from ..utils import (
    cli_option,
    cli_valueless_option,
    cli_bool_option,
    cli_configuration_args,
    _configuration_args,
    encodeFilename,
    encodeArgument,
    handle_youtubedl_headers,
@@ -36,6 +36,7 @@ from ..utils import (

class ExternalFD(FileDownloader):
    SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps')
    can_download_to_stdout = False

    def real_download(self, filename, info_dict):
        self.report_destination(filename)
@@ -67,7 +68,7 @@ class ExternalFD(FileDownloader):
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
            })
            self._hook_progress(status)
            self._hook_progress(status, info_dict)
            return True
        else:
            self.to_stderr('\n')
@@ -93,7 +94,9 @@ class ExternalFD(FileDownloader):

    @classmethod
    def supports(cls, info_dict):
        return info_dict['protocol'] in cls.SUPPORTED_PROTOCOLS
        return (
            (cls.can_download_to_stdout or not info_dict.get('to_stdout'))
            and info_dict['protocol'] in cls.SUPPORTED_PROTOCOLS)

    @classmethod
    def can_download(cls, info_dict, path=None):
@@ -108,11 +111,10 @@ class ExternalFD(FileDownloader):
    def _valueless_option(self, command_option, param, expected_value=True):
        return cli_valueless_option(self.params, command_option, param, expected_value)

    def _configuration_args(self, *args, **kwargs):
        return cli_configuration_args(
            self.params.get('external_downloader_args'),
            [self.get_basename(), 'default'],
            *args, **kwargs)
    def _configuration_args(self, keys=None, *args, **kwargs):
        return _configuration_args(
            self.get_basename(), self.params.get('external_downloader_args'), self.get_basename(),
            keys, *args, **kwargs)

    def _call_downloader(self, tmpfilename, info_dict):
        """ Either overwrite this or implement _make_cmd """
@@ -286,6 +288,7 @@ class Aria2cFD(ExternalFD):
        if info_dict.get('http_headers') is not None:
            for key, val in info_dict['http_headers'].items():
                cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._option('--max-overall-download-limit', 'ratelimit')
        cmd += self._option('--interface', 'source_address')
        cmd += self._option('--all-proxy', 'proxy')
        cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
@@ -340,17 +343,28 @@ class HttpieFD(ExternalFD):


class FFmpegFD(ExternalFD):
    SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms')
    SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms', 'http_dash_segments')
    can_download_to_stdout = True

    @classmethod
    def available(cls, path=None):
        # TODO: Fix path for ffmpeg
        # Fixme: This may be wrong when --ffmpeg-location is used
        return FFmpegPostProcessor().available

    def on_process_started(self, proc, stdin):
        """ Override this in subclasses """
        pass

    @classmethod
    def can_merge_formats(cls, info_dict, params={}):
        return (
            info_dict.get('requested_formats')
            and info_dict.get('protocol')
            and not params.get('allow_unplayable_formats')
            and 'no-direct-merge' not in params.get('compat_opts', [])
            and cls.can_download(info_dict))

    def _call_downloader(self, tmpfilename, info_dict):
        urls = [f['url'] for f in info_dict.get('requested_formats', [])] or [info_dict['url']]
        ffpp = FFmpegPostProcessor(downloader=self)
@@ -368,6 +382,9 @@ class FFmpegFD(ExternalFD):
        if not self.params.get('verbose'):
            args += ['-hide_banner']

        args += info_dict.get('_ffmpeg_args', [])

        # This option exists only for compatibility. Extractors should use `_ffmpeg_args` instead
        seekable = info_dict.get('_seekable')
        if seekable is not None:
            # setting -seekable prevents ffmpeg from guessing if the server
@@ -377,8 +394,6 @@ class FFmpegFD(ExternalFD):
            # http://trac.ffmpeg.org/ticket/6125#comment:10
            args += ['-seekable', '1' if seekable else '0']

        args += self._configuration_args()

        # start_time = info_dict.get('start_time') or 0
        # if start_time:
        #     args += ['-ss', compat_str(start_time)]
@@ -444,19 +459,20 @@ class FFmpegFD(ExternalFD):
        elif isinstance(conn, compat_str):
            args += ['-rtmp_conn', conn]

        for url in urls:
            args += ['-i', url]
        for i, url in enumerate(urls):
            args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', url]

        args += ['-c', 'copy']
        if info_dict.get('requested_formats'):
            for (i, fmt) in enumerate(info_dict['requested_formats']):
                if fmt.get('acodec') != 'none':
                    args.extend(['-map', '%d:a:0' % i])
                if fmt.get('vcodec') != 'none':
                    args.extend(['-map', '%d:v:0' % i])
        if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
            for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
                stream_number = fmt.get('manifest_stream_number', 0)
                a_or_v = 'a' if fmt.get('acodec') != 'none' else 'v'
                args.extend(['-map', f'{i}:{a_or_v}:{stream_number}'])

        if self.params.get('test', False):
            args += ['-fs', compat_str(self._TEST_FILE_SIZE)]

        ext = info_dict['ext']
        if protocol in ('m3u8', 'm3u8_native'):
            use_mpegts = (tmpfilename == '-') or self.params.get('hls_use_mpegts')
            if use_mpegts is None:
@@ -469,12 +485,15 @@ class FFmpegFD(ExternalFD):
                args += ['-bsf:a', 'aac_adtstoasc']
        elif protocol == 'rtmp':
            args += ['-f', 'flv']
        elif ext == 'mp4' and tmpfilename == '-':
            args += ['-f', 'mpegts']
        else:
            args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
            args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]

        args += self._configuration_args(('_o1', '_o', ''))

        args = [encodeArgument(opt) for opt in args]
        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))

        self._debug_cmd(args)

        proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)

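The new -map loop selects one stream per input by type instead of hard-coding one audio and one video map per format. For two requested formats (video-only input 0, audio-only input 1) it can yield an argument list like the following sketch (URLs are placeholders):

    args = [
        'ffmpeg',
        '-i', 'https://example.com/video.m3u8',  # input 0: vcodec set, acodec == 'none'
        '-i', 'https://example.com/audio.m3u8',  # input 1: acodec set, vcodec == 'none'
        '-c', 'copy',
        '-map', '0:v:0',  # stream 0 among the video streams of input 0
        '-map', '1:a:0',  # stream 0 among the audio streams of input 1
        '-f', 'mp4',
        'out.mp4',
    ]
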
@@ -380,7 +380,7 @@ class F4mFD(FragmentFD):

        base_url_parsed = compat_urllib_parse_urlparse(base_url)

        self._start_frag_download(ctx)
        self._start_frag_download(ctx, info_dict)

        frag_index = 0
        while fragments_list:
@@ -434,6 +434,6 @@ class F4mFD(FragmentFD):
                msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                self.report_warning(msg)

        self._finish_frag_download(ctx)
        self._finish_frag_download(ctx, info_dict)

        return True
@@ -83,9 +83,9 @@ class FragmentFD(FileDownloader):
        headers = info_dict.get('http_headers')
        return sanitized_Request(url, None, headers) if headers else url

    def _prepare_and_start_frag_download(self, ctx):
    def _prepare_and_start_frag_download(self, ctx, info_dict):
        self._prepare_frag_download(ctx)
        self._start_frag_download(ctx)
        self._start_frag_download(ctx, info_dict)

    def __do_ytdl_file(self, ctx):
        return not ctx['live'] and not ctx['tmpfilename'] == '-' and not self.params.get('_no_ytdl_file')
@@ -105,17 +105,19 @@ class FragmentFD(FileDownloader):

    def _write_ytdl_file(self, ctx):
        frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        downloader = {
            'current_fragment': {
                'index': ctx['fragment_index'],
            },
        }
        if 'extra_state' in ctx:
            downloader['extra_state'] = ctx['extra_state']
        if ctx.get('fragment_count') is not None:
            downloader['fragment_count'] = ctx['fragment_count']
        frag_index_stream.write(json.dumps({'downloader': downloader}))
        frag_index_stream.close()
        try:
            downloader = {
                'current_fragment': {
                    'index': ctx['fragment_index'],
                },
            }
            if 'extra_state' in ctx:
                downloader['extra_state'] = ctx['extra_state']
            if ctx.get('fragment_count') is not None:
                downloader['fragment_count'] = ctx['fragment_count']
            frag_index_stream.write(json.dumps({'downloader': downloader}))
        finally:
            frag_index_stream.close()

    def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
        fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
@@ -219,7 +221,7 @@ class FragmentFD(FileDownloader):
            'complete_frags_downloaded_bytes': resume_len,
        })

    def _start_frag_download(self, ctx):
    def _start_frag_download(self, ctx, info_dict):
        resume_len = ctx['complete_frags_downloaded_bytes']
        total_frags = ctx['total_frags']
        # This dict stores the download progress, it's updated by the progress
@@ -248,6 +250,7 @@ class FragmentFD(FileDownloader):
            time_now = time.time()
            state['elapsed'] = time_now - start
            frag_total_bytes = s.get('total_bytes') or 0
            s['fragment_info_dict'] = s.pop('info_dict', {})
            if not ctx['live']:
                estimated_size = (
                    (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
@@ -270,13 +273,13 @@ class FragmentFD(FileDownloader):
                state['speed'] = s.get('speed') or ctx.get('speed')
                ctx['speed'] = state['speed']
                ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
            self._hook_progress(state)
            self._hook_progress(state, info_dict)

        ctx['dl'].add_progress_hook(frag_progress_hook)

        return start

    def _finish_frag_download(self, ctx):
    def _finish_frag_download(self, ctx, info_dict):
        ctx['dest_stream'].close()
        if self.__do_ytdl_file(ctx):
            ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename']))
@@ -303,7 +306,7 @@ class FragmentFD(FileDownloader):
            'filename': ctx['filename'],
            'status': 'finished',
            'elapsed': elapsed,
        })
        }, info_dict)

    def _prepare_external_frag_download(self, ctx):
        if 'live' not in ctx:
@@ -326,10 +329,9 @@ class FragmentFD(FileDownloader):
            'fragment_index': 0,
        })

    def download_and_append_fragments(self, ctx, fragments, info_dict, pack_func=None):
    def download_and_append_fragments(self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None):
        fragment_retries = self.params.get('fragment_retries', 0)
        skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
        test = self.params.get('test', False)
        is_fatal = (lambda idx: idx == 0) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)
        if not pack_func:
            pack_func = lambda frag_content, _: frag_content

@@ -341,7 +343,7 @@ class FragmentFD(FileDownloader):
                headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)

            # Never skip the first fragment
            fatal = (fragment.get('index') or frag_index) == 0 or not skip_unavailable_fragments
            fatal = is_fatal(fragment.get('index') or (frag_index - 1))
            count, frag_content = 0, None
            while count <= fragment_retries:
                try:
@@ -382,14 +384,13 @@ class FragmentFD(FileDownloader):
            # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
            # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
            # not what it decrypts to.
            if test:
            if self.params.get('test', False):
                return frag_content
            return AES.new(decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)

        def append_fragment(frag_content, frag_index, ctx):
            if not frag_content:
                fatal = frag_index == 1 or not skip_unavailable_fragments
                if not fatal:
                if not is_fatal(frag_index - 1):
                    self.report_skip_fragment(frag_index)
                    return True
                else:
@@ -404,13 +405,9 @@ class FragmentFD(FileDownloader):
        if can_threaded_download and max_workers > 1:

            def _download_fragment(fragment):
                try:
                    ctx_copy = ctx.copy()
                    frag_content, frag_index = download_fragment(fragment, ctx_copy)
                    return fragment, frag_content, frag_index, ctx_copy.get('fragment_filename_sanitized')
                except Exception:
                    # Return immediately on exception so that it is raised in the main thread
                    return
                ctx_copy = ctx.copy()
                frag_content, frag_index = download_fragment(fragment, ctx_copy)
                return fragment, frag_content, frag_index, ctx_copy.get('fragment_filename_sanitized')

            self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
            with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
@@ -427,4 +424,8 @@ class FragmentFD(FileDownloader):
            if not result:
                return False

        self._finish_frag_download(ctx)
        if finish_func is not None:
            ctx['dest_stream'].write(finish_func())
            ctx['dest_stream'].flush()
        self._finish_frag_download(ctx, info_dict)
        return True

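A sketch of the two keyword-only hooks download_and_append_fragments now accepts; both transforms below are no-op placeholders for real per-fragment processing:

    def pack_func(frag_content, frag_index):
        # rewrite each fragment's bytes before they are appended to dest_stream
        return frag_content

    def finish_func():
        # bytes written once, after the final fragment
        return b''

    # fd.download_and_append_fragments(ctx, fragments, info_dict,
    #                                  pack_func=pack_func, finish_func=finish_func)
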
@@ -4,7 +4,7 @@ import re
import io
import binascii

from ..downloader import _get_real_downloader
from ..downloader import get_suitable_downloader
from .fragment import FragmentFD, can_decrypt_frag
from .external import FFmpegFD

@@ -80,16 +80,14 @@ class HlsFD(FragmentFD):
            fd = FFmpegFD(self.ydl, self.params)
            self.report_warning(
                '%s detected unsupported features; extraction will be delegated to %s' % (self.FD_NAME, fd.get_basename()))
            # TODO: Make progress updates work without hooking twice
            # for ph in self._progress_hooks:
            #     fd.add_progress_hook(ph)
            return fd.real_download(filename, info_dict)

        is_webvtt = info_dict['ext'] == 'vtt'
        if is_webvtt:
            real_downloader = None  # Packing the fragments is not currently supported for external downloader
        else:
            real_downloader = _get_real_downloader(info_dict, 'm3u8_frag_urls', self.params, None)
            real_downloader = get_suitable_downloader(
                info_dict, self.params, None, protocol='m3u8_frag_urls', to_stdout=(filename == '-'))
        if real_downloader and not real_downloader.supports_manifest(s):
            real_downloader = None
        if real_downloader:
@@ -133,7 +131,7 @@ class HlsFD(FragmentFD):
        if real_downloader:
            self._prepare_external_frag_download(ctx)
        else:
            self._prepare_and_start_frag_download(ctx)
            self._prepare_and_start_frag_download(ctx, info_dict)

        extra_state = ctx.setdefault('extra_state', {})

@@ -250,78 +248,100 @@ class HlsFD(FragmentFD):
            # TODO: Make progress updates work without hooking twice
            # for ph in self._progress_hooks:
            #     fd.add_progress_hook(ph)
            success = fd.real_download(filename, info_copy)
            if not success:
                return False
            return fd.real_download(filename, info_copy)

        if is_webvtt:
            def pack_fragment(frag_content, frag_index):
                output = io.StringIO()
                adjust = 0
                overflow = False
                mpegts_last = None
                for block in webvtt.parse_fragment(frag_content):
                    if isinstance(block, webvtt.CueBlock):
                        extra_state['webvtt_mpegts_last'] = mpegts_last
                        if overflow:
                            extra_state['webvtt_mpegts_adjust'] += 1
                            overflow = False
                        block.start += adjust
                        block.end += adjust

                        dedup_window = extra_state.setdefault('webvtt_dedup_window', [])

                        ready = []

                        i = 0
                        is_new = True
                        while i < len(dedup_window):
                            wcue = dedup_window[i]
                            wblock = webvtt.CueBlock.from_json(wcue)
                            i += 1
                            if wblock.hinges(block):
                                wcue['end'] = block.end
                                is_new = False
                                continue
                            if wblock == block:
                                is_new = False
                                continue
                            if wblock.end > block.start:
                                continue
                            ready.append(wblock)
                            i -= 1
                            del dedup_window[i]

                        if is_new:
                            dedup_window.append(block.as_json)
                        for block in ready:
                            block.write_into(output)

                        # we only emit cues once they fall out of the duplicate window
                        continue
                    elif isinstance(block, webvtt.Magic):
                        # take care of MPEG PES timestamp overflow
                        if block.mpegts is None:
                            block.mpegts = 0
                        extra_state.setdefault('webvtt_mpegts_adjust', 0)
                        block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
                        if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
                            overflow = True
                            block.mpegts += 1 << 33
                        mpegts_last = block.mpegts

                        if frag_index == 1:
                            extra_state['webvtt_mpegts'] = block.mpegts or 0
                            extra_state['webvtt_local'] = block.local or 0
                            # XXX: block.local = block.mpegts = None ?
                        else:
                            if block.mpegts is not None and block.local is not None:
                                adjust = (
                                    (block.mpegts - extra_state.get('webvtt_mpegts', 0))
                                    - (block.local - extra_state.get('webvtt_local', 0))
                                )
                        continue
                    elif isinstance(block, webvtt.HeaderBlock):
                        if frag_index != 1:
                            # XXX: this should probably be silent as well
                            # or verify that all segments contain the same data
                            self.report_warning(bug_reports_message(
                                'Discarding a %s block found in the middle of the stream; '
                                'if the subtitles display incorrectly,'
                                % (type(block).__name__)))
                            continue
                    block.write_into(output)

                return output.getvalue().encode('utf-8')

            def fin_fragments():
                dedup_window = extra_state.get('webvtt_dedup_window')
                if not dedup_window:
                    return b''

                output = io.StringIO()
                for cue in dedup_window:
                    webvtt.CueBlock.from_json(cue).write_into(output)

                return output.getvalue().encode('utf-8')

            self.download_and_append_fragments(
                ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments)
        else:
        if is_webvtt:
            def pack_fragment(frag_content, frag_index):
                output = io.StringIO()
                adjust = 0
                for block in webvtt.parse_fragment(frag_content):
                    if isinstance(block, webvtt.CueBlock):
                        block.start += adjust
                        block.end += adjust

                        dedup_window = extra_state.setdefault('webvtt_dedup_window', [])
                        cue = block.as_json

                        # skip the cue if an identical one appears
                        # in the window of potential duplicates
                        # and prune the window of unviable candidates
                        i = 0
                        skip = True
                        while i < len(dedup_window):
                            window_cue = dedup_window[i]
                            if window_cue == cue:
                                break
                            if window_cue['end'] >= cue['start']:
                                i += 1
                                continue
                            del dedup_window[i]
                        else:
                            skip = False

                        if skip:
                            continue

                        # add the cue to the window
                        dedup_window.append(cue)
                    elif isinstance(block, webvtt.Magic):
                        # take care of MPEG PES timestamp overflow
                        if block.mpegts is None:
                            block.mpegts = 0
                        extra_state.setdefault('webvtt_mpegts_adjust', 0)
                        block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
                        if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
                            extra_state['webvtt_mpegts_adjust'] += 1
                            block.mpegts += 1 << 33
                        extra_state['webvtt_mpegts_last'] = block.mpegts

                        if frag_index == 1:
                            extra_state['webvtt_mpegts'] = block.mpegts or 0
                            extra_state['webvtt_local'] = block.local or 0
                            # XXX: block.local = block.mpegts = None ?
                        else:
                            if block.mpegts is not None and block.local is not None:
                                adjust = (
                                    (block.mpegts - extra_state.get('webvtt_mpegts', 0))
                                    - (block.local - extra_state.get('webvtt_local', 0))
                                )
                        continue
                    elif isinstance(block, webvtt.HeaderBlock):
                        if frag_index != 1:
                            # XXX: this should probably be silent as well
                            # or verify that all segments contain the same data
                            self.report_warning(bug_reports_message(
                                'Discarding a %s block found in the middle of the stream; '
                                'if the subtitles display incorrectly,'
                                % (type(block).__name__)))
                            continue
                    block.write_into(output)

                return output.getvalue().encode('utf-8')
        else:
            pack_fragment = None
        self.download_and_append_fragments(ctx, fragments, info_dict, pack_fragment)
        return True
        return self.download_and_append_fragments(ctx, fragments, info_dict)

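The "<< 33" arithmetic in both pack_fragment variants exists because MPEG PES timestamps are 33-bit tick counters at 90 kHz, so they wrap about every 2**33 / 90000 s, roughly 26.5 hours. The unwrapping logic in isolation, as a sketch:

    def unwrap_pts(pts, last_pts, wraps):
        """Lift a possibly wrapped 33-bit pts onto a monotonic timeline."""
        pts += wraps << 33
        if pts < last_pts:  # the counter wrapped since the previous sample
            wraps += 1
            pts += 1 << 33
        return pts, wraps
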
@@ -177,7 +177,7 @@ class HttpFD(FileDownloader):
                        'status': 'finished',
                        'downloaded_bytes': ctx.resume_len,
                        'total_bytes': ctx.resume_len,
                    })
                    }, info_dict)
                    raise SucceedDownload()
                else:
                    # The length does not match, we start the download over
@@ -238,7 +238,7 @@ class HttpFD(FileDownloader):
        while True:
            try:
                # Download and write
                data_block = ctx.data.read(block_size if data_len is None else min(block_size, data_len - byte_counter))
                data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            # socket.timeout is a subclass of socket.error but may not have
            # errno set
            except socket.timeout as e:
@@ -310,7 +310,7 @@ class HttpFD(FileDownloader):
                'eta': eta,
                'speed': speed,
                'elapsed': now - ctx.start_time,
            })
            }, info_dict)

            if data_len is not None and byte_counter == data_len:
                break
@@ -357,7 +357,7 @@ class HttpFD(FileDownloader):
            'filename': ctx.filename,
            'status': 'finished',
            'elapsed': time.time() - ctx.start_time,
        })
        }, info_dict)

        return True

@@ -246,7 +246,7 @@ class IsmFD(FragmentFD):
            'total_frags': len(segments),
        }

        self._prepare_and_start_frag_download(ctx)
        self._prepare_and_start_frag_download(ctx, info_dict)

        extra_state = ctx.setdefault('extra_state', {
            'ism_track_written': False,
@@ -284,6 +284,6 @@ class IsmFD(FragmentFD):
                    self.report_error('giving up after %s fragment retries' % fragment_retries)
                    return False

        self._finish_frag_download(ctx)
        self._finish_frag_download(ctx, info_dict)

        return True

@@ -122,7 +122,7 @@ body > figure > img {
            'total_frags': len(fragments),
        }

        self._prepare_and_start_frag_download(ctx)
        self._prepare_and_start_frag_download(ctx, info_dict)

        extra_state = ctx.setdefault('extra_state', {
            'header_written': False,
@@ -198,5 +198,5 @@ body > figure > img {

        ctx['dest_stream'].write(
            b'--%b--\r\n\r\n' % frag_boundary.encode('us-ascii'))
        self._finish_frag_download(ctx)
        self._finish_frag_download(ctx, info_dict)
        return True

@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import threading

from .common import FileDownloader
from ..downloader import _get_real_downloader
from ..downloader import get_suitable_downloader
from ..extractor.niconico import NiconicoIE
from ..compat import compat_urllib_request

@@ -20,7 +20,7 @@ class NiconicoDmcFD(FileDownloader):
        ie = NiconicoIE(self.ydl)
        info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)

        fd = _get_real_downloader(info_dict, params=self.params)(self.ydl, self.params)
        fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)

        success = download_complete = False
        timer = [None]

@@ -66,7 +66,7 @@ class RtmpFD(FileDownloader):
                        'eta': eta,
                        'elapsed': time_now - start,
                        'speed': speed,
                    })
                    }, info_dict)
                    cursor_in_new_line = False
                else:
                    # no percent for live streams
@@ -82,7 +82,7 @@ class RtmpFD(FileDownloader):
                        'status': 'downloading',
                        'elapsed': time_now - start,
                        'speed': speed,
                    })
                    }, info_dict)
                    cursor_in_new_line = False
            elif self.params.get('verbose', False):
                if not cursor_in_new_line:
@@ -208,7 +208,7 @@ class RtmpFD(FileDownloader):
                'filename': filename,
                'status': 'finished',
                'elapsed': time.time() - started,
            })
            }, info_dict)
            return True
        else:
            self.to_stderr('\n')

@@ -39,7 +39,7 @@ class RtspFD(FileDownloader):
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            }, info_dict)
            return True
        else:
            self.to_stderr('\n')

@@ -44,7 +44,7 @@ class YoutubeLiveChatFD(FragmentFD):
            return self._download_fragment(ctx, url, info_dict, http_headers, data)

        def parse_actions_replay(live_chat_continuation):
            offset = continuation_id = None
            offset = continuation_id = click_tracking_params = None
            processed_fragment = bytearray()
            for action in live_chat_continuation.get('actions', []):
                if 'replayChatItemAction' in action:
@@ -53,17 +53,34 @@ class YoutubeLiveChatFD(FragmentFD):
                processed_fragment.extend(
                    json.dumps(action, ensure_ascii=False).encode('utf-8') + b'\n')
            if offset is not None:
                continuation_id = try_get(
                continuation = try_get(
                    live_chat_continuation,
                    lambda x: x['continuations'][0]['liveChatReplayContinuationData']['continuation'])
                    lambda x: x['continuations'][0]['liveChatReplayContinuationData'], dict)
                if continuation:
                    continuation_id = continuation.get('continuation')
                    click_tracking_params = continuation.get('clickTrackingParams')
            self._append_fragment(ctx, processed_fragment)
            return continuation_id, offset
            return continuation_id, offset, click_tracking_params

        def try_refresh_replay_beginning(live_chat_continuation):
            # choose the second option that contains the unfiltered live chat replay
            refresh_continuation = try_get(
                live_chat_continuation,
                lambda x: x['header']['liveChatHeaderRenderer']['viewSelector']['sortFilterSubMenuRenderer']['subMenuItems'][1]['continuation']['reloadContinuationData'], dict)
            if refresh_continuation:
                # no data yet but required to call _append_fragment
                self._append_fragment(ctx, b'')
                refresh_continuation_id = refresh_continuation.get('continuation')
                offset = 0
                click_tracking_params = refresh_continuation.get('trackingParams')
                return refresh_continuation_id, offset, click_tracking_params
            return parse_actions_replay(live_chat_continuation)

        live_offset = 0

        def parse_actions_live(live_chat_continuation):
            nonlocal live_offset
            continuation_id = None
            continuation_id = click_tracking_params = None
            processed_fragment = bytearray()
            for action in live_chat_continuation.get('actions', []):
                timestamp = self.parse_live_timestamp(action)
@@ -84,45 +101,52 @@ class YoutubeLiveChatFD(FragmentFD):
            continuation_data = try_get(live_chat_continuation, continuation_data_getters, dict)
            if continuation_data:
                continuation_id = continuation_data.get('continuation')
                click_tracking_params = continuation_data.get('clickTrackingParams')
                timeout_ms = int_or_none(continuation_data.get('timeoutMs'))
                if timeout_ms is not None:
                    time.sleep(timeout_ms / 1000)
            self._append_fragment(ctx, processed_fragment)
            return continuation_id, live_offset
            return continuation_id, live_offset, click_tracking_params

        if info_dict['protocol'] == 'youtube_live_chat_replay':
            parse_actions = parse_actions_replay
        elif info_dict['protocol'] == 'youtube_live_chat':
            parse_actions = parse_actions_live

        def download_and_parse_fragment(url, frag_index, request_data, headers):
        def download_and_parse_fragment(url, frag_index, request_data=None, headers=None):
            count = 0
            while count <= fragment_retries:
                try:
                    success, raw_fragment = dl_fragment(url, request_data, headers)
                    if not success:
                        return False, None, None
                    data = json.loads(raw_fragment)
                        return False, None, None, None
                    try:
                        data = ie.extract_yt_initial_data(video_id, raw_fragment.decode('utf-8', 'replace'))
                    except RegexNotFoundError:
                        data = None
                    if not data:
                        data = json.loads(raw_fragment)
                    live_chat_continuation = try_get(
                        data,
                        lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}
                    continuation_id, offset = parse_actions(live_chat_continuation)
                    return True, continuation_id, offset
                    if info_dict['protocol'] == 'youtube_live_chat_replay':
                        if frag_index == 1:
                            continuation_id, offset, click_tracking_params = try_refresh_replay_beginning(live_chat_continuation)
                        else:
                            continuation_id, offset, click_tracking_params = parse_actions_replay(live_chat_continuation)
                    elif info_dict['protocol'] == 'youtube_live_chat':
                        continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
                    return True, continuation_id, offset, click_tracking_params
                except compat_urllib_error.HTTPError as err:
                    count += 1
                    if count <= fragment_retries:
                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
            if count > fragment_retries:
                self.report_error('giving up after %s fragment retries' % fragment_retries)
                return False, None, None
                return False, None, None, None

        self._prepare_and_start_frag_download(ctx)
        self._prepare_and_start_frag_download(ctx, info_dict)

        success, raw_fragment = dl_fragment(info_dict['url'])
        if not success:
            return False
        try:
            data = ie._extract_yt_initial_data(video_id, raw_fragment.decode('utf-8', 'replace'))
            data = ie.extract_yt_initial_data(video_id, raw_fragment.decode('utf-8', 'replace'))
        except RegexNotFoundError:
            return False
        continuation_id = try_get(
@@ -131,7 +155,7 @@ class YoutubeLiveChatFD(FragmentFD):
        # no data yet but required to call _append_fragment
        self._append_fragment(ctx, b'')

        ytcfg = ie._extract_ytcfg(video_id, raw_fragment.decode('utf-8', 'replace'))
        ytcfg = ie.extract_ytcfg(video_id, raw_fragment.decode('utf-8', 'replace'))

        if not ytcfg:
            return False
@@ -142,10 +166,13 @@ class YoutubeLiveChatFD(FragmentFD):
        visitor_data = try_get(innertube_context, lambda x: x['client']['visitorData'], str)
        if info_dict['protocol'] == 'youtube_live_chat_replay':
            url = 'https://www.youtube.com/youtubei/v1/live_chat/get_live_chat_replay?key=' + api_key
            chat_page_url = 'https://www.youtube.com/live_chat_replay?continuation=' + continuation_id
        elif info_dict['protocol'] == 'youtube_live_chat':
            url = 'https://www.youtube.com/youtubei/v1/live_chat/get_live_chat?key=' + api_key
            chat_page_url = 'https://www.youtube.com/live_chat?continuation=' + continuation_id

        frag_index = offset = 0
        click_tracking_params = None
        while continuation_id is not None:
            frag_index += 1
            request_data = {
@@ -154,17 +181,22 @@ class YoutubeLiveChatFD(FragmentFD):
            }
            if frag_index > 1:
                request_data['currentPlayerState'] = {'playerOffsetMs': str(max(offset - 5000, 0))}
            headers = ie._generate_api_headers(ytcfg, visitor_data=visitor_data)
            headers.update({'content-type': 'application/json'})
            fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode('utf-8') + b'\n'
            success, continuation_id, offset = download_and_parse_fragment(
                url, frag_index, fragment_request_data, headers)
                if click_tracking_params:
                    request_data['context']['clickTracking'] = {'clickTrackingParams': click_tracking_params}
                headers = ie.generate_api_headers(ytcfg, visitor_data=visitor_data)
                headers.update({'content-type': 'application/json'})
                fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode('utf-8') + b'\n'
                success, continuation_id, offset, click_tracking_params = download_and_parse_fragment(
                    url, frag_index, fragment_request_data, headers)
            else:
                success, continuation_id, offset, click_tracking_params = download_and_parse_fragment(
                    chat_page_url, frag_index)
            if not success:
                return False
            if test:
                break

        self._finish_frag_download(ctx)
        self._finish_frag_download(ctx, info_dict)
        return True

    @staticmethod

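For reference, a sketch of the JSON body the loop above posts to the live-chat endpoint once a continuation is known; every value is a placeholder, and the exact field set is whatever request_data accumulates in the code:

    request_data = {
        'context': {
            'clickTracking': {'clickTrackingParams': '<params from previous response>'},
        },
        'currentPlayerState': {'playerOffsetMs': '15000'},  # replay only: offset - 5000, floored at 0
    }
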
@@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .amp import AMPIE
from .common import InfoExtractor
@@ -59,7 +58,7 @@ class AbcNewsVideoIE(AMPIE):
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        mobj = self._match_valid_url(url)
        display_id = mobj.group('display_id')
        video_id = mobj.group('id')
        info_dict = self._extract_feed_info(

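This is the first of many extractor hunks below that swap re.match(self._VALID_URL, url) for the shared _match_valid_url helper. A hedged sketch of what such a helper amounts to (the real one lives on InfoExtractor and may cache the compiled pattern):

    import re

    class InfoExtractor:
        _VALID_URL = r'https?://example\.com/(?P<id>\d+)'  # per-extractor pattern

        def _match_valid_url(self, url):
            # equivalent to re.match(self._VALID_URL, url), centralized in one place
            return re.match(self._VALID_URL, url)
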
@@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
@@ -55,7 +54,7 @@ class ABCOTVSIE(InfoExtractor):
    }

    def _real_extract(self, url):
        site, display_id, video_id = re.match(self._VALID_URL, url).groups()
        site, display_id, video_id = self._match_valid_url(url).groups()
        display_id = display_id or video_id
        station = self._SITE_MAP[site]

@@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
@@ -80,7 +79,7 @@ class ACastIE(ACastBaseIE):
    }]

    def _real_extract(self, url):
        channel, display_id = re.match(self._VALID_URL, url).groups()
        channel, display_id = self._match_valid_url(url).groups()
        episode = self._call_api(
            '%s/episodes/%s' % (channel, display_id),
            display_id, {'showInfo': 'true'})

@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re
import time
import xml.etree.ElementTree as etree
@@ -61,6 +62,11 @@ MSO_INFO = {
        'username_field': 'IDToken1',
        'password_field': 'IDToken2',
    },
    'Spectrum': {
        'name': 'Spectrum',
        'username_field': 'IDToken1',
        'password_field': 'IDToken2',
    },
    'Philo': {
        'name': 'Philo',
        'username_field': 'ident'
@@ -70,6 +76,11 @@ MSO_INFO = {
        'username_field': 'IDToken1',
        'password_field': 'IDToken2',
    },
    'Cablevision': {
        'name': 'Optimum/Cablevision',
        'username_field': 'j_username',
        'password_field': 'j_password',
    },
    'thr030': {
        'name': '3 Rivers Communications'
    },
@@ -1324,6 +1335,11 @@ MSO_INFO = {
    'cou060': {
        'name': 'Zito Media'
    },
    'slingtv': {
        'name': 'Sling TV',
        'username_field': 'username',
        'password_field': 'password',
    },
}

@@ -1492,7 +1508,8 @@ class AdobePassIE(InfoExtractor):
                    # In general, if you're connecting from a Verizon-assigned IP,
                    # you will not actually pass your credentials.
                    provider_redirect_page, urlh = provider_redirect_page_res
                    if 'Please wait ...' in provider_redirect_page:
                    # From non-Verizon IP, still gave 'Please wait', but noticed N==Y; will need to try on Verizon IP
                    if 'Please wait ...' in provider_redirect_page and '\'N\'== "Y"' not in provider_redirect_page:
                        saml_redirect_url = self._html_search_regex(
                            r'self\.parent\.location=(["\'])(?P<url>.+?)\1',
                            provider_redirect_page,
@@ -1500,7 +1517,8 @@ class AdobePassIE(InfoExtractor):
                        saml_login_page = self._download_webpage(
                            saml_redirect_url, video_id,
                            'Downloading SAML Login Page')
                    else:
                    elif 'Verizon FiOS - sign in' in provider_redirect_page:
                        # FXNetworks from non-Verizon IP
                        saml_login_page_res = post_form(
                            provider_redirect_page_res, 'Logging in', {
                                mso_info['username_field']: username,
@@ -1510,6 +1528,26 @@ class AdobePassIE(InfoExtractor):
                        if 'Please try again.' in saml_login_page:
                            raise ExtractorError(
                                'We\'re sorry, but either the User ID or Password entered is not correct.')
                    else:
                        # ABC from non-Verizon IP
                        saml_redirect_url = self._html_search_regex(
                            r'var\surl\s*=\s*(["\'])(?P<url>.+?)\1',
                            provider_redirect_page,
                            'SAML Redirect URL', group='url')
                        saml_redirect_url = saml_redirect_url.replace(r'\/', '/')
                        saml_redirect_url = saml_redirect_url.replace(r'\-', '-')
                        saml_redirect_url = saml_redirect_url.replace(r'\x26', '&')
                        saml_login_page = self._download_webpage(
                            saml_redirect_url, video_id,
                            'Downloading SAML Login Page')
                        saml_login_page, urlh = post_form(
                            [saml_login_page, saml_redirect_url], 'Logging in', {
                                mso_info['username_field']: username,
                                mso_info['password_field']: password,
                            })
                        if 'Please try again.' in saml_login_page:
                            raise ExtractorError(
                                'Failed to login, incorrect User ID or Password.')
                    saml_login_url = self._search_regex(
                        r'xmlHttp\.open\("POST"\s*,\s*(["\'])(?P<url>.+?)\1',
                        saml_login_page, 'SAML Login URL', group='url')
@@ -1524,6 +1562,75 @@ class AdobePassIE(InfoExtractor):
                    }), headers={
                        'Content-Type': 'application/x-www-form-urlencoded'
                    })
                elif mso_id == 'Spectrum':
                    # Spectrum's login form is dynamically loaded via JS so we need to hardcode the flow
                    # as a one-off implementation.
                    provider_redirect_page, urlh = provider_redirect_page_res
                    provider_login_page_res = post_form(
                        provider_redirect_page_res, self._DOWNLOADING_LOGIN_PAGE)
                    saml_login_page, urlh = provider_login_page_res
                    relay_state = self._search_regex(
                        r'RelayState\s*=\s*"(?P<relay>.+?)";',
                        saml_login_page, 'RelayState', group='relay')
                    saml_request = self._search_regex(
                        r'SAMLRequest\s*=\s*"(?P<saml_request>.+?)";',
                        saml_login_page, 'SAMLRequest', group='saml_request')
                    login_json = {
                        mso_info['username_field']: username,
                        mso_info['password_field']: password,
                        'RelayState': relay_state,
                        'SAMLRequest': saml_request,
                    }
                    saml_response_json = self._download_json(
                        'https://tveauthn.spectrum.net/tveauthentication/api/v1/manualAuth', video_id,
                        'Downloading SAML Response',
                        data=json.dumps(login_json).encode(),
                        headers={
                            'Content-Type': 'application/json',
                            'Accept': 'application/json',
                        })
                    self._download_webpage(
                        saml_response_json['SAMLRedirectUri'], video_id,
                        'Confirming Login', data=urlencode_postdata({
                            'SAMLResponse': saml_response_json['SAMLResponse'],
                            'RelayState': relay_state,
                        }), headers={
                            'Content-Type': 'application/x-www-form-urlencoded'
                        })
                elif mso_id == 'slingtv':
                    # SlingTV has a meta-refresh based authentication, but also
                    # looks at the tab history to count the number of times the
                    # browser has been on a page

                    first_bookend_page, urlh = provider_redirect_page_res

                    hidden_data = self._hidden_inputs(first_bookend_page)
                    hidden_data['history'] = 1

                    provider_login_page_res = self._download_webpage_handle(
                        urlh.geturl(), video_id, 'Sending first bookend',
                        query=hidden_data)

                    provider_association_redirect, urlh = post_form(
                        provider_login_page_res, 'Logging in', {
                            mso_info['username_field']: username,
                            mso_info['password_field']: password
                        })

                    provider_refresh_redirect_url = extract_redirect_url(
                        provider_association_redirect, url=urlh.geturl())

                    last_bookend_page, urlh = self._download_webpage_handle(
                        provider_refresh_redirect_url, video_id,
                        'Downloading Auth Association Redirect Page')
                    hidden_data = self._hidden_inputs(last_bookend_page)
                    hidden_data['history'] = 3

                    mvpd_confirm_page_res = self._download_webpage_handle(
                        urlh.geturl(), video_id, 'Sending final bookend',
                        query=hidden_data)

                    post_form(mvpd_confirm_page_res, 'Confirming Login')
                else:
                    # Some providers (e.g. DIRECTV NOW) have another meta refresh
                    # based redirect that should be followed.
@@ -1536,10 +1643,13 @@ class AdobePassIE(InfoExtractor):
                        'Downloading Provider Redirect Page (meta refresh)')
                    provider_login_page_res = post_form(
                        provider_redirect_page_res, self._DOWNLOADING_LOGIN_PAGE)
                    mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
                    form_data = {
                        mso_info.get('username_field', 'username'): username,
                        mso_info.get('password_field', 'password'): password,
                    })
                        mso_info.get('password_field', 'password'): password
                    }
                    if mso_id == 'Cablevision':
                        form_data['_eventId_proceed'] = ''
                    mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', form_data)
                    if mso_id != 'Rogers':
                        post_form(mvpd_confirm_page_res, 'Confirming Login')

@@ -132,7 +132,7 @@ class AdobeTVIE(AdobeTVBaseIE):
    }

    def _real_extract(self, url):
        language, show_urlname, urlname = re.match(self._VALID_URL, url).groups()
        language, show_urlname, urlname = self._match_valid_url(url).groups()
        if not language:
            language = 'en'

@@ -178,7 +178,7 @@ class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
    _process_data = AdobeTVBaseIE._parse_video_data

    def _real_extract(self, url):
        language, show_urlname = re.match(self._VALID_URL, url).groups()
        language, show_urlname = self._match_valid_url(url).groups()
        if not language:
            language = 'en'
        query = {
@@ -215,7 +215,7 @@ class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
            show_data['url'], 'AdobeTVShow', str_or_none(show_data.get('id')))

    def _real_extract(self, url):
        language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups()
        language, channel_urlname, category_urlname = self._match_valid_url(url).groups()
        if not language:
            language = 'en'
        query = {

@@ -2,7 +2,6 @@
from __future__ import unicode_literals

import json
import re

from .turner import TurnerBaseIE
from ..utils import (
@@ -89,7 +88,7 @@ class AdultSwimIE(TurnerBaseIE):
    }]

    def _real_extract(self, url):
        show_path, episode_path = re.match(self._VALID_URL, url).groups()
        show_path, episode_path = self._match_valid_url(url).groups()
        display_id = episode_path or show_path
        query = '''query {
  getShowBySlug(slug:"%s") {

@@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .theplatform import ThePlatformIE
from ..utils import (
@@ -20,8 +19,8 @@ class AENetworksBaseIE(ThePlatformIE):
                        (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
                        fyi\.tv
                    )/'''
    _THEPLATFORM_KEY = 'crazyjava'
    _THEPLATFORM_SECRET = 's3cr3t'
    _THEPLATFORM_KEY = '43jXaGRQud'
    _THEPLATFORM_SECRET = 'S10BPXHMlb'
    _DOMAIN_MAP = {
        'history.com': ('HISTORY', 'history'),
        'aetv.com': ('AETV', 'aetv'),
@@ -170,7 +169,7 @@ class AENetworksIE(AENetworksBaseIE):
    }]

    def _real_extract(self, url):
        domain, canonical = re.match(self._VALID_URL, url).groups()
        domain, canonical = self._match_valid_url(url).groups()
        return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)


@@ -187,7 +186,7 @@ class AENetworksListBaseIE(AENetworksBaseIE):
        }))['data'][resource]

    def _real_extract(self, url):
        domain, slug = re.match(self._VALID_URL, url).groups()
        domain, slug = self._match_valid_url(url).groups()
        _, brand = self._DOMAIN_MAP[domain]
        playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
        base_url = 'http://watch.%s' % domain
@@ -309,7 +308,7 @@ class HistoryPlayerIE(AENetworksBaseIE):
    _TESTS = []

    def _real_extract(self, url):
        domain, video_id = re.match(self._VALID_URL, url).groups()
        domain, video_id = self._match_valid_url(url).groups()
        return self._extract_aetn_info(domain, 'id', video_id, url)

@@ -6,9 +6,11 @@ import re
from .common import InfoExtractor
from ..compat import compat_xpath
from ..utils import (
    date_from_str,
    determine_ext,
    ExtractorError,
    int_or_none,
    unified_strdate,
    url_or_none,
    urlencode_postdata,
    xpath_text,
@@ -237,6 +239,7 @@ class AfreecaTVIE(InfoExtractor):
            r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id)

        partial_view = False
        adult_view = False
        for _ in range(2):
            query = {
                'nTitleNo': video_id,
@@ -245,6 +248,8 @@ class AfreecaTVIE(InfoExtractor):
            }
            if partial_view:
                query['partialView'] = 'SKIP_ADULT'
            if adult_view:
                query['adultView'] = 'ADULT_VIEW'
            video_xml = self._download_xml(
                'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
                video_id, 'Downloading video info XML%s'
@@ -264,6 +269,9 @@ class AfreecaTVIE(InfoExtractor):
                    partial_view = True
                    continue
                elif flag == 'ADULT':
                    if not adult_view:
                        adult_view = True
                        continue
                    error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
                else:
                    error = flag
@@ -309,8 +317,15 @@ class AfreecaTVIE(InfoExtractor):
                if not file_url:
                    continue
                key = file_element.get('key', '')
                upload_date = self._search_regex(
                    r'^(\d{8})_', key, 'upload date', default=None)
                upload_date = unified_strdate(self._search_regex(
                    r'^(\d{8})_', key, 'upload date', default=None))
                if upload_date is not None:
                    # sometimes the upload date isn't included in the file name
                    # instead, another random ID is, which may parse as a valid
                    # date but be wildly out of a reasonable range
                    parsed_date = date_from_str(upload_date)
                    if parsed_date.year < 2000 or parsed_date.year >= 2100:
                        upload_date = None
                file_duration = int_or_none(file_element.get('duration'))
                format_id = key if key else '%s_%s' % (video_id, file_num)
                if determine_ext(file_url) == 'm3u8':

@@ -1,7 +1,6 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor

@@ -32,7 +31,7 @@ class AlJazeeraIE(InfoExtractor):
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'

    def _real_extract(self, url):
        post_type, name = re.match(self._VALID_URL, url).groups()
        post_type, name = self._match_valid_url(url).groups()
        post_type = {
            'features': 'post',
            'program': 'episode',
@@ -40,7 +39,7 @@ class AlJazeeraIE(InfoExtractor):
        }[post_type.split('/')[0]]
        video = self._download_json(
            'https://www.aljazeera.com/graphql', name, query={
                'operationName': 'SingleArticleQuery',
                'operationName': 'ArchipelagoSingleArticleQuery',
                'variables': json.dumps({
                    'name': name,
                    'postType': post_type,

@@ -42,8 +42,7 @@ class AluraIE(InfoExtractor):

    def _real_extract(self, url):

        video_id = self._match_id(url)
        course = self._search_regex(self._VALID_URL, url, 'post url', group='course_name')
        course, video_id = self._match_valid_url(url).group('course_name', 'id')
        video_url = self._VIDEO_URL % (course, video_id)

        video_dict = self._download_json(video_url, video_id, 'Searching for videos')

@@ -63,7 +63,7 @@ class AMCNetworksIE(ThePlatformIE):
    }

    def _real_extract(self, url):
        site, display_id = re.match(self._VALID_URL, url).groups()
        site, display_id = self._match_valid_url(url).groups()
        requestor_id = self._REQUESTOR_ID_MAP[site]
        page_data = self._download_json(
            'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'

@@ -2,7 +2,6 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
@@ -69,7 +68,7 @@ class AmericasTestKitchenIE(InfoExtractor):
    }]

    def _real_extract(self, url):
        resource_type, video_id = re.match(self._VALID_URL, url).groups()
        resource_type, video_id = self._match_valid_url(url).groups()
        is_episode = resource_type == 'episode'
        if is_episode:
            resource_type = 'episodes'
@@ -114,7 +113,7 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
    }]

    def _real_extract(self, url):
        show_name, season_number = re.match(self._VALID_URL, url).groups()
        show_name, season_number = self._match_valid_url(url).groups()
        season_number = int(season_number)

        slug = 'atk' if show_name == 'americastestkitchen' else 'cco'

@@ -390,7 +390,7 @@ class AnvatoIE(InfoExtractor):
            'countries': smuggled_data.get('geo_countries'),
        })

        mobj = re.match(self._VALID_URL, url)
        mobj = self._match_valid_url(url)
        access_key, video_id = mobj.group('access_key_or_mcp', 'id')
        if access_key not in self._ANVACK_TABLE:
            access_key = self._MCP_TO_ACCESS_KEY_TABLE.get(

@@ -4,13 +4,10 @@ from __future__ import unicode_literals
 import re

 from .yahoo import YahooIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     url_or_none,
 )
@@ -119,7 +116,7 @@ class AolIE(YahooIE):
                     'height': int(mobj.group(2)),
                 })
             else:
-                qs = compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)
+                qs = parse_qs(video_url)
                 f.update({
                     'width': int_or_none(qs.get('w', [None])[0]),
                     'height': int_or_none(qs.get('h', [None])[0]),
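The other recurring substitution in this compare — `compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)` collapsing to `parse_qs(video_url)` — folds the two-step query-string parse into one utility. In essence (a sketch; the actual helper is defined in yt_dlp/utils.py):

```python
from urllib.parse import parse_qs as _parse_qs, urlparse

def parse_qs(url):
    # Extract the query component of a full URL and parse it into a
    # dict mapping each key to a list of values.
    return _parse_qs(urlparse(url).query)

qs = parse_qs('https://example.com/video?w=640&h=360')
# {'w': ['640'], 'h': ['360']}  ->  qs.get('w', [None])[0] == '640'
```

This is why the `.get('w', [None])[0]` indexing pattern in the surrounding code is unchanged: the helper keeps the list-valued dict shape.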
@@ -42,7 +42,7 @@ class APAIE(InfoExtractor):
             webpage)]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id, base_url = mobj.group('id', 'base_url')

         webpage = self._download_webpage(
@@ -94,7 +94,7 @@ class AppleTrailersIE(InfoExtractor):
     _JSON_RE = r'iTunes.playURL\((.*?)\);'

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         movie = mobj.group('movie')
         uploader_id = mobj.group('company')
@@ -9,8 +9,6 @@ from .youtube import YoutubeIE
 from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
-    compat_urlparse,
-    compat_parse_qs,
     compat_HTTPError
 )
 from ..utils import (
@@ -25,6 +23,7 @@ from ..utils import (
     merge_dicts,
     mimetype2ext,
     parse_duration,
+    parse_qs,
     RegexNotFoundError,
     str_to_int,
     str_or_none,
@@ -399,7 +398,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
                 expected=True)
             raise
         video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
-        video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)
+        video_file_url_qs = parse_qs(video_file_url)

         # Attempt to recover any ext & format info from playback url
         format = {'url': video_file_url}
@@ -86,7 +86,7 @@ class ArcPublishingIE(InfoExtractor):
         return entries

     def _real_extract(self, url):
-        org, uuid = re.match(self._VALID_URL, url).groups()
+        org, uuid = self._match_valid_url(url).groups()
         for orgs, tmpl in self._POWA_DEFAULTS:
             if org in orgs:
                 base_api_tmpl = tmpl
@@ -199,7 +199,7 @@ class ARDMediathekIE(ARDMediathekBaseIE):

     def _real_extract(self, url):
         # determine video id from url
-        m = re.match(self._VALID_URL, url)
+        m = self._match_valid_url(url)

         document_id = None
@@ -325,7 +325,7 @@ class ARDIE(InfoExtractor):
     }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         display_id = mobj.group('id')

         player_url = mobj.group('mainurl') + '~playerXml.xml'
@@ -525,7 +525,7 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
         return self.playlist_result(entries, playlist_title=display_id)

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('video_id')
         display_id = mobj.group('display_id')
         if display_id:
@@ -4,12 +4,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     float_or_none,
     int_or_none,
     parse_iso8601,
+    parse_qs,
     try_get,
 )
@@ -63,13 +63,13 @@ class ArkenaIE(InfoExtractor):
         return mobj.group('url')

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         account_id = mobj.group('account_id')

         # Handle http://video.arkena.com/play2/embed/player URL
         if not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('mediaId', [None])[0]
             account_id = qs.get('accountId', [None])[0]
             if not video_id or not account_id:
@@ -6,11 +6,11 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     qualities,
     try_get,
     unified_strdate,
@@ -49,7 +49,7 @@ class ArteTVIE(ArteTVBaseIE):
     }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         lang = mobj.group('lang') or mobj.group('lang_2')
@@ -204,7 +204,7 @@ class ArteTVEmbedIE(InfoExtractor):
         webpage)]

     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         json_url = qs['json_url'][0]
         video_id = ArteTVIE._match_id(json_url)
         return self.url_result(
@@ -227,7 +227,7 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
     }]

     def _real_extract(self, url):
-        lang, playlist_id = re.match(self._VALID_URL, url).groups()
+        lang, playlist_id = self._match_valid_url(url).groups()
         collection = self._download_json(
             '%s/collectionData/%s/%s?source=videos'
             % (self._API_BASE, lang, playlist_id), playlist_id)
@@ -111,7 +111,7 @@ class AsianCrushIE(AsianCrushBaseIE):
     }]

     def _real_extract(self, url):
-        host, video_id = re.match(self._VALID_URL, url).groups()
+        host, video_id = self._match_valid_url(url).groups()

         if host == 'cocoro.tv':
             webpage = self._download_webpage(url, video_id)
@@ -161,7 +161,7 @@ class AsianCrushPlaylistIE(AsianCrushBaseIE):
             yield self._parse_video_data(video)

     def _real_extract(self, url):
-        host, playlist_id = re.match(self._VALID_URL, url).groups()
+        host, playlist_id = self._match_valid_url(url).groups()

         if host == 'cocoro.tv':
             webpage = self._download_webpage(url, playlist_id)
@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor
 from ..compat import compat_HTTPError
@@ -75,7 +74,7 @@ class AtresPlayerIE(InfoExtractor):
         self._request_webpage(target_url, None, 'Following Target URL')

     def _real_extract(self, url):
-        display_id, video_id = re.match(self._VALID_URL, url).groups()
+        display_id, video_id = self._match_valid_url(url).groups()

         try:
             episode = self._download_json(
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
+    dict_get,
     int_or_none,
     unescapeHTML,
 )
@@ -12,64 +13,62 @@ from ..utils import (
 class ATVAtIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?atv\.at/(?:[^/]+/){2}(?P<id>[dv]\d+)'
     _TESTS = [{
-        'url': 'http://atv.at/aktuell/di-210317-2005-uhr/v1698449/',
-        'md5': 'c3b6b975fb3150fc628572939df205f2',
+        'url': 'https://www.atv.at/bauer-sucht-frau-die-zweite-chance/folge-1/d3390693/',
+        'md5': 'c471605591009dfb6e6c54f7e62e2807',
         'info_dict': {
-            'id': '1698447',
+            'id': '3390684',
             'ext': 'mp4',
-            'title': 'DI, 21.03.17 | 20:05 Uhr 1/1',
+            'title': 'Bauer sucht Frau - Die zweite Chance Folge 1',
         }
     }, {
-        'url': 'http://atv.at/aktuell/meinrad-knapp/d8416/',
+        'url': 'https://www.atv.at/bauer-sucht-frau-staffel-17/fuenfte-eventfolge/d3339537/',
         'only_matching': True,
     }]

+    def _process_source_entry(self, source, part_id):
+        source_url = source.get('url')
+        if not source_url:
+            return
+        if determine_ext(source_url) == 'm3u8':
+            return self._extract_m3u8_formats(
+                source_url, part_id, 'mp4', 'm3u8_native',
+                m3u8_id='hls', fatal=False)
+        else:
+            return [{
+                'url': source_url,
+            }]
+
+    def _process_entry(self, entry):
+        part_id = entry.get('id')
+        if not part_id:
+            return
+        formats = []
+        for source in entry.get('sources', []):
+            formats.extend(self._process_source_entry(source, part_id) or [])
+
+        self._sort_formats(formats)
+        return {
+            'id': part_id,
+            'title': entry.get('title'),
+            'duration': int_or_none(entry.get('duration')),
+            'formats': formats
+        }
+
     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         video_data = self._parse_json(unescapeHTML(self._search_regex(
-            [r'flashPlayerOptions\s*=\s*(["\'])(?P<json>(?:(?!\1).)+)\1',
-             r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="(?P<json>[^"]+)"'],
+            r'var\splaylist\s*=\s*(?P<json>\[.*\]);',
             webpage, 'player data', group='json')),
-            display_id)['config']['initial_video']
+            display_id)

-        video_id = video_data['id']
-        video_title = video_data['title']
-
-        parts = []
-        for part in video_data.get('parts', []):
-            part_id = part['id']
-            part_title = part['title']
-
-            formats = []
-            for source in part.get('sources', []):
-                source_url = source.get('src')
-                if not source_url:
-                    continue
-                ext = determine_ext(source_url)
-                if ext == 'm3u8':
-                    formats.extend(self._extract_m3u8_formats(
-                        source_url, part_id, 'mp4', 'm3u8_native',
-                        m3u8_id='hls', fatal=False))
-                else:
-                    formats.append({
-                        'format_id': source.get('delivery'),
-                        'url': source_url,
-                    })
-            self._sort_formats(formats)
-
-            parts.append({
-                'id': part_id,
-                'title': part_title,
-                'thumbnail': part.get('preview_image_url'),
-                'duration': int_or_none(part.get('duration')),
-                'is_live': part.get('is_livestream'),
-                'formats': formats,
-            })
+        first_video = video_data[0]
+        video_id = first_video['id']
+        video_title = dict_get(first_video, ('tvShowTitle', 'title'))

         return {
             '_type': 'multi_video',
             'id': video_id,
             'title': video_title,
-            'entries': parts,
+            'entries': (self._process_entry(entry) for entry in video_data),
         }
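The `dict_get(first_video, ('tvShowTitle', 'title'))` call introduced above returns the value of the first key that holds something usable, preferring the show title. A simplified sketch of that fallback behaviour (the real helper in yt_dlp/utils.py additionally accepts a default and a skip_false_values flag):

```python
def dict_get(d, keys, default=None):
    # Return the first truthy value among the given keys, else the default.
    for key in keys:
        value = d.get(key)
        if value:
            return value
    return default

dict_get({'tvShowTitle': None, 'title': 'Folge 1'}, ('tvShowTitle', 'title'))
# -> 'Folge 1'
```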
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import random
-import re

 from .common import InfoExtractor
 from ..utils import ExtractorError, try_get, compat_str, str_or_none
@@ -124,7 +123,7 @@ class AudiusIE(AudiusBaseIE):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         track_id = try_get(mobj, lambda x: x.group('track_id'))
         if track_id is None:
             title = mobj.group('title')
@@ -217,7 +216,7 @@ class AudiusPlaylistIE(AudiusBaseIE):

     def _real_extract(self, url):
         self._select_api_base()
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         title = mobj.group('title')
         # uploader = mobj.group('uploader')
         url = self._prepare_url(url, title)
@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re
 import base64

 from .common import InfoExtractor
@@ -22,7 +21,7 @@ class AWAANIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<id>\d+)/(?P<season_id>\d+))?'

     def _real_extract(self, url):
-        show_id, video_id, season_id = re.match(self._VALID_URL, url).groups()
+        show_id, video_id, season_id = self._match_valid_url(url).groups()
         if video_id and int(video_id) > 0:
             return self.url_result(
                 'http://awaan.ae/media/%s' % video_id, 'AWAANVideo')
@@ -154,7 +153,7 @@ class AWAANSeasonIE(InfoExtractor):

     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
-        show_id, season_id = re.match(self._VALID_URL, url).groups()
+        show_id, season_id = self._match_valid_url(url).groups()

         data = {}
         if season_id:
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import json
-import re

 from .common import InfoExtractor
 from .kaltura import KalturaIE
@@ -51,7 +50,7 @@ class AZMedienIE(InfoExtractor):
     _PARTNER_ID = '1719221'

     def _real_extract(self, url):
-        host, display_id, article_id, entry_id = re.match(self._VALID_URL, url).groups()
+        host, display_id, article_id, entry_id = self._match_valid_url(url).groups()

         if not entry_id:
             entry_id = self._download_json(
@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor
 from ..utils import unescapeHTML
@@ -33,7 +32,7 @@ class BaiduVideoIE(InfoExtractor):
             path, category, playlist_id), playlist_id, note)

     def _real_extract(self, url):
-        category, playlist_id = re.match(self._VALID_URL, url).groups()
+        category, playlist_id = self._match_valid_url(url).groups()
         if category == 'show':
             category = 'tvshow'
         if category == 'tv':
@@ -212,7 +212,7 @@ class BandcampIE(InfoExtractor):

 class BandcampAlbumIE(BandcampIE):
     IE_NAME = 'Bandcamp:album'
-    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<id>[^/?#&]+))?'
+    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?!/music)(?:/album/(?P<id>[^/?#&]+))?'

     _TESTS = [{
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -294,7 +294,7 @@ class BandcampAlbumIE(BandcampIE):
         else super(BandcampAlbumIE, cls).suitable(url))

     def _real_extract(self, url):
-        uploader_id, album_id = re.match(self._VALID_URL, url).groups()
+        uploader_id, album_id = self._match_valid_url(url).groups()
         playlist_id = album_id or uploader_id
         webpage = self._download_webpage(url, playlist_id)
         tralbum = self._extract_data_attr(webpage, playlist_id)
@@ -389,3 +389,43 @@ class BandcampWeeklyIE(BandcampIE):
         'episode_id': show_id,
         'formats': formats
     }
+
+
+class BandcampMusicIE(InfoExtractor):
+    _VALID_URL = r'https?://(?P<id>[^/]+)\.bandcamp\.com/music'
+    _TESTS = [{
+        'url': 'https://steviasphere.bandcamp.com/music',
+        'playlist_mincount': 47,
+        'info_dict': {
+            'id': 'steviasphere',
+        },
+    }, {
+        'url': 'https://coldworldofficial.bandcamp.com/music',
+        'playlist_mincount': 10,
+        'info_dict': {
+            'id': 'coldworldofficial',
+        },
+    }, {
+        'url': 'https://nuclearwarnowproductions.bandcamp.com/music',
+        'playlist_mincount': 399,
+        'info_dict': {
+            'id': 'nuclearwarnowproductions',
+        },
+    }
+    ]
+
+    _TYPE_IE_DICT = {
+        'album': BandcampAlbumIE.ie_key(),
+        'track': BandcampIE.ie_key()
+    }
+
+    def _real_extract(self, url):
+        id = self._match_id(url)
+        webpage = self._download_webpage(url, id)
+        items = re.findall(r'href\=\"\/(?P<path>(?P<type>album|track)+/[^\"]+)', webpage)
+        entries = [
+            self.url_result(
+                f'https://{id}.bandcamp.com/{item[0]}',
+                ie=self._TYPE_IE_DICT[item[1]])
+            for item in items]
+        return self.playlist_result(entries, id)
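The `(?!/music)` negative lookahead added to BandcampAlbumIE's `_VALID_URL` exists so that `/music` overview pages fall through to the new BandcampMusicIE above. A quick demonstration with test URLs taken from the diff:

```python
import re

# BandcampAlbumIE pattern after this change
album_re = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?!/music)(?:/album/(?P<id>[^/?#&]+))?'

# Album pages still match...
assert re.match(album_re, 'https://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1')
# ...but /music pages no longer do, since the lookahead fails right
# after 'bandcamp.com', leaving them to BandcampMusicIE.
assert not re.match(album_re, 'https://steviasphere.bandcamp.com/music')
```

Without the lookahead, the optional `/album/...` tail would let the album pattern match the bare `https://steviasphere.bandcamp.com` prefix of a `/music` URL.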
yt_dlp/extractor/bannedvideo.py (new file, 165 lines)
@@ -0,0 +1,165 @@
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    try_get,
+    int_or_none,
+    url_or_none,
+    float_or_none,
+    unified_timestamp,
+)
+
+
+class BannedVideoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?banned\.video/watch\?id=(?P<id>[0-f]{24})'
+    _TESTS = [{
+        'url': 'https://banned.video/watch?id=5e7a859644e02200c6ef5f11',
+        'md5': '14b6e81d41beaaee2215cd75c6ed56e4',
+        'info_dict': {
+            'id': '5e7a859644e02200c6ef5f11',
+            'ext': 'mp4',
+            'title': 'China Discovers Origin of Corona Virus: Issues Emergency Statement',
+            'thumbnail': r're:^https?://(?:www\.)?assets\.infowarsmedia.com/images/',
+            'description': 'md5:560d96f02abbebe6c6b78b47465f6b28',
+            'upload_date': '20200324',
+            'timestamp': 1585087895,
+        }
+    }]
+
+    _GRAPHQL_GETMETADATA_QUERY = '''
+query GetVideoAndComments($id: String!) {
+    getVideo(id: $id) {
+        streamUrl
+        directUrl
+        unlisted
+        live
+        tags {
+            name
+        }
+        title
+        summary
+        playCount
+        largeImage
+        videoDuration
+        channel {
+            _id
+            title
+        }
+        createdAt
+    }
+    getVideoComments(id: $id, limit: 999999, offset: 0) {
+        _id
+        content
+        user {
+            _id
+            username
+        }
+        voteCount {
+            positive
+        }
+        createdAt
+        replyCount
+    }
+}'''
+
+    _GRAPHQL_GETCOMMENTSREPLIES_QUERY = '''
+query GetCommentReplies($id: String!) {
+    getCommentReplies(id: $id, limit: 999999, offset: 0) {
+        _id
+        content
+        user {
+            _id
+            username
+        }
+        voteCount {
+            positive
+        }
+        createdAt
+        replyCount
+    }
+}'''
+
+    _GRAPHQL_QUERIES = {
+        'GetVideoAndComments': _GRAPHQL_GETMETADATA_QUERY,
+        'GetCommentReplies': _GRAPHQL_GETCOMMENTSREPLIES_QUERY,
+    }
+
+    def _call_api(self, video_id, id, operation, note):
+        return self._download_json(
+            'https://api.infowarsmedia.com/graphql', video_id, note=note,
+            headers={
+                'Content-Type': 'application/json; charset=utf-8'
+            }, data=json.dumps({
+                'variables': {'id': id},
+                'operationName': operation,
+                'query': self._GRAPHQL_QUERIES[operation]
+            }).encode('utf8')).get('data')
+
+    def _extract_comments(self, video_id, comments, comment_data):
+        for comment in comment_data.copy():
+            comment_id = comment.get('_id')
+            if comment.get('replyCount') > 0:
+                reply_json = self._call_api(
+                    video_id, comment_id, 'GetCommentReplies',
+                    f'Downloading replies for comment {comment_id}')
+                comments.extend(
+                    self._parse_comment(reply, comment_id)
+                    for reply in reply_json.get('getCommentReplies'))
+
+        return {
+            'comments': comments,
+            'comment_count': len(comments),
+        }
+
+    @staticmethod
+    def _parse_comment(comment_data, parent):
+        return {
+            'id': comment_data.get('_id'),
+            'text': comment_data.get('content'),
+            'author': try_get(comment_data, lambda x: x['user']['username']),
+            'author_id': try_get(comment_data, lambda x: x['user']['_id']),
+            'timestamp': unified_timestamp(comment_data.get('createdAt')),
+            'parent': parent,
+            'like_count': try_get(comment_data, lambda x: x['voteCount']['positive']),
+        }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        video_json = self._call_api(video_id, video_id, 'GetVideoAndComments', 'Downloading video metadata')
+        video_info = video_json['getVideo']
+        is_live = video_info.get('live')
+        comments = [self._parse_comment(comment, 'root') for comment in video_json.get('getVideoComments')]
+
+        formats = [{
+            'format_id': 'direct',
+            'quality': 1,
+            'url': video_info.get('directUrl'),
+            'ext': 'mp4',
+        }] if url_or_none(video_info.get('directUrl')) else []
+        if video_info.get('streamUrl'):
+            formats.extend(self._extract_m3u8_formats(
+                video_info.get('streamUrl'), video_id, 'mp4',
+                entry_protocol='m3u8_native', m3u8_id='hls', live=True))
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video_info.get('title')[:-1],
+            'formats': formats,
+            'is_live': is_live,
+            'description': video_info.get('summary'),
+            'channel': try_get(video_info, lambda x: x['channel']['title']),
+            'channel_id': try_get(video_info, lambda x: x['channel']['_id']),
+            'view_count': int_or_none(video_info.get('playCount')),
+            'thumbnail': url_or_none(video_info.get('largeImage')),
+            'duration': float_or_none(video_info.get('videoDuration')),
+            'timestamp': unified_timestamp(video_info.get('createdAt')),
+            'tags': [tag.get('name') for tag in video_info.get('tags')],
+            'availability': self._availability(is_unlisted=video_info.get('unlisted')),
+            'comments': comments,
+            '__post_extractor': (
+                (lambda: self._extract_comments(video_id, comments, video_json.get('getVideoComments')))
+                if self.get_param('getcomments') else None)
+        }
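For reference, the GraphQL exchange `_call_api` performs in the new extractor is a plain JSON POST. The sketch below mirrors it using only the standard library, with the same endpoint and payload shape as the code above (error handling omitted):

```python
import json
from urllib.request import Request, urlopen

def call_api(video_id, operation, query):
    # POST a GraphQL document with its variables and operation name,
    # exactly as BannedVideoIE._call_api does via _download_json.
    req = Request(
        'https://api.infowarsmedia.com/graphql',
        data=json.dumps({
            'variables': {'id': video_id},
            'operationName': operation,
            'query': query,
        }).encode('utf8'),
        headers={'Content-Type': 'application/json; charset=utf-8'})
    return json.load(urlopen(req)).get('data')
```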
Some files were not shown because too many files have changed in this diff.