Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-12-08 07:11:39 +01:00)

Compare commits: 2021.07.24 ... 2021.10.10 (389 commits)
Commit SHA1s:
8063de5109 dec0d56fa9 21186af70a 84999521c8 d1d5c08f29 2e01ba6218 c9652aa418 91b6c884c9 28fe35b4e3 aa9a92fdbb
a170527e1f 90d55df330 81bcd43a03 b5ae35ee6d 4e3b637d5b 8cd69fc407 2614f64600 b922db9fe5 f2cad2e496 d6124e191e
8c6f4daa4c ac56cf38a4 c08b8873ea 819e05319b fee3f44f5f 705e7c2005 49e7e9c3ce 8472674399 1276a43a77 519804a92f
1b6bb4a85a 644149afec 4e3d1898a8 f85e6be42e 762e509d91 d92125aeba 0f0ac87be3 755203fc3f 943d5ab133 3001a84dca
ebf2fb4d61 efc947fb3e b11c04a8ae 5d535b4a55 a1c3967307 e919569e67 ff1dec819a 9359f3d4f0 0eaec13ba6 ad095c4283
e6f21b3d92 d710cc6d36 3ae5e79774 8e3fd7e034 80c03fa98f 1f2a268bd3 804ca01cc7 851876095b 2d997542ca 7756277882
7687c8ac6e 80c360d7aa 250a938de8 f1d42a83ab 3cf4b91dc5 fecb20a503 360167b9fc 28234287f1 91dd88b90f d31dab7084
c470901ccf 2333ea1029 9a13345439 524e2e4fda f440b14f87 8dc831f715 e99b2d2771 1fed277349 0ef787d773 a5de4099cb
ff1c7fc9d3 600e900300 20b91b9b63 4c88ff87fc e27cc5d864 eb6d4ad1ca 99e9e001de 51ff9ca0b0 b19404591a 1f8471e22c
77c4a9ef68 8f70b0b82f be867b03f5 1813a6ccd4 8100c77223 9ada988bfc d1a7768432 49fa4d9af7 ee2b3563f3 bdc196a444
388bc4a640 50eff38c1c 4be9dbdc24 a21e0ab1a1 a76e2e0f88 bd50a52b0d c12977bdc4 f6d8776d34 d806c9fd97 5e3f2f8fc4
1009f67c2a bd6f722de8 d9d8b85747 daf7ac2b92 96933fc1b6 0d32e124c6 cb2ec90e91 3cd786dbd7 1b629e1b4c 8f8e8eba24
09906f554d a63d9bd0b0 f137e4c27c 4762621925 57aa7b8511 9c1c3ec016 f9cc0161e6 c6af2dd8e5 7738bd3272 7c37ff97d3
d47f46e17e 298bf1d275 d1b39ad844 edf65256aa 7303f84abe f5aa5cfbff f1f6ca78b4 2fac2e9136 23dd2d9a32 b89378a69a
0001fcb586 c589c1d395 f7590d4764 dbf7eca917 d21bba7853 a8cb7eca61 92790da2bb b5a39ed43b cc33cc4395 1722099ded
40b18348e7 e9a30b181e 9c95ac677e ea706726d6 f60990ddfc ad226b1dc9 ca46b94134 67ad7759af d5fe04f5c7 03c862794f
0fd6661edb 02c7ae8104 16f7e6be3a ffecd3034b 1c5ce74c04 81a136b80f eab3f867e2 a7e999beec 71407b3eca dc9de9cbd2
92ddaa415e b6de707d13 bccdbd22d5 bd9ff55bcd 526d74ec5a e04a1ff92e aa6c25309a d98b006b85 265a7a8ee5 826446bd82
bc79491368 421ddcb8b4 c0ac49bcca 02def2714c f9be9cb9fd 4614bc22c1 8e5fecc88c 165efb823b dd594deb2a 409e18286e
8113999995 8026e50152 9ee4f0bb5b be4d9f4cd9 347182a0cd a7429aa9fa 7a340e0df3 f0e5366335 49ca8db06b ee57a19d84
908b56eaf7 1461d7bef2 8a2d992389 8e25d624df e88dabb35e 8eb7ba82ca b2eeee0ce0 875cfb8cbc b8773e63f0 05664a2f7b
2ee6389bef 62cdaaf0e2 419508eabb 54153fb71b 1dd6d9ca9d 356ac009d3 9a292a620c 7e55872286 2fc14b9925 58f68fe703
abafce59a1 2e7781a93c bc36bc36a1 d75201a873 691d5823d6 c311988d19 26e8e04454 198e3a04c9 61bfacb233 85a0021fb3
7a45a1590b 1c36c1f320 e0493e90fc 1931a55ee8 63b1ad0f05 0bb1bc1b10 45842107b9 6251555f1c 330690a214 91d4b32bb6
a181cd0c60 ea81966e64 2acf2ce5cb f7f18f905c 4f8b70b593 e43e9f3c2c 71dd5d4a00 52a2f994c9 8b7491c8d1 251ae04e6a
5bc4a65eea 1151c4079a 88acdbc269 9b5fa9ee7c aca5774e68 3fb4e21b38 4dfbf8696b 8fc54b1230 da33e35b05 5ad28e7ffd
f79ec47d71 45b0596290 96c23f3be8 6e7dfe4959 c34f505b04 14183d1f80 58adec4677 9e598870dd 8f18aca871 3ad56b4236
5d62709bc7 7581d2467a 5fa206fb54 df2a5633da 7a6742b5f9 e040bb0a41 f8fabc9930 d967c68e4c 3dd39c5f9a be44eefd5e
f775c83110 b714b41f81 31654882e9 86c66b2d3e 37242e56f2 6c7274ecd2 5c333d7496 641ad5d813 0715f7e19b a8731fcc1d
5a64127f94 ade6dc5e9e 418964fa91 c196640ff1 60c8fc73c6 bc8745480e ff5e16f2f6 be2fc5b212 7be9ccff0b 245d43cacf
246fb276e0 6e6e0d95b3 25a3f4f5d6 ad3dc496bb 2831b4686c 8c0ae192a4 e9f4ccd19e a38bd1defa 476febeb3a b6a35ad83b
bfd56b74b9 858a65ecc1 3b34e38813 3448870205 b868936cd6 c681cb5d93 379e44ed3c 243c57cfe8 28f436bad0 2b8a2973bd
b7b04c782e 6e84b21559 575e17a1b9 57015a4a3f 9cc1a3130a b51d2ae3ca fee5f0c909 7bb6434767 124bc071ee a047eeb6d2
77b87f0519 678da2f21b cc3fa8d39d 89efdc15dd 8012d892bd 9d65e7bd6d 36576d7c4c bb36a55c41 3dbb2a9dcb 9997eee4af
3e376d183e 888299e6ca c31be5b009 e5611e8eda 8e6cc12c80 e980017ac8 e9d9efc0f2 6ccf351a87 28dff70b51 1aebc0f79e
cf87314d4e 1bd3639f69 68f5867cf0 605cad0be7 0855702f3f e8384376c0 e7e94f2a5c a46a815b05 96fccc101f dbf5416a20
d74a58a186 f5510afef0 e4f0275711 e0f2b4b47d eca330cb88 d24734daea d9e6e9481e 3619f78d2c 65c2fde23f 000c15a4ca
9275f62cf8 6552469433 11cc45718c fe07e2c69f 89ce723edd 45d1f15725 a318f59d14 7d1eb38af1 901130bbcf c0bc527bca
2a9c6dcd22 5a1fc62b41 b4c055bac2 ea05b3020d 9536bc072d 8242bf220d 4bfa401d40 0222620725 1fe3c4c27e
13 .github/FUNDING.yml vendored (new file)
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+
+custom: ['https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators']
19 .github/ISSUE_TEMPLATE/1_broken_site.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Broken site support
 about: Report broken or misfunctioning site
-title: "[Broken]"
-labels: Broken
+title: "[Broken] Website Name: A short description of the issue"
+labels: ['triage', 'extractor-bug']
 assignees: ''

 ---
@@ -21,18 +21,21 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.21. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.10.10. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
-- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
-- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- Make sure that all URLs and arguments with special characters are properly quoted or escaped.
+- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running yt-dlp version **2021.07.21**
+- [ ] I've verified that I'm running yt-dlp version **2021.10.10**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Verbose log
@@ -44,7 +47,7 @@ Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your com
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] yt-dlp version 2021.07.21
+[debug] yt-dlp version 2021.10.10
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
18 .github/ISSUE_TEMPLATE/2_site_support_request.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Site support request
 about: Request support for a new site
-title: "[Site Request]"
-labels: Request
+title: "[Site Request] Website Name"
+labels: ['triage', 'site-request']
 assignees: ''

 ---
@@ -21,18 +21,22 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.21. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.10.10. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
-- Make sure that site you are requesting is not dedicated to copyright infringement, see https://github.com/yt-dlp/yt-dlp. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
-- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- Make sure that site you are requesting is not dedicated to copyright infringement. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
+- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running yt-dlp version **2021.07.21**
+- [ ] I've verified that I'm running yt-dlp version **2021.10.10**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Example URLs
15 .github/ISSUE_TEMPLATE/3_site_feature_request.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Site feature request
 about: Request a new functionality for a site
-title: "[Site Request]"
-labels: Request
+title: "[Site Feature] Website Name: A short description of the feature"
+labels: ['triage', 'site-enhancement']
 assignees: ''

 ---
@@ -21,14 +21,17 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.21. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
-- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.10.10. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
+- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running yt-dlp version **2021.07.21**
+- [ ] I've verified that I'm running yt-dlp version **2021.10.10**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Description
24 .github/ISSUE_TEMPLATE/4_bug_report.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Bug report
 about: Report a bug unrelated to any particular site or extractor
-title: ''
-labels: ''
+title: '[Bug] A short description of the issue'
+labels: ['triage', 'bug']
 assignees: ''

 ---
@@ -21,20 +21,22 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.21. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.10.10. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
-- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
-- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Read bugs section in FAQ: https://github.com/yt-dlp/yt-dlp
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- Make sure that all URLs and arguments with special characters are properly quoted or escaped.
+- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

-- [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running yt-dlp version **2021.07.21**
+- [ ] I'm reporting a bug unrelated to a specific site
+- [ ] I've verified that I'm running yt-dlp version **2021.10.10**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
-- [ ] I've read bugs section in FAQ
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Verbose log
@@ -46,7 +48,7 @@ Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your com
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] yt-dlp version 2021.07.21
+[debug] yt-dlp version 2021.10.10
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
13 .github/ISSUE_TEMPLATE/5_feature_request.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Feature request
 about: Request a new functionality unrelated to any particular site or extractor
-title: "[Feature Request]"
-labels: Request
+title: "[Feature Request] A short description of your feature"
+labels: ['triage', 'enhancement']
 assignees: ''

 ---
@@ -21,14 +21,17 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.21. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
-- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.10.10. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
+- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running yt-dlp version **2021.07.21**
+- [ ] I've verified that I'm running yt-dlp version **2021.10.10**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Description
13 .github/ISSUE_TEMPLATE/6_question.md vendored
@@ -1,7 +1,7 @@
 ---
 name: Ask question
-about: Ask youtube-dl related question
-title: "[Question]"
+about: Ask yt-dlp related question
+title: "[Question] A short description of your question"
 labels: question
 assignees: ''

@@ -21,14 +21,17 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- Look through the README (https://github.com/yt-dlp/yt-dlp) and FAQ (https://github.com/yt-dlp/yt-dlp) for similar questions
-- Search the bugtracker for similar questions: https://github.com/yt-dlp/yt-dlp
+- Look through the README (https://github.com/yt-dlp/yt-dlp)
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Search the bugtracker for similar questions: https://github.com/yt-dlp/yt-dlp/issues
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm asking a question
-- [ ] I've looked through the README and FAQ for similar questions
+- [ ] I've looked through the README
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
 - [ ] I've searched the bugtracker for similar questions including closed ones
+- [ ] I have given an appropriate title to the issue


 ## Question
15 .github/ISSUE_TEMPLATE_tmpl/1_broken_site.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Broken site support
 about: Report broken or misfunctioning site
-title: "[Broken]"
-labels: Broken
+title: "[Broken] Website Name: A short description of the issue"
+labels: ['triage', 'extractor-bug']
 assignees: ''

 ---
@@ -21,11 +21,12 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
-- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
-- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- Make sure that all URLs and arguments with special characters are properly quoted or escaped.
+- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a broken site support
@@ -33,6 +34,8 @@ Carefully read and work through this check list in order to prevent the most com
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Verbose log
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Site support request
 about: Request support for a new site
-title: "[Site Request]"
-labels: Request
+title: "[Site Request] Website Name"
+labels: ['triage', 'site-request']
 assignees: ''

 ---
@@ -21,18 +21,22 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
-- Make sure that site you are requesting is not dedicated to copyright infringement, see https://github.com/yt-dlp/yt-dlp. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
-- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- Make sure that site you are requesting is not dedicated to copyright infringement. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
+- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a new site support request
 - [ ] I've verified that I'm running yt-dlp version **%(version)s**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Example URLs
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Site feature request
 about: Request a new functionality for a site
-title: "[Site Request]"
-labels: Request
+title: "[Site Feature] Website Name: A short description of the feature"
+labels: ['triage', 'site-enhancement']
 assignees: ''

 ---
@@ -21,14 +21,17 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
-- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
+- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a site feature request
 - [ ] I've verified that I'm running yt-dlp version **%(version)s**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Description
20 .github/ISSUE_TEMPLATE_tmpl/4_bug_report.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Bug report
 about: Report a bug unrelated to any particular site or extractor
-title: ''
-labels: ''
+title: '[Bug] A short description of the issue'
+labels: ['triage', 'bug']
 assignees: ''

 ---
@@ -21,20 +21,22 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
-- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
-- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
-- Read bugs section in FAQ: https://github.com/yt-dlp/yt-dlp
-- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
+- Make sure that all URLs and arguments with special characters are properly quoted or escaped.
+- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
+- Finally, confirm all RELEVANT tasks from the following by putting x into all the boxes like this [x] (Dont forget to delete the empty space)
 -->

-- [ ] I'm reporting a broken site support issue
+- [ ] I'm reporting a bug unrelated to a specific site
 - [ ] I've verified that I'm running yt-dlp version **%(version)s**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
+- [ ] The provided URLs do not contain any DRM to the best of my knowledge
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
-- [ ] I've read bugs section in FAQ
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Verbose log
11 .github/ISSUE_TEMPLATE_tmpl/5_feature_request.md vendored
@@ -1,8 +1,8 @@
 ---
 name: Feature request
 about: Request a new functionality unrelated to any particular site or extractor
-title: "[Feature Request]"
-labels: Request
+title: "[Feature Request] A short description of your feature"
+labels: ['triage', 'enhancement']
 assignees: ''

 ---
@@ -21,14 +21,17 @@ assignees: ''

 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
-- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
-- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
+- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp#update on how to update. Issues with outdated version will be REJECTED.
+- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp/issues. DO NOT post duplicates.
+- Read "opening an issue" section in CONTRIBUTING.md: https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue
 - Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
 -->

 - [ ] I'm reporting a feature request
 - [ ] I've verified that I'm running yt-dlp version **%(version)s**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
+- [ ] I've read the opening an issue section in CONTRIBUTING.md
+- [ ] I have given an appropriate title to the issue


 ## Description
4 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -7,11 +7,11 @@
 ---

 ### Before submitting a *pull request* make sure you have:
-- [ ] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections
+- [ ] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
 - [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
 - [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)

-### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
+### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
 - [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
 - [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
31 .github/banner.svg vendored (new file)
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 24 KiB
191 .github/workflows/build.yml vendored
@@ -12,11 +12,15 @@ jobs:
 outputs:
 ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
 upload_url: ${{ steps.create_release.outputs.upload_url }}
-sha256_unix: ${{ steps.sha256_file.outputs.sha256_unix }}
-sha512_unix: ${{ steps.sha512_file.outputs.sha512_unix }}
+sha256_bin: ${{ steps.sha256_bin.outputs.sha256_bin }}
+sha512_bin: ${{ steps.sha512_bin.outputs.sha512_bin }}
+sha256_tar: ${{ steps.sha256_tar.outputs.sha256_tar }}
+sha512_tar: ${{ steps.sha512_tar.outputs.sha512_tar }}

 steps:
 - uses: actions/checkout@v2
+with:
+fetch-depth: 0
 - name: Set up Python
 uses: actions/setup-python@v2
 with:
@@ -25,11 +29,76 @@
 run: sudo apt-get -y install zip pandoc man
 - name: Bump version
 id: bump_version
-run: python devscripts/update-version.py
+run: |
+python devscripts/update-version.py
+make issuetemplates
 - name: Print version
 run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
+- name: Update master
+id: push_update
+run: |
+git config --global user.email "${{ github.event.pusher.email }}"
+git config --global user.name "${{ github.event.pusher.name }}"
+git add -u
+git commit -m "[version] update" -m ":ci skip all"
+git pull --rebase origin ${{ github.event.repository.master_branch }}
+git push origin ${{ github.event.ref }}:${{ github.event.repository.master_branch }}
+echo ::set-output name=head_sha::$(git rev-parse HEAD)
+- name: Get Changelog
+id: get_changelog
+run: |
+changelog=$(cat Changelog.md | grep -oPz '(?s)(?<=### ${{ steps.bump_version.outputs.ytdlp_version }}\n{2}).+?(?=\n{2,3}###)') || true
+echo "changelog<<EOF" >> $GITHUB_ENV
+echo "$changelog" >> $GITHUB_ENV
+echo "EOF" >> $GITHUB_ENV
 - name: Run Make
 run: make all tar
+- name: Get SHA2-256SUMS for yt-dlp
+id: sha256_bin
+run: echo "::set-output name=sha256_bin::$(sha256sum yt-dlp | awk '{print $1}')"
+- name: Get SHA2-256SUMS for yt-dlp.tar.gz
+id: sha256_tar
+run: echo "::set-output name=sha256_tar::$(sha256sum yt-dlp.tar.gz | awk '{print $1}')"
+- name: Get SHA2-512SUMS for yt-dlp
+id: sha512_bin
+run: echo "::set-output name=sha512_bin::$(sha512sum yt-dlp | awk '{print $1}')"
+- name: Get SHA2-512SUMS for yt-dlp.tar.gz
+id: sha512_tar
+run: echo "::set-output name=sha512_tar::$(sha512sum yt-dlp.tar.gz | awk '{print $1}')"
+- name: Install dependencies for pypi
+env:
+PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
+if: "env.PYPI_TOKEN != ''"
+run: |
+python -m pip install --upgrade pip
+pip install setuptools wheel twine
+- name: Build and publish on pypi
+env:
+TWINE_USERNAME: __token__
+TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
+if: "env.TWINE_PASSWORD != ''"
+run: |
+rm -rf dist/*
+python setup.py sdist bdist_wheel
+twine upload dist/*
+- name: Install SSH private key
+env:
+BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
+if: "env.BREW_TOKEN != ''"
+uses: webfactory/ssh-agent@v0.5.3
+with:
+ssh-private-key: ${{ env.BREW_TOKEN }}
+- name: Update Homebrew Formulae
+env:
+BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
+if: "env.BREW_TOKEN != ''"
+run: |
+git clone git@github.com:yt-dlp/homebrew-taps taps/
+python3 devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ steps.bump_version.outputs.ytdlp_version }}"
+git -C taps/ config user.name github-actions
+git -C taps/ config user.email github-actions@example.com
+git -C taps/ commit -am 'yt-dlp: ${{ steps.bump_version.outputs.ytdlp_version }}'
+git -C taps/ push
 - name: Create Release
 id: create_release
 uses: actions/create-release@v1
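The `Get Changelog` step added above pulls the release notes for the new version out of Changelog.md with a single Perl-regex grep. A minimal sketch of what that pattern extracts, run against an illustrative two-entry Changelog.md (the sample file contents and version are assumptions for demonstration, not from the repository; requires GNU grep with `-P` support):

```
# Build a sample Changelog.md with two release sections (illustrative only)
cat > Changelog.md <<'EOF'
### 2021.10.10

* sample entry one
* sample entry two

### 2021.09.25

* older entry
EOF

# Same pattern shape as the workflow step: -z treats the file as one record,
# (?s) lets . span newlines, and the lookbehind/lookahead trim the match to
# the body between the "### <version>" heading and the next "###" heading.
grep -oPz '(?s)(?<=### 2021.10.10\n{2}).+?(?=\n{2,3}###)' Changelog.md
# prints the two "sample entry" bullet lines
```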
@@ -38,9 +107,10 @@
 with:
 tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
 release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
+commitish: ${{ steps.push_update.outputs.head_sha }}
 body: |
 Changelog:
-PLACEHOLDER
+${{ env.changelog }}
 draft: false
 prerelease: false
 - name: Upload yt-dlp Unix binary
@@ -62,36 +132,16 @@
 asset_path: ./yt-dlp.tar.gz
 asset_name: yt-dlp.tar.gz
 asset_content_type: application/gzip
-- name: Get SHA2-256SUMS for yt-dlp
-id: sha256_file
-run: echo "::set-output name=sha256_unix::$(sha256sum yt-dlp | awk '{print $1}')"
-- name: Get SHA2-512SUMS for yt-dlp
-id: sha512_file
-run: echo "::set-output name=sha512_unix::$(sha512sum yt-dlp | awk '{print $1}')"
-- name: Install dependencies for pypi
-env:
-PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
-if: "env.PYPI_TOKEN != ''"
-run: |
-python -m pip install --upgrade pip
-pip install setuptools wheel twine
-- name: Build and publish on pypi
-env:
-TWINE_USERNAME: __token__
-TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
-if: "env.TWINE_PASSWORD != ''"
-run: |
-rm -rf dist/*
-python setup.py sdist bdist_wheel
-twine upload dist/*

 build_windows:
 runs-on: windows-latest
 needs: build_unix

 outputs:
-sha256_windows: ${{ steps.sha256_file_win.outputs.sha256_windows }}
-sha512_windows: ${{ steps.sha512_file_win.outputs.sha512_windows }}
+sha256_win: ${{ steps.sha256_win.outputs.sha256_win }}
+sha512_win: ${{ steps.sha512_win.outputs.sha512_win }}
+sha256_win_zip: ${{ steps.sha256_win_zip.outputs.sha256_win_zip }}
+sha512_win_zip: ${{ steps.sha512_win_zip.outputs.sha512_win_zip }}

 steps:
 - uses: actions/checkout@v2
@@ -103,14 +153,15 @@
 - name: Upgrade pip and enable wheel support
 run: python -m pip install --upgrade pip setuptools wheel
 - name: Install Requirements
-run: pip install pyinstaller mutagen pycryptodome websockets
+# Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
+run: pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
 - name: Bump version
 id: bump_version
 run: python devscripts/update-version.py
 - name: Print version
 run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
 - name: Run PyInstaller Script
-run: python pyinst.py 64
+run: python pyinst.py
 - name: Upload yt-dlp.exe Windows binary
 id: upload-release-windows
 uses: actions/upload-release-asset@v1
@@ -122,19 +173,41 @@
 asset_name: yt-dlp.exe
 asset_content_type: application/vnd.microsoft.portable-executable
 - name: Get SHA2-256SUMS for yt-dlp.exe
-id: sha256_file_win
-run: echo "::set-output name=sha256_windows::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
+id: sha256_win
+run: echo "::set-output name=sha256_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
 - name: Get SHA2-512SUMS for yt-dlp.exe
-id: sha512_file_win
-run: echo "::set-output name=sha512_windows::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
+id: sha512_win
+run: echo "::set-output name=sha512_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
+- name: Run PyInstaller Script with --onedir
+run: python pyinst.py --onedir
+- uses: papeloto/action-zip@v1
+with:
+files: ./dist/yt-dlp
+dest: ./dist/yt-dlp.zip
+- name: Upload yt-dlp.zip Windows onedir
+id: upload-release-windows-zip
+uses: actions/upload-release-asset@v1
+env:
+GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+with:
+upload_url: ${{ needs.build_unix.outputs.upload_url }}
+asset_path: ./dist/yt-dlp.zip
+asset_name: yt-dlp.zip
+asset_content_type: application/zip
+- name: Get SHA2-256SUMS for yt-dlp.zip
+id: sha256_win_zip
+run: echo "::set-output name=sha256_win_zip::$((Get-FileHash dist\yt-dlp.zip -Algorithm SHA256).Hash.ToLower())"
+- name: Get SHA2-512SUMS for yt-dlp.zip
+id: sha512_win_zip
+run: echo "::set-output name=sha512_win_zip::$((Get-FileHash dist\yt-dlp.zip -Algorithm SHA512).Hash.ToLower())"

 build_windows32:
 runs-on: windows-latest
 needs: [build_unix, build_windows]

 outputs:
-sha256_windows32: ${{ steps.sha256_file_win32.outputs.sha256_windows32 }}
-sha512_windows32: ${{ steps.sha512_file_win32.outputs.sha512_windows32 }}
+sha256_win32: ${{ steps.sha256_win32.outputs.sha256_win32 }}
+sha512_win32: ${{ steps.sha512_win32.outputs.sha512_win32 }}

 steps:
 - uses: actions/checkout@v2
@@ -147,14 +220,14 @@
 - name: Upgrade pip and enable wheel support
 run: python -m pip install --upgrade pip setuptools wheel
 - name: Install Requirements
-run: pip install pyinstaller mutagen pycryptodome websockets
+run: pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
 - name: Bump version
 id: bump_version
 run: python devscripts/update-version.py
 - name: Print version
 run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
 - name: Run PyInstaller Script for 32 Bit
-run: python pyinst.py 32
+run: python pyinst.py
 - name: Upload Executable yt-dlp_x86.exe
 id: upload-release-windows32
 uses: actions/upload-release-asset@v1
@@ -166,11 +239,11 @@
 asset_name: yt-dlp_x86.exe
 asset_content_type: application/vnd.microsoft.portable-executable
 - name: Get SHA2-256SUMS for yt-dlp_x86.exe
-id: sha256_file_win32
-run: echo "::set-output name=sha256_windows32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA256).Hash.ToLower())"
+id: sha256_win32
+run: echo "::set-output name=sha256_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA256).Hash.ToLower())"
 - name: Get SHA2-512SUMS for yt-dlp_x86.exe
-id: sha512_file_win32
-run: echo "::set-output name=sha512_windows32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA512).Hash.ToLower())"
+id: sha512_win32
+run: echo "::set-output name=sha512_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA512).Hash.ToLower())"

 finish:
 runs-on: ubuntu-latest
@@ -179,15 +252,17 @@
 steps:
 - name: Make SHA2-256SUMS file
 env:
-SHA256_WINDOWS: ${{ needs.build_windows.outputs.sha256_windows }}
-SHA256_WINDOWS32: ${{ needs.build_windows32.outputs.sha256_windows32 }}
-SHA256_UNIX: ${{ needs.build_unix.outputs.sha256_unix }}
-YTDLP_VERSION: ${{ needs.build_unix.outputs.ytdlp_version }}
+SHA256_WIN: ${{ needs.build_windows.outputs.sha256_win }}
+SHA256_WIN_ZIP: ${{ needs.build_windows.outputs.sha256_win_zip }}
+SHA256_WIN32: ${{ needs.build_windows32.outputs.sha256_win32 }}
+SHA256_BIN: ${{ needs.build_unix.outputs.sha256_bin }}
+SHA256_TAR: ${{ needs.build_unix.outputs.sha256_tar }}
 run: |
-echo "version:${{ env.YTDLP_VERSION }}" >> SHA2-256SUMS
-echo "yt-dlp.exe:${{ env.SHA256_WINDOWS }}" >> SHA2-256SUMS
-echo "yt-dlp_x86.exe:${{ env.SHA256_WINDOWS32 }}" >> SHA2-256SUMS
-echo "yt-dlp:${{ env.SHA256_UNIX }}" >> SHA2-256SUMS
+echo "${{ env.SHA256_WIN }}  yt-dlp.exe" >> SHA2-256SUMS
+echo "${{ env.SHA256_WIN32 }}  yt-dlp_x86.exe" >> SHA2-256SUMS
+echo "${{ env.SHA256_BIN }}  yt-dlp" >> SHA2-256SUMS
+echo "${{ env.SHA256_TAR }}  yt-dlp.tar.gz" >> SHA2-256SUMS
+echo "${{ env.SHA256_WIN_ZIP }}  yt-dlp.zip" >> SHA2-256SUMS
 - name: Upload 256SUMS file
 id: upload-sums
 uses: actions/upload-release-asset@v1
@@ -200,13 +275,17 @@
 asset_content_type: text/plain
 - name: Make SHA2-512SUMS file
 env:
-SHA512_WINDOWS: ${{ needs.build_windows.outputs.sha512_windows }}
-SHA512_WINDOWS32: ${{ needs.build_windows32.outputs.sha512_windows32 }}
-SHA512_UNIX: ${{ needs.build_unix.outputs.sha512_unix }}
+SHA512_WIN: ${{ needs.build_windows.outputs.sha512_win }}
+SHA512_WIN_ZIP: ${{ needs.build_windows.outputs.sha512_win_zip }}
+SHA512_WIN32: ${{ needs.build_windows32.outputs.sha512_win32 }}
+SHA512_BIN: ${{ needs.build_unix.outputs.sha512_bin }}
+SHA512_TAR: ${{ needs.build_unix.outputs.sha512_tar }}
 run: |
-echo "${{ env.SHA512_WINDOWS }}  yt-dlp.exe" >> SHA2-512SUMS
-echo "${{ env.SHA512_WINDOWS32 }}  yt-dlp_x86.exe" >> SHA2-512SUMS
-echo "${{ env.SHA512_UNIX }}  yt-dlp" >> SHA2-512SUMS
+echo "${{ env.SHA512_WIN }}  yt-dlp.exe" >> SHA2-512SUMS
+echo "${{ env.SHA512_WIN32 }}  yt-dlp_x86.exe" >> SHA2-512SUMS
+echo "${{ env.SHA512_BIN }}  yt-dlp" >> SHA2-512SUMS
+echo "${{ env.SHA512_TAR }}  yt-dlp.tar.gz" >> SHA2-512SUMS
+echo "${{ env.SHA512_WIN_ZIP }}  yt-dlp.zip" >> SHA2-512SUMS
 - name: Upload 512SUMS file
 id: upload-512sums
 uses: actions/upload-release-asset@v1
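The rewritten `finish` job also changes the SHA2-256SUMS layout from `name:hash` lines to the `hash  name` lines that `sha256sum` and `sha512sum` themselves emit, so the published files can be fed straight to `--check`. A minimal sketch of verifying a downloaded release asset this way (the release tag here is illustrative; substitute the release you actually downloaded):

```
# Fetch one release asset plus the checksum file from the same release
curl -LO https://github.com/yt-dlp/yt-dlp/releases/download/2021.10.10/yt-dlp
curl -LO https://github.com/yt-dlp/yt-dlp/releases/download/2021.10.10/SHA2-256SUMS

# The "hash  filename" layout is sha256sum's native format;
# --ignore-missing skips entries for assets that were not downloaded
sha256sum --check --ignore-missing SHA2-256SUMS
```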
4 .github/workflows/quick-test.yml vendored
@@ -12,7 +12,7 @@ jobs:
 with:
 python-version: 3.9
 - name: Install test requirements
-run: pip install pytest pycryptodome
+run: pip install pytest pycryptodomex
 - name: Run tests
 run: ./devscripts/run_tests.sh core
 flake8:
@@ -27,5 +27,7 @@
 python-version: 3.9
 - name: Install flake8
 run: pip install flake8
+- name: Make lazy extractors
+run: python devscripts/make_lazy_extractors.py yt_dlp/extractor/lazy_extractors.py
 - name: Run flake8
 run: flake8 .
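Both quick-test jobs can be reproduced locally before pushing, using the same commands the workflow runs. A minimal sketch, assuming a checkout of yt-dlp at the repository root:

```
# Same packages the workflow installs (note pycryptodomex, not pycryptodome)
pip install pytest pycryptodomex flake8

# Run only the core test suite, as in the workflow's test job
./devscripts/run_tests.sh core

# Generate the lazy-extractor module first, mirroring the new workflow step,
# then lint the tree
python devscripts/make_lazy_extractors.py yt_dlp/extractor/lazy_extractors.py
flake8 .
```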
7 .gitignore vendored
@@ -2,7 +2,8 @@
 *.conf
 *.spec
 cookies
-cookies.txt
+*cookies.txt
+.netrc

 # Downloaded
 *.srt
@@ -19,6 +20,8 @@ cookies.txt
 *.wav
 *.ape
 *.mkv
+*.flac
+*.avi
 *.swf
 *.part
 *.part-*
@@ -40,7 +43,7 @@ cookies.txt
 *.description

 # Allow config/media files in testdata
-!test/testdata/**
+!test/**

 # Python
 *.pyc
269
CONTRIBUTING.md
269
CONTRIBUTING.md
@@ -1,26 +1,59 @@
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
# CONTRIBUTING TO YT-DLP

- [OPENING AN ISSUE](#opening-an-issue)
    - [Is the description of the issue itself sufficient?](#is-the-description-of-the-issue-itself-sufficient)
    - [Are you using the latest version?](#are-you-using-the-latest-version)
    - [Is the issue already documented?](#is-the-issue-already-documented)
    - [Why are existing options not enough?](#why-are-existing-options-not-enough)
    - [Have you read and understood the changes between youtube-dl and yt-dlp](#have-you-read-and-understood-the-changes-between-youtube-dl-and-yt-dlp)
    - [Is there enough context in your bug report?](#is-there-enough-context-in-your-bug-report)
    - [Does the issue involve one problem, and one problem only?](#does-the-issue-involve-one-problem-and-one-problem-only)
    - [Is anyone going to need the feature?](#is-anyone-going-to-need-the-feature)
    - [Is your question about yt-dlp?](#is-your-question-about-yt-dlp)
- [DEVELOPER INSTRUCTIONS](#developer-instructions)
    - [Adding new feature or making overarching changes](#adding-new-feature-or-making-overarching-changes)
    - [Adding support for a new site](#adding-support-for-a-new-site)
    - [yt-dlp coding conventions](#yt-dlp-coding-conventions)
        - [Mandatory and optional metafields](#mandatory-and-optional-metafields)
        - [Provide fallbacks](#provide-fallbacks)
        - [Regular expressions](#regular-expressions)
        - [Long lines policy](#long-lines-policy)
        - [Inline values](#inline-values)
        - [Collapse fallbacks](#collapse-fallbacks)
        - [Trailing parentheses](#trailing-parentheses)
        - [Use convenience conversion and parsing functions](#use-convenience-conversion-and-parsing-functions)
- [EMBEDDING YT-DLP](README.md#embedding-yt-dlp)



# OPENING AN ISSUE

Bugs and suggestions should be reported at: [yt-dlp/yt-dlp/issues](https://github.com/yt-dlp/yt-dlp/issues). Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in our [Discord server](https://discord.gg/H5MNcFW63r).

**Please include the full output of yt-dlp when run with `-Uv`**, i.e. **add** `-Uv` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
```
$ youtube-dl -v <your command line>
[debug] System config: []
[debug] User config: []
[debug] Command-line args: [u'-v', u'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2015.12.06
[debug] Git HEAD: 135392e
[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
$ yt-dlp -Uv <your command line>
[debug] Command-line config: ['-v', 'demo.com']
[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8
[debug] yt-dlp version 2021.09.25 (zip)
[debug] Python version 3.8.10 (CPython 64bit) - Linux-5.4.0-74-generic-x86_64-with-glibc2.29
[debug] exe versions: ffmpeg 4.2.4, ffprobe 4.2.4
[debug] Proxy map: {}
Current Build Hash 25cc412d1d3c0725a1f2f5b7e4682f6fb40e6d15f7024e96f7afd572e9919535
yt-dlp is up to date (2021.09.25)
...
```
**Do not post screenshots of verbose logs; only plain text is acceptable.**

The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore will be closed as `incomplete`.

The templates provided for the issues should be completed and **not removed**; this helps aid the resolution of the issue.

Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):

### Is the description of the issue itself sufficient?

We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.
We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources.

So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious

@@ -28,25 +61,31 @@ So please elaborate on what feature you are requesting, or what bug you want to
- How it could be fixed
- What your proposed solution would look like

If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. We often get frustrated by these issues, since the only possible way for us to move forward on them is to ask for clarification over and over.

For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
For bug reports, this means that your report should contain the **complete** output of yt-dlp when called with the `-Uv` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.

If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--write-pages` and upload the `.dump` files you get [somewhere](https://gist.github.com).

**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `https://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `https://www.youtube.com/`) is *not* an example URL.

### Are you using the latest version?

Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
Before reporting any issue, type `yt-dlp -U`. This should report that you're up-to-date. This goes for feature requests as well.

### Is the issue already documented?

Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/ytdl-org/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2021.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.

Additionally, it is helpful to see if the issue has already been documented in the [youtube-dl issue tracker](https://github.com/ytdl-org/youtube-dl/issues). If similar issues have already been reported in youtube-dl (but not in our issue tracker), links to them can be included in your issue report here.

### Why are existing options not enough?

Before requesting a new feature, please have a quick peek at [the list of supported options](https://github.com/ytdl-org/youtube-dl/blob/master/README.md#options). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.
Before requesting a new feature, please have a quick peek at [the list of supported options](README.md#usage-and-options). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.

### Have you read and understood the changes between youtube-dl and yt-dlp

There are many changes between youtube-dl and yt-dlp [(changes to default behavior)](README.md#differences-in-default-behavior), and some of the options available have a different behaviour in yt-dlp, or have been removed altogether [(list of changes to options)](README.md#deprecated-options). Make sure you have read and understood the differences in the options and how this may impact your downloads before opening an issue.

### Is there enough context in your bug report?

@@ -58,23 +97,28 @@ We are then presented with a very complicated request when the original problem

Some of our users seem to think there is a limit of issues they can or should open. There is no limit of issues they can or should open. While it may seem appealing to be able to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.

In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, White house podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of youtube-dl that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.
In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, White house podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of yt-dlp that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.

### Is anyone going to need the feature?

Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.

### Is your question about youtube-dl?
### Is your question about yt-dlp?

Some bug reports are completely unrelated to yt-dlp and relate to a different, or even the reporter's own, application. Please make sure that you are actually using yt-dlp. If you are using a UI for yt-dlp, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for yt-dlp fails in some way you believe is related to yt-dlp, by all means, go ahead and report the bug.

If the issue is with `youtube-dl` (the upstream project from which yt-dlp is forked) and not with yt-dlp, the issue should be raised in the youtube-dl project.

It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different, or even the reporter's own, application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.

# DEVELOPER INSTRUCTIONS
Most users do not need to build youtube-dl and can [download the builds](https://ytdl-org.github.io/youtube-dl/download.html) or get them from their distribution.
Most users do not need to build yt-dlp and can [download the builds](https://github.com/yt-dlp/yt-dlp/releases) or get them via [the other installation methods](README.md#installation).

To run youtube-dl as a developer, you don't need to build anything either. Simply execute
To run yt-dlp as a developer, you don't need to build anything either. Simply execute

    python -m youtube_dl
    python -m yt_dlp

To run the tests, simply invoke your favorite test runner, or execute a test file directly; any of the following work:

@@ -85,42 +129,42 @@ To run the test, simply invoke your favorite test runner, or execute a test file

See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.

If you want to create a build of youtube-dl yourself, you'll need
If you want to create a build of yt-dlp yourself, you can follow the instructions [here](README.md#compile).

* python3
* make (only GNU make is supported)
* pandoc
* zip
* pytest

### Adding support for a new site
## Adding new feature or making overarching changes

If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](README.md#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites thus pull requests adding support for them **will be rejected**.
Before you start writing code for implementing a new feature, open an issue explaining your feature request and at least one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.

The same applies to overarching changes to the architecture, documentation or code style.

## Adding support for a new site

If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](https://www.github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. yt-dlp does **not support** such sites, thus pull requests adding support for them **will be rejected**.

After you have ensured this site is distributing its content legally, you can follow this quick list (assuming your service is called `yourextractor`):

1. [Fork this repository](https://github.com/ytdl-org/youtube-dl/fork)
2. Check out the source code with:
1. [Fork this repository](https://github.com/yt-dlp/yt-dlp/fork)
1. Check out the source code with:

        git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git
        git clone git@github.com:YOUR_GITHUB_USERNAME/yt-dlp.git

3. Start a new git branch with
1. Start a new git branch with

        cd youtube-dl
        cd yt-dlp
        git checkout -b yourextractor

4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
1. Start with this simple template and save it to `yt_dlp/extractor/yourextractor.py`:

    ```python
    # coding: utf-8
    from __future__ import unicode_literals

    from .common import InfoExtractor


    class YourExtractorIE(InfoExtractor):
        _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
        _TEST = {
        _TESTS = [{
            'url': 'https://yourextractor.com/watch/42',
            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
            'info_dict': {
@@ -134,12 +178,12 @@ After you have ensured this site is distributing its content legally, you can fo
                # * A regular expression; start the string with re:
                # * Any Python type (for example int or float)
            }
        }
        }]

        def _real_extract(self, url):
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)

            # TODO more code goes here, for example ...
            title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')

@@ -148,45 +192,48 @@ After you have ensured this site is distributing its content legally, you can fo
                'title': title,
                'description': self._og_search_description(webpage),
                'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
                # TODO more properties (see youtube_dl/extractor/common.py)
                # TODO more properties (see yt_dlp/extractor/common.py)
            }
    ```
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
1. Add an import in [`yt_dlp/extractor/extractors.py`](yt_dlp/extractor/extractors.py).
1. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, the tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not counted. You can also run all the tests in one go with `TestDownload.test_YourExtractor_all`.
1. Make sure you have at least one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running, as in the sketch below.
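    A minimal sketch of such a test entry (the URLs, fields and skip reason here are illustrative, not from the official docs):

    ```python
    _TESTS = [{
        # 'only_matching' tests only verify that the URL matches _VALID_URL
        'url': 'https://yourextractor.com/watch/42',
        'only_matching': True,
    }, {
        'url': 'https://yourextractor.com/live/24',  # hypothetical URL
        'info_dict': {
            'id': '24',
            'ext': 'mp4',
            'title': 're:^Live .+$',
        },
        # Documents why this test cannot run in CI instead of silently omitting it
        'skip': 'Only available during the live broadcast',
    }]
    ```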
1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L91-L426). Add tests and code for as many as you want.
1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):

        $ flake8 youtube_dl/extractor/yourextractor.py
        $ flake8 yt_dlp/extractor/yourextractor.py

9. Make sure your code works under all [Python](https://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
10. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files and [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatibility is not required for even older versions of Python.
1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:

        $ git add youtube_dl/extractor/extractors.py
        $ git add youtube_dl/extractor/yourextractor.py
        $ git commit -m '[yourextractor] Add new extractor'
        $ git add yt_dlp/extractor/extractors.py
        $ git add yt_dlp/extractor/yourextractor.py
        $ git commit -m '[yourextractor] Add extractor'
        $ git push origin yourextractor

11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
1. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.

In any case, thank you very much for your contributions!

## youtube-dl coding conventions

## yt-dlp coding conventions

This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.

Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old yt-dlp versions working. Even though this breakage issue may be easily fixed by a new version of yt-dlp, this could take some time, during which the extractor will remain broken.


### Mandatory and optional metafields

For extraction to work youtube-dl relies on metadata your extractor extracts and provides to youtube-dl expressed by an [information dictionary](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303) or simply *info dict*. Only the following meta fields in the *info dict* are considered mandatory for a successful extraction process by youtube-dl:
For extraction to work yt-dlp relies on metadata your extractor extracts and provides to yt-dlp expressed by an [information dictionary](yt_dlp/extractor/common.py#L91-L426) or simply *info dict*. Only the following meta fields in the *info dict* are considered mandatory for a successful extraction process by yt-dlp:

- `id` (media identifier)
- `title` (media title)
- `url` (media download URL) or `formats`

In fact only the last option is technically mandatory (i.e. if you can't figure out the download location of the media the extraction does not make any sense). But by convention youtube-dl also treats `id` and `title` as mandatory. Thus the aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken.
The aforementioned metafields are the critical data without which the extraction does not make any sense; if any of them fail to be extracted, the extractor is considered completely broken. While, in fact, only `id` is technically mandatory, due to compatibility reasons, yt-dlp also treats `title` as mandatory. The extractor is allowed to return the info dict without `url` or `formats` in some special cases, if it allows the user to extract useful information with `--ignore-no-formats-error`, e.g. when the video is a live stream that has not started yet; see the sketch below.
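As a loose illustration of that special case, here is a sketch of an upcoming live stream (the API endpoint and the `is_live`/`has_started` flags are hypothetical; `raise_no_formats` and `live_status` are the relevant yt-dlp facilities):

```python
def _real_extract(self, url):
    video_id = self._match_id(url)
    meta = self._download_json(
        'https://yourextractor.com/api/v1/videos/%s' % video_id, video_id)  # hypothetical endpoint
    if meta.get('is_live') and not meta.get('has_started'):  # hypothetical flags
        # Without --ignore-no-formats-error this raises; with it, the metadata below is still returned
        self.raise_no_formats('This live stream has not started yet', expected=True)
    return {
        'id': video_id,
        'title': meta['title'],
        'live_status': 'is_upcoming',
    }
```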
[Any field](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L188-L303) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
[Any field](yt_dlp/extractor/common.py#L219-L426) apart from the aforementioned ones is considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.

#### Example

@@ -200,8 +247,10 @@ Assume at this point `meta`'s layout is:

```python
{
    ...
    "summary": "some fancy summary text",
    "user": {
        "name": "uploader name"
    },
    ...
}
```

@@ -220,6 +269,30 @@ description = meta['summary']  # incorrect

The latter will break the extraction process with `KeyError` if `summary` disappears from `meta` at some later time but with the former approach extraction will just go ahead with `description` set to `None` which is perfectly fine (remember `None` is equivalent to the absence of data).

If the data is nested, do not use `.get` chains, but instead make use of the utility functions `try_get` or `traverse_obj`.

Considering the above `meta` again, assume you want to extract `["user"]["name"]` and put it in the resulting info dict as `uploader`:

```python
uploader = try_get(meta, lambda x: x['user']['name'])  # correct
```
or
```python
uploader = traverse_obj(meta, ('user', 'name'))  # correct
```

and not like:

```python
uploader = meta['user']['name']  # incorrect
```
or
```python
uploader = meta.get('user', {}).get('name')  # incorrect
```

Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:

@@ -239,11 +312,36 @@ description = self._search_regex(
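The example block here was elided at a diff-hunk boundary; judging from the hunk context above, it presumably resembles the following sketch (the regex and field name are illustrative):

```python
description = self._search_regex(
    r'<div[^>]+id="description"[^>]*>([^<]+)<',  # hypothetical pattern
    webpage, 'description', fatal=False)
```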
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.

Another thing to remember is not to try to iterate over `None`.

Say you extracted a list of thumbnails into `thumbnail_data` using `try_get` and now want to iterate over them:

```python
thumbnail_data = try_get(...)
thumbnails = [{
    'url': item['url']
} for item in thumbnail_data or []]  # correct
```

and not like:

```python
thumbnail_data = try_get(...)
thumbnails = [{
    'url': item['url']
} for item in thumbnail_data]  # incorrect
```

In the latter case, `thumbnail_data` will be `None` if the field was not found and this will cause the loop `for item in thumbnail_data` to raise a fatal error. Using `for item in thumbnail_data or []` avoids this error and instead results in `thumbnails` being set to an empty list.


### Provide fallbacks

When extracting metadata, try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.


#### Example

Say `meta` from the previous example has a `title` and you are about to extract it. Since `title` is a mandatory meta field you should end up with something like:

@@ -262,6 +360,7 @@ title = meta.get('title') or self._og_search_title(webpage)
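The example block was elided at the hunk boundary; from the hunk context above it evidently reads:

```python
title = meta.get('title') or self._og_search_title(webpage)
```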
This code will try to extract from `meta` first and if it fails it will try extracting `og:title` from the webpage.


### Regular expressions

#### Don't capture groups you don't use

@@ -283,11 +382,10 @@ Incorrect:

```python
r'(id|ID)=(?P<id>\d+)'
```
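The correct counterpart was elided by the hunking; it is presumably the non-capturing form:

```python
r'(?:id|ID)=(?P<id>\d+)'
```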
#### Make regular expressions relaxed and flexible

When using regular expressions try to write them fuzzy, relaxed and flexible, skipping insignificant parts that are more likely to change, allowing both single and double quotes for quoted values and so on.


##### Example

Say you need to extract `title` from the following HTML code:

@@ -299,14 +397,14 @@ Say you need to extract `title` from the following HTML code:

The code for that task should look similar to:

```python
title = self._search_regex(
title = self._search_regex(  # correct
    r'<span[^>]+class="title"[^>]*>([^<]+)', webpage, 'title')
```

Or even better:

```python
title = self._search_regex(
title = self._search_regex(  # correct
    r'<span[^>]+class=(["\'])title\1[^>]*>(?P<title>[^<]+)',
    webpage, 'title', group='title')
```

@@ -316,14 +414,25 @@ Note how you tolerate potential changes in the `style` attribute's value or swit

The code definitely should not look like:

```python
title = self._search_regex(
title = self._search_regex(  # incorrect
    r'<span style="position: absolute; left: 910px; width: 90px; float: right; z-index: 9999;" class="title">(.*?)</span>',
    webpage, 'title', group='title')
```

or even

```python
title = self._search_regex(  # incorrect
    r'<span style=".*?" class="title">(.*?)</span>',
    webpage, 'title', group='title')
```

Here the presence or absence of other attributes, including `style`, is irrelevant for the data we need, and so the regex must not depend on it.


### Long lines policy

There is a soft limit to keep lines of code under 80 characters long. This means it should be respected if possible and if it does not make readability and code maintenance worse.
There is a soft limit to keep lines of code under 100 characters long. This means it should be respected if possible and if it does not make readability and code maintenance worse. Sometimes, it may be reasonable to go up to 120 characters and sometimes even 80 can be unreadable. Keep in mind that this is not a hard limit and is just one of many tools to make the code more readable.

For example, you should **never** split long string literals like URLs or some other often copied entities over multiple lines to fit this limit:
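A sketch of the rule (the URL is illustrative):

```python
# Correct: the long URL stays on a single line, even though it exceeds the limit
url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&key=yourapikey'
```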
@@ -360,6 +469,7 @@ TITLE_RE = r'<title>([^<]+)</title>'

```python
title = self._html_search_regex(TITLE_RE, webpage, 'title')
```


### Collapse fallbacks

Multiple fallback values can quickly become unwieldy. Collapse multiple fallback values into a single expression via a list of patterns.

@@ -385,10 +495,13 @@ description = (

Methods supporting a list of patterns are: `_search_regex`, `_html_search_regex`, `_og_search_property`, `_html_search_meta`.
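The example above was elided at the hunk boundary; the intended contrast is presumably along these lines (a sketch using common `og:`/`twitter:` meta names):

```python
# Verbose: chained fallbacks
description = (
    self._og_search_description(webpage, default=None)
    or self._html_search_meta('description', webpage, default=None)
    or self._html_search_meta('twitter:description', webpage, default=None))

# Collapsed: a single call given a list of patterns
description = self._html_search_meta(
    ['og:description', 'description', 'twitter:description'],
    webpage, 'description', default=None)
```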
### Trailing parentheses

Always move trailing parentheses after the last argument.

Note that this *does not* apply to braces `}` or square brackets `]`, both of which should be closed on a new line.

#### Example

Correct:

@@ -406,30 +519,36 @@ Incorrect:

```python
)
```
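Only the tail of the incorrect form survived the hunking above; the full contrast is presumably along these lines (a sketch reusing the earlier `title` regex):

Correct:

```python
title = self._search_regex(
    r'<span[^>]+class="title"[^>]*>([^<]+)', webpage, 'title')
```

Incorrect:

```python
title = self._search_regex(
    r'<span[^>]+class="title"[^>]*>([^<]+)', webpage, 'title',
)
```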
### Use convenience conversion and parsing functions

Wrap all extracted numeric data into safe functions from [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/utils.py): `int_or_none`, `float_or_none`. Use them for string to number conversions as well.
Wrap all extracted numeric data into safe functions from [`yt_dlp/utils.py`](yt_dlp/utils.py): `int_or_none`, `float_or_none`. Use them for string to number conversions as well.

Use `url_or_none` for safe URL processing.
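A one-line sketch (the `video` dict and its `thumb` key are hypothetical):

```python
thumbnail = url_or_none(video.get('thumb'))  # None unless the value looks like a valid URL
```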
Use `try_get` for safe metadata extraction from parsed JSON.
Use `try_get`, `dict_get` and `traverse_obj` for safe metadata extraction from parsed JSON.

Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution` for `resolution` extraction, `parse_duration` for `duration` extraction and `parse_age_limit` for `age_limit` extraction.
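A quick sketch of these helpers on illustrative raw values (not taken from any real site):

```python
upload_date = unified_strdate('2021-10-10T12:00:00Z')  # -> '20211010'
timestamp = unified_timestamp('2021-10-10T12:00:00Z')  # -> Unix timestamp as an int
duration = parse_duration('1:02:03')                   # -> 3723.0 (seconds)
view_count = parse_count('1.2M')                       # -> 1200000
filesize = parse_filesize('300 MiB')                   # -> 314572800
age_limit = parse_age_limit('18+')                     # -> 18
```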
Explore [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/utils.py) for more useful convenience functions.
Explore [`yt_dlp/utils.py`](yt_dlp/utils.py) for more useful convenience functions.

#### More examples

##### Safely extract optional description from parsed JSON
```python
description = try_get(response, lambda x: x['result']['video'][0]['summary'], compat_str)
description = traverse_obj(response, ('result', 'video', 'summary'), expected_type=str)
```

##### Safely extract more optional metadata
```python
video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
video = traverse_obj(response, ('result', 'video', 0), default={}, expected_type=dict)
description = video.get('summary')
duration = float_or_none(video.get('durationMs'), scale=1000)
view_count = int_or_none(video.get('views'))
```


# EMBEDDING YT-DLP
See [README.md#embedding-yt-dlp](README.md#embedding-yt-dlp) for instructions on how to embed yt-dlp in another Python program.
66 CONTRIBUTORS
@@ -1,6 +1,6 @@
pukkandan (owner)
shirt-dev (collaborator)
colethedj (collaborator)
coletdjnz/colethedj (collaborator)
Ashish0804 (collaborator)
h-h-h-h
pauldubois98
@@ -22,7 +22,7 @@ Zocker1999NET
nao20010128nao
kurumigi
bbepis
animelover1984
animelover1984/horahoradev
Pccode66
RobinD42
hseg
@@ -63,3 +63,65 @@ TpmKranz
mzbaulhaque
zackmark29
mbway
zerodytrash
wesnm
pento
rigstot
dirkf
funniray
Jessecar96
jhwgh1968
kikuyan
max-te
nchilada
pgaig
PSlava
stdedos
u-spec-png
Sipherdrakon
kidonng
smege1001
tandy1000
IONECarter
capntrips
mrfade
ParadoxGBB
wlritchi
NeroBurner
mahanstreamer
alerikaisattera
Derkades
BunnyHelp
i6t
std-move
Chocobozzz
ouwou
korli
octotherp
CeruleanSky
zootedb0t
chao813
ChillingPepper
ConquerorDopy
dalanmiller
DigitalDJ
f4pp3rk1ng
gesa
Jules-A
makeworld-the-better-one
MKSherbini
mrx23dot
poschi3
raphaeldore
renalid
sleaux-meaux
sulyi
tmarki
Vangelis66
AjaxGb
ajj8
jakubadamw
jfogelman
timethrow
sarnoud
Bojidarist
495 Changelog.md
@@ -7,23 +7,408 @@
* Update Changelog.md and CONTRIBUTORS
* Change "Merged with ytdl" version in Readme.md if needed
* Add new/fixed extractors in "new features" section of Readme.md
* Commit to master as `Release <version>`
* Commit as `Release <version>`
* Push to origin/release using `git push origin master:release`
  (build task will now run)
* Update version.py using `devscripts\update-version.py`
* Run `make issuetemplates`
* Commit to master as `[version] update :ci skip all`
* Push to origin/master
* Update changelog in /releases

-->


### 2021.10.10

* [downloader/ffmpeg] Fix bug in initializing `FFmpegPostProcessor`
* [minicurses] Fix when printing to file
* [downloader] Fix throttledratelimit
* [francetv] Fix extractor by [fstirlitz](https://github.com/fstirlitz), [sarnoud](https://github.com/sarnoud)
* [NovaPlay] Add extractor by [Bojidarist](https://github.com/Bojidarist)
* [ffmpeg] Revert "Set max probesize" - No longer needed
* [docs] Remove incorrect dependency on VC++10
* [build] Allow to release without changelog

### 2021.10.09

* Improved progress reporting
    * Separate `--console-title` and `--no-progress`
    * Add option `--progress` to show progress-bar even in quiet mode
    * Fix and refactor `minicurses` and use it for all progress reporting
    * Standardize use of terminal sequences and enable color support for windows 10
    * Add option `--progress-template` to customize progress-bar and console-title
    * Add postprocessor hooks and progress reporting
* [postprocessor] Add plugin support with option `--use-postprocessor`
* [extractor] Extract storyboards from SMIL manifests by [fstirlitz](https://github.com/fstirlitz)
* [outtmpl] Alternate form of format type `l` for `\n` delimited list
* [outtmpl] Format type `U` for unicode normalization
* [outtmpl] Allow empty output template to skip a type of file
* Merge webm formats into mkv if thumbnails are to be embedded
* [adobepass] Add RCN as MSO by [jfogelman](https://github.com/jfogelman)
* [ciscowebex] Add extractor by [damianoamatruda](https://github.com/damianoamatruda)
* [Gettr] Add extractor by [i6t](https://github.com/i6t)
* [GoPro] Add extractor by [i6t](https://github.com/i6t)
* [N1] Add extractor by [u-spec-png](https://github.com/u-spec-png)
* [Theta] Add video extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [Veo] Add extractor by [i6t](https://github.com/i6t)
* [Vupload] Add extractor by [u-spec-png](https://github.com/u-spec-png)
* [bbc] Extract better quality videos by [ajj8](https://github.com/ajj8)
* [Bilibili] Add subtitle converter by [u-spec-png](https://github.com/u-spec-png)
* [CBC] Cleanup tests by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
* [Douyin] Rewrite extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
* [Funimation] Fix for /v/ urls by [pukkandan](https://github.com/pukkandan), [Jules-A](https://github.com/Jules-A)
* [Funimation] Sort formats according to the relevant extractor-args
* [Hidive] Fix duplicate and incorrect formats
* [HotStarSeries] Fix cookies by [Ashish0804](https://github.com/Ashish0804)
* [LinkedInLearning] Add subtitles by [Ashish0804](https://github.com/Ashish0804)
* [Mediaite] Relax valid url by [coletdjnz](https://github.com/coletdjnz)
* [Newgrounds] Add age_limit and fix duration by [u-spec-png](https://github.com/u-spec-png)
* [Newgrounds] Fix view count on songs by [u-spec-png](https://github.com/u-spec-png)
* [parliamentlive.tv] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
* [PolskieRadio] Fix extractors by [jakubadamw](https://github.com/jakubadamw), [u-spec-png](https://github.com/u-spec-png)
* [reddit] Add embedded url by [u-spec-png](https://github.com/u-spec-png)
* [reddit] Fix 429 by generating a random `reddit_session` by [AjaxGb](https://github.com/AjaxGb)
* [Rumble] Add RumbleChannelIE by [Ashish0804](https://github.com/Ashish0804)
* [soundcloud:playlist] Detect last page correctly
* [SovietsCloset] Add duration from m3u8 by [ChillingPepper](https://github.com/ChillingPepper)
* [Streamable] Add codecs by [u-spec-png](https://github.com/u-spec-png)
* [vidme] Remove extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [youtube:tab] Fallback to API when webpage fails to download by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix non-fatal errors in fetching player
* Fix `--flat-playlist` when neither IE nor id is known
* Fix `-f mp4` behaving differently from youtube-dl
* Workaround for bug in `ssl.SSLContext.load_default_certs`
* [aes] Improve performance slightly by [sulyi](https://github.com/sulyi)
* [cookies] Fix keyring fallback by [mbway](https://github.com/mbway)
* [embedsubtitle] Fix error when duration is unknown
* [ffmpeg] Fix error when subtitle file is missing
* [ffmpeg] Set max probesize to workaround AAC HLS stream issues by [shirt](https://github.com/shirt-dev)
* [FixupM3u8] Remove redundant run if merged is needed
* [hls] Fix decryption issues by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
* [http] Respect user-provided chunk size over extractor's
* [utils] Let traverse_obj accept functions as keys
* [docs] Add note about our custom ffmpeg builds
* [docs] Write embedding and contributing documentation by [pukkandan](https://github.com/pukkandan), [timethrow](https://github.com/timethrow)
* [update] Check for new version even if not updateable
* [build] Add more files to the tarball
* [build] Allow building with py2exe (and misc fixes)
* [build] Use pycryptodomex by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
* [cleanup] Some minor refactoring, improve docs and misc cleanup

### 2021.09.25

* Add new option `--netrc-location`
* [outtmpl] Allow alternate fields using `,`
* [outtmpl] Add format type `B` to treat the value as bytes (eg: to limit the filename to a certain number of bytes)
* Separate the options `--ignore-errors` and `--no-abort-on-error`
* Basic framework for simultaneous download of multiple formats by [nao20010128nao](https://github.com/nao20010128nao)
* [17live] Add 17.live extractor by [nao20010128nao](https://github.com/nao20010128nao)
* [bilibili] Add BiliIntlIE and BiliIntlSeriesIE by [Ashish0804](https://github.com/Ashish0804)
* [CAM4] Add extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [Chingari] Add extractors by [Ashish0804](https://github.com/Ashish0804)
* [CGTN] Add extractor by [chao813](https://github.com/chao813)
* [damtomo] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
* [gotostage] Add extractor by [poschi3](https://github.com/poschi3)
* [Koo] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [Mediaite] Add Extractor by [Ashish0804](https://github.com/Ashish0804)
* [Mediaklikk] Add Extractor by [tmarki](https://github.com/tmarki), [mrx23dot](https://github.com/mrx23dot), [coletdjnz](https://github.com/coletdjnz)
* [MuseScore] Add Extractor by [Ashish0804](https://github.com/Ashish0804)
* [Newgrounds] Add NewgroundsUserIE and improve extractor by [u-spec-png](https://github.com/u-spec-png)
* [nzherald] Add NZHeraldIE by [coletdjnz](https://github.com/coletdjnz)
* [Olympics] Add replay extractor by [Ashish0804](https://github.com/Ashish0804)
* [Peertube] Add channel and playlist extractors by [u-spec-png](https://github.com/u-spec-png)
* [radlive] Add extractor by [nyuszika7h](https://github.com/nyuszika7h)
* [SovietsCloset] Add extractor by [ChillingPepper](https://github.com/ChillingPepper)
* [Streamanity] Add Extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [Theta] Add extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [Yandex] Add ZenYandexIE and ZenYandexChannelIE by [Ashish0804](https://github.com/Ashish0804)
* [9Now] handle episodes of series by [dalanmiller](https://github.com/dalanmiller)
* [AnimalPlanet] Fix extractor by [Sipherdrakon](https://github.com/Sipherdrakon)
* [Arte] Improve description extraction by [renalid](https://github.com/renalid)
* [atv.at] Use jwt for API by [NeroBurner](https://github.com/NeroBurner)
* [brightcove] Extract subtitles from manifests
* [CBC] Fix CBC Gem extractors by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
* [cbs] Report appropriate error for DRM
* [comedycentral] Support `collection-playlist` by [nixxo](https://github.com/nixxo)
* [DIYNetwork] Support new format by [Sipherdrakon](https://github.com/Sipherdrakon)
* [downloader/niconico] Pass custom headers by [nao20010128nao](https://github.com/nao20010128nao)
* [dw] Fix extractor
* [Fancode] Fix live streams by [zenerdi0de](https://github.com/zenerdi0de)
* [funimation] Fix for locations outside US by [Jules-A](https://github.com/Jules-A), [pukkandan](https://github.com/pukkandan)
* [globo] Fix GloboIE by [Ashish0804](https://github.com/Ashish0804)
* [HiDive] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
* [Hotstar] Add referer for subs by [Ashish0804](https://github.com/Ashish0804)
* [itv] Fix extractor, add subtitles and thumbnails by [coletdjnz](https://github.com/coletdjnz), [sleaux-meaux](https://github.com/sleaux-meaux), [Vangelis66](https://github.com/Vangelis66)
* [lbry] Show error message from API response
* [Mxplayer] Use mobile API by [Ashish0804](https://github.com/Ashish0804)
* [NDR] Rewrite NDRIE by [Ashish0804](https://github.com/Ashish0804)
* [Nuvid] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
* [Oreilly] Handle new web url by [MKSherbini](https://github.com/MKSherbini)
* [pbs] Fix subtitle extraction by [coletdjnz](https://github.com/coletdjnz), [gesa](https://github.com/gesa), [raphaeldore](https://github.com/raphaeldore)
* [peertube] Update instances by [u-spec-png](https://github.com/u-spec-png)
* [plutotv] Fix extractor for URLs with `/en`
* [reddit] Workaround for 429 by redirecting to old.reddit.com
* [redtube] Fix exts
* [soundcloud] Make playlist extraction lazy
* [soundcloud] Retry playlist pages on `502` error and update `_CLIENT_ID`
* [southpark] Fix SouthParkDE by [coletdjnz](https://github.com/coletdjnz)
* [SovietsCloset] Fix playlists for games with only named categories by [ConquerorDopy](https://github.com/ConquerorDopy)
* [SpankBang] Fix uploader by [f4pp3rk1ng](https://github.com/f4pp3rk1ng), [coletdjnz](https://github.com/coletdjnz)
* [tiktok] Use API to fetch higher quality video by [MinePlayersPE](https://github.com/MinePlayersPE), [llacb47](https://github.com/llacb47)
* [TikTokUser] Fix extractor using mobile API by [MinePlayersPE](https://github.com/MinePlayersPE), [llacb47](https://github.com/llacb47)
* [videa] Fix some extraction errors by [nyuszika7h](https://github.com/nyuszika7h)
* [VrtNU] Handle login errors by [llacb47](https://github.com/llacb47)
* [vrv] Don't raise error when thumbnails are missing
* [youtube] Cleanup authentication code by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix `--mark-watched` with `--cookies-from-browser`
* [youtube] Improvements to JS player extraction and add extractor-args to skip it by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Retry on 'Unknown Error' by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Return full URL instead of just ID
* [youtube] Warn when trying to download clips
* [zdf] Improve format sorting
* [zype] Extract subtitles from the m3u8 manifest by [fstirlitz](https://github.com/fstirlitz)
* Allow `--force-write-archive` to work with `--flat-playlist`
* Download subtitles in order of `--sub-langs`
* Allow `0` in `--playlist-items`
* Handle more playlist errors with `-i`
* Fix `--no-get-comments`
* Fix `extra_info` being reused across runs
* Fix compat options `no-direct-merge` and `playlist-index`
* Dump files should obey `--trim-filename` by [sulyi](https://github.com/sulyi)
* [aes] Add `aes_gcm_decrypt_and_verify` by [sulyi](https://github.com/sulyi), [pukkandan](https://github.com/pukkandan)
* [aria2c] Fix IV for some AES-128 streams by [shirt](https://github.com/shirt-dev)
* [compat] Don't ignore `HOME` (if set) on windows
* [cookies] Make browser names case insensitive
* [cookies] Print warning for cookie decoding error only once
* [extractor] Fix root-relative URLs in MPD by [DigitalDJ](https://github.com/DigitalDJ)
* [ffmpeg] Add `aac_adtstoasc` when merging if needed
* [fragment,aria2c] Generalize and refactor some code
* [fragment] Avoid repeated request for AES key
* [fragment] Fix range header when using `-N` and media sequence by [shirt](https://github.com/shirt-dev)
* [hls,aes] Fallback to native implementation for AES-CBC and detect `Cryptodome` in addition to `Crypto`
* [hls] Byterange + AES128 is supported by native downloader
* [ModifyChapters] Improve sponsor chapter merge algorithm by [nihil-admirari](https://github.com/nihil-admirari)
* [ModifyChapters] Minor fixes
* [WebVTT] Adjust parser to accommodate PBS subtitles
* [utils] Improve `extract_timezone` by [dirkf](https://github.com/dirkf)
* [options] Fix `--no-config` and refactor reading of config files
* [options] Strip spaces and ignore empty entries in list-like switches
* [test/cookies] Improve logging
* [build] Automate more of the release process by [animelover1984](https://github.com/animelover1984), [pukkandan](https://github.com/pukkandan)
* [build] Fix sha256 by [nihil-admirari](https://github.com/nihil-admirari)
* [build] Bring back brew taps by [nao20010128nao](https://github.com/nao20010128nao)
* [build] Provide `--onedir` zip for windows by [pukkandan](https://github.com/pukkandan)
* [cleanup,docs] Add deprecation warning in docs for some counter intuitive behaviour
* [cleanup] Fix line endings for `nebula.py` by [glenn-slayden](https://github.com/glenn-slayden)
* [cleanup] Improve `make clean-test` by [sulyi](https://github.com/sulyi)
* [cleanup] Misc

### 2021.09.02

* **Native SponsorBlock** implementation by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
    * `--sponsorblock-remove CATS` removes specified chapters from file
    * `--sponsorblock-mark CATS` marks the specified sponsor sections as chapters
    * `--sponsorblock-chapter-title TMPL` to specify sponsor chapter template
    * `--sponsorblock-api URL` to use a different API
    * No re-encoding is done unless `--force-keyframes-at-cuts` is used
    * The fetched sponsor sections are written to the infojson
    * Deprecates: `--sponskrub`, `--no-sponskrub`, `--sponskrub-cut`, `--no-sponskrub-cut`, `--sponskrub-force`, `--no-sponskrub-force`, `--sponskrub-location`, `--sponskrub-args`
* Split `--embed-chapters` from `--embed-metadata` (it still implies the former by default)
* Add option `--remove-chapters` to remove arbitrary chapters by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
* Add option `--force-keyframes-at-cuts` for more accurate cuts when removing and splitting chapters by [nihil-admirari](https://github.com/nihil-admirari)
* Let `--match-filter` reject entries early
    * Makes redundant: `--match-title`, `--reject-title`, `--min-views`, `--max-views`
* [lazy_extractor] Improvements (It now passes all tests)
    * Bugfix for when plugin directory doesn't exist by [kidonng](https://github.com/kidonng)
    * Create instance only after pre-checking archive
    * Import actual class if an attribute is accessed
    * Fix `suitable` and add flake8 test
* [downloader/ffmpeg] Experimental support for DASH manifests (including live)
    * Your ffmpeg must have [this patch](https://github.com/FFmpeg/FFmpeg/commit/3249c757aed678780e22e99a1a49f4672851bca9) applied for YouTube DASH to work
* [downloader/ffmpeg] Allow passing custom arguments before `-i`
* [BannedVideo] Add extractor by [smege1001](https://github.com/smege1001), [blackjack4494](https://github.com/blackjack4494), [pukkandan](https://github.com/pukkandan)
* [bilibili] Add category extractor by [animelover1984](https://github.com/animelover1984)
* [Epicon] Add extractors by [Ashish0804](https://github.com/Ashish0804)
* [filmmodu] Add extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
* [GabTV] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [Hungama] Fix `HungamaSongIE` and add `HungamaAlbumPlaylistIE` by [Ashish0804](https://github.com/Ashish0804)
* [ManotoTV] Add new extractors by [tandy1000](https://github.com/tandy1000)
* [Niconico] Add Search extractors by [animelover1984](https://github.com/animelover1984), [pukkandan](https://github.com/pukkandan)
* [Patreon] Add `PatreonUserIE` by [zenerdi0de](https://github.com/zenerdi0de)
* [peloton] Add extractor by [IONECarter](https://github.com/IONECarter), [capntrips](https://github.com/capntrips), [pukkandan](https://github.com/pukkandan)
* [ProjectVeritas] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [radiko] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
* [StarTV] Add extractor for `startv.com.tr` by [mrfade](https://github.com/mrfade), [coletdjnz](https://github.com/coletdjnz)
* [tiktok] Add `TikTokUserIE` by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
* [Tokentube] Add extractor by [u-spec-png](https://github.com/u-spec-png)
* [TV2Hu] Fix `TV2HuIE` and add `TV2HuSeriesIE` by [Ashish0804](https://github.com/Ashish0804)
* [voicy] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
* [adobepass] Fix Verizon SAML login by [nyuszika7h](https://github.com/nyuszika7h), [ParadoxGBB](https://github.com/ParadoxGBB)
* [afreecatv] Fix adult VODs by [wlritchi](https://github.com/wlritchi)
* [afreecatv] Tolerate failure to parse date string by [wlritchi](https://github.com/wlritchi)
* [aljazeera] Fix extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
* [ATV.at] Fix extractor for ATV.at by [NeroBurner](https://github.com/NeroBurner), [coletdjnz](https://github.com/coletdjnz)
* [bitchute] Fix test by [mahanstreamer](https://github.com/mahanstreamer)
* [camtube] Remove obsolete extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [CDA] Add more formats by [u-spec-png](https://github.com/u-spec-png)
* [eroprofile] Fix page skipping in albums by [jhwgh1968](https://github.com/jhwgh1968)
* [facebook] Fix format sorting
* [facebook] Fix metadata extraction by [kikuyan](https://github.com/kikuyan)
* [facebook] Update onion URL by [Derkades](https://github.com/Derkades)
* [HearThisAtIE] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
* [instagram] Add referrer to prevent throttling by [u-spec-png](https://github.com/u-spec-png), [kikuyan](https://github.com/kikuyan)
* [iwara.tv] Extract more metadata by [BunnyHelp](https://github.com/BunnyHelp)
* [iwara] Add thumbnail by [i6t](https://github.com/i6t)
* [kakao] Fix extractor
* [mediaset] Fix extraction for some videos by [nyuszika7h](https://github.com/nyuszika7h)
* [Motherless] Fix extractor by [coletdjnz](https://github.com/coletdjnz)
* [Nova] fix extractor by [std-move](https://github.com/std-move)
* [ParamountPlus] Fix geo verification by [shirt](https://github.com/shirt-dev)
* [peertube] handle new video URL format by [Chocobozzz](https://github.com/Chocobozzz)
* [pornhub] Separate and fix playlist extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
* [reddit] Fix for quarantined subreddits by [ouwou](https://github.com/ouwou)
* [ShemarooMe] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
* [soundcloud] Refetch `client_id` on 403
* [tiktok] Fix metadata extraction
* [TV2] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
* [tv5mondeplus] Fix extractor by [korli](https://github.com/korli)
* [VH1,TVLand] Fix extractors by [Sipherdrakon](https://github.com/Sipherdrakon)
* [Viafree] Fix extractor and extract subtitles by [coletdjnz](https://github.com/coletdjnz)
* [XHamster] Extract `uploader_id` by [octotherp](https://github.com/octotherp)
* [youtube] Add `shorts` to `_VALID_URL`
* [youtube] Add av01 itags to known formats list by [blackjack4494](https://github.com/blackjack4494)
* [youtube] Extract error messages from HTTPError response by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix subtitle names
* [youtube] Prefer audio stream that YouTube considers default
* [youtube] Remove annotations and deprecate `--write-annotations` by [coletdjnz](https://github.com/coletdjnz)
* [Zee5] Fix extractor and add subtitles by [Ashish0804](https://github.com/Ashish0804)
* [aria2c] Obey `--rate-limit`
* [EmbedSubtitle] Continue even if some files are missing
* [extractor] Better error message for DRM
* [extractor] Common function `_match_valid_url`
* [extractor] Show video id in error messages if possible
* [FormatSort] Remove priority of `lang`
* [options] Add `_set_from_options_callback`
* [SubtitleConvertor] Fix bug during subtitle conversion
* [utils] Add `parse_qs`
* [webvtt] Fix timestamp overflow adjustment by [fstirlitz](https://github.com/fstirlitz)
* Bugfix for `--replace-in-metadata`
* Don't try to merge with final extension
* Fix `--force-overwrites` when using `-k`
* Fix `--no-prefer-free-formats` by [CeruleanSky](https://github.com/CeruleanSky)
* Fix `-F` for extractors that directly return url
* Fix `-J` when there are failed videos
* Fix `extra_info` being reused across runs
* Fix `playlist_index` not obeying `playlist_start` and add tests
* Fix resuming of single formats when using `--no-part`
* Revert erroneous use of the `Content-Length` header by [fstirlitz](https://github.com/fstirlitz)
|
||||
* Use `os.replace` where applicable by; paulwrubel
|
||||
* [build] Add homebrew taps `yt-dlp/taps/yt-dlp` by [nao20010128nao](https://github.com/nao20010128nao)
|
||||
* [build] Fix bug in making `yt-dlp.tar.gz`
|
||||
* [docs] Fix some typos by [pukkandan](https://github.com/pukkandan), [zootedb0t](https://github.com/zootedb0t)
|
||||
* [cleanup] Replace improper use of tab in trovo by [glenn-slayden](https://github.com/glenn-slayden)
|
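Since `--match-filter` now rejects entries before download, the same filter is also usable from the embedding API. A minimal sketch, assuming a 2021.09+ `yt_dlp` install (the URL and the filter string are only illustrative):

```python
import yt_dlp
from yt_dlp.utils import match_filter_func

ydl_opts = {
    # Reject livestreams and videos of 60 seconds or less before downloading;
    # this makes --match-title/--min-views style options redundant
    'match_filter': match_filter_func('duration > 60 & !is_live'),
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```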
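The new `yt_dlp.utils.parse_qs` helper parses the query string of a full URL into a dict of lists, like `urllib.parse.parse_qs`. A small sketch (the URL is illustrative):

```python
from yt_dlp.utils import parse_qs

# Extracts and parses only the query-string part of the URL
qs = parse_qs('https://www.youtube.com/watch?v=abc123&t=42')
assert qs['v'] == ['abc123']
assert qs['t'] == ['42']
```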
### 2021.08.10
* Add option `--replace-in-metadata`
* Add option `--no-simulate` to not simulate even when `--print` or `--list...` are used - Deprecates `--print-json`
* Allow entire infodict to be printed using `%()s` - makes `--dump-json` redundant
* Allow multiple `--exec` and `--exec-before-download`
* Add regex to `--match-filter`
* Add all format filtering operators also to `--match-filter` by [max-te](https://github.com/max-te)
* Add compat-option `no-keep-subs`
* [adobepass] Add MSO Cablevision by [Jessecar96](https://github.com/Jessecar96)
* [BandCamp] Add `BandcampMusicIE` by [Ashish0804](https://github.com/Ashish0804)
* [blackboardcollaborate] Add new extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
* [eroprofile] Add album downloader by [jhwgh1968](https://github.com/jhwgh1968)
* [mirrativ] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
* [openrec] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
* [nbcolympics:stream] Fix extractor by [nchilada](https://github.com/nchilada), [pukkandan](https://github.com/pukkandan)
* [nbcolympics] Update extractor for 2020 olympics by [wesnm](https://github.com/wesnm)
* [paramountplus] Separate extractor and fix some titles by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
* [RCTIPlus] Support events and TV by [MinePlayersPE](https://github.com/MinePlayersPE)
* [Newgrounds] Improve extractor and fix playlist by [u-spec-png](https://github.com/u-spec-png)
* [aenetworks] Update `_THEPLATFORM_KEY` and `_THEPLATFORM_SECRET` by [wesnm](https://github.com/wesnm)
* [crunchyroll] Fix thumbnail by [funniray](https://github.com/funniray)
* [HotStar] Use API for metadata and extract subtitles by [Ashish0804](https://github.com/Ashish0804)
* [instagram] Fix comments extraction by [u-spec-png](https://github.com/u-spec-png)
* [peertube] Fix videos without description by [u-spec-png](https://github.com/u-spec-png)
* [twitch:clips] Extract `display_id` by [dirkf](https://github.com/dirkf)
* [viki] Print error message from API request
* [Vine] Remove invalid formats by [u-spec-png](https://github.com/u-spec-png)
* [VrtNU] Fix XSRF token by [pgaig](https://github.com/pgaig)
* [vrv] Fix thumbnail extraction by [funniray](https://github.com/funniray)
* [youtube] Add extractor-arg `include-live-dash` to show live dash formats
* [youtube] Improve signature function detection by [PSlava](https://github.com/PSlava)
* [youtube] Raise appropriate error when API pages can't be downloaded
* Ensure `_write_ytdl_file` closes file handle on error
* Fix `--compat-options filename` by [stdedos](https://github.com/stdedos)
* Fix issues with infodict sanitization
* Fix resuming when using `--no-part`
* Fix wrong extension for intermediate files
* Handle `BrokenPipeError` by [kikuyan](https://github.com/kikuyan)
* Show libraries present in the verbose output header
* [extractor] Detect `stpp` as subtitles in MPD by [fstirlitz](https://github.com/fstirlitz)
* [extractor] Reset non-repeating warnings per video
* [ffmpeg] Fix streaming `mp4` to `stdout`
* [ffmpeg] Allow `--ffmpeg-location` to be a file with a different name
* [utils] Fix `InAdvancePagedList.__getitem__`
* [utils] Fix `traverse_obj` depth when `is_user_input` (see the sketch at the end of this section)
* [webvtt] Merge daisy-chained duplicate cues by [fstirlitz](https://github.com/fstirlitz)
* [build] Use custom build of `pyinstaller` by [shirt](https://github.com/shirt-dev)
* [tests:download] Add batch testing for extractors (`test_YourExtractor_all`)
* [docs] Document which fields `--add-metadata` adds to the file
* [docs] Fix some mistakes and improve doc
* [cleanup] Misc code cleanup
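For reference, `yt_dlp.utils.traverse_obj` walks nested dicts/lists without raising on missing keys; a minimal sketch of its behaviour (the data is illustrative):

```python
from yt_dlp.utils import traverse_obj

data = {'items': [{'snippet': {'title': 'example'}}]}

# Follows the given path, returning None instead of raising
# KeyError/IndexError when an intermediate key or index is missing
assert traverse_obj(data, ('items', 0, 'snippet', 'title')) == 'example'
assert traverse_obj(data, ('items', 5, 'snippet')) is None
```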
### 2021.08.02
* Add logo, banner and donate links
* [outtmpl] Expand and escape environment variables
* [outtmpl] Add format types `j` (json), `l` (comma-delimited list), `q` (quoted for terminal)
* [downloader] Allow streaming some unmerged formats to stdout using ffmpeg
* [youtube] **Age-gate bypass**
    * Add `agegate` clients by [pukkandan](https://github.com/pukkandan), [MinePlayersPE](https://github.com/MinePlayersPE)
    * Add `thirdParty` to agegate clients to bypass more videos
    * Simplify client definitions, expose `embedded` clients
    * Improve age-gate detection by [coletdjnz](https://github.com/coletdjnz)
    * Fix default global API key by [coletdjnz](https://github.com/coletdjnz)
    * Add `creator` clients for age-gate bypass using unverified accounts by [zerodytrash](https://github.com/zerodytrash), [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
* [adobepass] Add MSO Sling TV by [wesnm](https://github.com/wesnm)
* [CBS] Add `ParamountPlusSeriesIE` by [Ashish0804](https://github.com/Ashish0804)
* [dplay] Add `ScienceChannelIE` by [Sipherdrakon](https://github.com/Sipherdrakon)
* [UtreonIE] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [youtube] Add `mweb` client by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Add `player_client=all` (see the sketch at the end of this section)
* [youtube] Force `hl=en` for comments by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix format sorting when using alternate clients
* [youtube] Misc cleanup by [pukkandan](https://github.com/pukkandan), [coletdjnz](https://github.com/coletdjnz)
* [youtube] Extract SAPISID only once
* [CBS] Add fallback by [llacb47](https://github.com/llacb47), [pukkandan](https://github.com/pukkandan)
* [Hotstar] Support cookies by [Ashish0804](https://github.com/Ashish0804)
* [HotStarSeriesIE] Fix regex by [Ashish0804](https://github.com/Ashish0804)
* [bilibili] Improve `_VALID_URL`
* [mediaset] Fix extraction by [nixxo](https://github.com/nixxo)
* [Mxplayer] Add h265 formats by [Ashish0804](https://github.com/Ashish0804)
* [RCTIPlus] Remove PhantomJS dependency by [MinePlayersPE](https://github.com/MinePlayersPE)
* [tenplay] Add MA15+ age limit by [pento](https://github.com/pento)
* [vidio] Fix login error detection by [MinePlayersPE](https://github.com/MinePlayersPE)
* [vimeo] Better extraction of original file by [Ashish0804](https://github.com/Ashish0804)
* [generic] Support KVS player (replaces `ThisVidIE`) by [rigstot](https://github.com/rigstot)
* Add compat-option `no-clean-infojson`
* Remove `asr` appearing twice in `-F`
* Set `home:` as the default key for `-P`
* [utils] Fix slicing of reversed `LazyList`
* [FormatSort] Fix bug for audio with unknown codec
* [test:download] Support testing with `ignore_no_formats_error`
* [cleanup] Refactor some code
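The client selection above is also reachable when embedding, through the `extractor_args` parameter. A minimal sketch, assuming the params mirror the CLI shape `--extractor-args "youtube:player_client=android,web"` (values are lists of strings; the URL is illustrative):

```python
import yt_dlp

ydl_opts = {
    # Equivalent to: --extractor-args "youtube:player_client=android,web"
    'extractor_args': {'youtube': {'player_client': ['android', 'web']}},
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(info['title'])
```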
### 2021.07.24
* [youtube:tab] Extract video duration early
* [downloader] Pass `info_dict` to `progress_hook`s
* [youtube] Fix age-gated videos for API clients when cookies are supplied by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Disable `get_video_info` age-gate workaround - This endpoint seems to be completely dead
* [youtube] Try all clients even if age-gated
* [youtube] Fix subtitles only being extracted from the first client

@@ -43,7 +428,7 @@

* [FFmpegMetadata] Add language of each stream and some refactoring
* [douyin] Add extractor by [pukkandan](https://github.com/pukkandan), [pyx](https://github.com/pyx)
* [pornflip] Add extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
* **[youtube] Extract data from multiple clients** by [pukkandan](https://github.com/pukkandan), [coletdjnz](https://github.com/coletdjnz)
    * `player_client` now accepts multiple clients
    * Default `player_client` = `android,web`
        * This uses twice as many requests, but avoids throttling for most videos while also not losing any formats

@@ -56,19 +441,19 @@

* [youtube] Misc fixes
    * Improve extraction of livestream metadata by [pukkandan](https://github.com/pukkandan), [krichbanana](https://github.com/krichbanana)
    * Hide live dash formats since they can't be downloaded anyway
    * Fix authentication when using multiple accounts by [coletdjnz](https://github.com/coletdjnz)
    * Fix controversial videos when requested via API by [coletdjnz](https://github.com/coletdjnz)
    * Fix session index extraction and headers for non-web player clients by [coletdjnz](https://github.com/coletdjnz)
    * Make `--extractor-retries` work for more errors
    * Fix sorting of 3gp format
    * Sanity check `chapters` (and refactor related code)
    * Make `parse_time_text` and `_extract_chapters` non-fatal
    * Misc cleanup and bug fixes by [coletdjnz](https://github.com/coletdjnz)
* [youtube:tab] Fix channels tab
* [youtube:tab] Extract playlist availability by [coletdjnz](https://github.com/coletdjnz)
* **[youtube:comments] Move comment extraction to new API** by [coletdjnz](https://github.com/coletdjnz) (see the sketch at the end of this section)
    * Adds extractor-args `comment_sort` (`top`/`new`), `max_comments`, `max_comment_depth`
* [youtube:comments] Fix `is_favorited`, improve `like_count` parsing by [coletdjnz](https://github.com/coletdjnz)
* [BravoTV] Improve metadata extraction by [kevinoconnor7](https://github.com/kevinoconnor7)
* [crunchyroll:playlist] Force http
* [yahoo:gyao:player] Relax `_VALID_URL` by [nao20010128nao](https://github.com/nao20010128nao)

@@ -94,7 +479,7 @@

* [utils] Improve `js_to_json` comment regex by [fstirlitz](https://github.com/fstirlitz)
* [webvtt] Fix timestamps
* [compat] Remove unnecessary code
* [docs] Fix default of multistreams
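The comment extractor-args can likewise be set when embedding. A minimal sketch, assuming `getcomments` triggers comment extraction and using the option names from the entry above (the URL is illustrative):

```python
import yt_dlp

ydl_opts = {
    'getcomments': True,
    # Equivalent to: --extractor-args "youtube:comment_sort=top;max_comments=50"
    'extractor_args': {'youtube': {'comment_sort': ['top'], 'max_comments': ['50']}},
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(len(info.get('comments') or []))
```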
### 2021.07.07
@@ -104,7 +489,7 @@

* Add extractor option `skip` for `youtube`. Eg: `--extractor-args youtube:skip=hls,dash`
    * Deprecates `--youtube-skip-dash-manifest`, `--youtube-skip-hls-manifest`, `--youtube-include-dash-manifest`, `--youtube-include-hls-manifest`
* Allow `--list...` options to work with `--print`, `--quiet` and other `--list...` options
* [youtube] Use `player` API for additional video extraction requests by [coletdjnz](https://github.com/coletdjnz)
    * **Fixes youtube premium music** (format 141) extraction
    * Adds extractor option `player_client` = `web`/`android`
        * **`--extractor-args youtube:player_client=android` works around the throttling** for the time-being

@@ -112,7 +497,7 @@

    * Adds age-gate fallback using embedded client
* [youtube] Choose correct Live chat API for upcoming streams by [krichbanana](https://github.com/krichbanana)
* [youtube] Fix subtitle names for age-gated videos
* [youtube:comments] Fix error handling and add `itct` to params by [coletdjnz](https://github.com/coletdjnz)
* [youtube_live_chat] Fix download with cookies by [siikamiika](https://github.com/siikamiika)
* [youtube_live_chat] Use `clickTrackingParams` by [siikamiika](https://github.com/siikamiika)
* [Funimation] Rewrite extractor

@@ -159,9 +544,9 @@

* [downloader/mhtml] Add new downloader for slideshows/storyboards by [fstirlitz](https://github.com/fstirlitz)
* [youtube] Temporary **fix for age-gate**
* [youtube] Support ongoing live chat by [siikamiika](https://github.com/siikamiika)
* [youtube] Improve SAPISID cookie handling by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Login is not needed for `:ytrec`
* [youtube] Non-fatal alert reporting for unavailable videos page by [coletdjnz](https://github.com/coletdjnz)
* [twitcasting] Websocket support by [nao20010128nao](https://github.com/nao20010128nao)
* [mediasite] Extract slides by [fstirlitz](https://github.com/fstirlitz)
* [funimation] Extract subtitles

@@ -185,7 +570,7 @@
### 2021.06.09
* Fix bug where `%(field)d` in filename template throws error
* [outtmpl] Improve offset parsing
* [test] More rigorous tests for `prepare_filename`
### 2021.06.08
@@ -219,7 +604,7 @@

* Merge youtube-dl: Upto [commit/d495292](https://github.com/ytdl-org/youtube-dl/commit/d495292852b6c2f1bd58bc2141ff2b0265c952cf)
* Pre-check archive and filters during playlist extraction
* Handle Basic Auth `user:pass` in URLs by [hhirtz](https://github.com/hhirtz) and [pukkandan](https://github.com/pukkandan)
* [archiveorg] Add `YoutubeWebArchiveIE` by [coletdjnz](https://github.com/coletdjnz) and [alex-gedeon](https://github.com/alex-gedeon)
* [fancode] Add extractor by [rhsmachine](https://github.com/rhsmachine)
* [patreon] Support vimeo embeds by [rhsmachine](https://github.com/rhsmachine)
* [Saitosan] Add new extractor by [llacb47](https://github.com/llacb47)

@@ -262,7 +647,7 @@

* **Youtube improvements**:
    * Support youtube music `MP`, `VL` and `browse` pages
    * Extract more formats for youtube music by [craftingmod](https://github.com/craftingmod), [coletdjnz](https://github.com/coletdjnz) and [pukkandan](https://github.com/pukkandan)
    * Extract multiple subtitles in same language by [pukkandan](https://github.com/pukkandan) and [tpikonen](https://github.com/tpikonen)
    * Redirect channels that don't have a `videos` tab to their `UU` playlists
    * Support in-channel search

@@ -271,10 +656,10 @@

    * Extract audio language
    * Add subtitle language names by [nixxo](https://github.com/nixxo) and [tpikonen](https://github.com/tpikonen)
    * Show alerts only from the final webpage
    * Add `html5=1` param to `get_video_info` page requests by [coletdjnz](https://github.com/coletdjnz)
    * Better message when login required
* **Add option `--print`**: to print any field/template
    * Makes redundant: `--get-description`, `--get-duration`, `--get-filename`, `--get-format`, `--get-id`, `--get-thumbnail`, `--get-title`, `--get-url`
* Field `additional_urls` to download additional videos from metadata using [`--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata)
* Merge youtube-dl: Upto [commit/dfbbe29](https://github.com/ytdl-org/youtube-dl/commit/dfbbe2902fc67f0f93ee47a8077c148055c67a9b)
* Write thumbnail of playlist and add `pl_thumbnail` outtmpl key

@@ -368,11 +753,11 @@

* [TubiTv] Add `TubiTvShowIE` by [Ashish0804](https://github.com/Ashish0804)
* [twitcasting] Fix extractor
* [viu:ott] Fix extractor and support series by [lkho](https://github.com/lkho) and [pukkandan](https://github.com/pukkandan)
* [youtube:tab] Show unavailable videos in playlists by [coletdjnz](https://github.com/coletdjnz)
* [youtube:tab] Reload with unavailable videos for all playlists
* [youtube] Ignore invalid stretch ratio
* [youtube] Improve channel syncid extraction to support ytcfg by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Standardize API calls for tabs, mixes and search by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Bugfix in `_extract_ytcfg`
* [mildom:user:vod] Download only necessary amount of pages
* [mildom] Remove proxy completely by [fstirlitz](https://github.com/fstirlitz)

@@ -384,8 +769,8 @@

* Improve the yt-dlp.sh script by [fstirlitz](https://github.com/fstirlitz)
* [lazy_extractor] Do not load plugins
* [ci] Disable fail-fast
* [docs] Clarify which deprecated options still work
* [docs] Fix typos
### 2021.04.11
@@ -402,17 +787,17 @@

* [nitter] Fix extraction of reply tweets and update instance list by [B0pol](https://github.com/B0pol)
* [nitter] Fix thumbnails by [B0pol](https://github.com/B0pol)
* [youtube] Fix thumbnail URL
* [youtube] Parse API parameters from initial webpage by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Extract comments' approximate timestamp by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix alert extraction
* [bilibili] Fix uploader
* [utils] Add `datetime_from_str` and `datetime_add_months` by [coletdjnz](https://github.com/coletdjnz)
* Run some `postprocessors` before actual download
* Improve argument parsing for `-P`, `-o`, `-S`
* Fix some `m3u8` not obeying `--allow-unplayable-formats`
* Fix default of `dynamic_mpd`
* Deprecate `--all-formats`, `--include-ads`, `--hls-prefer-native`, `--hls-prefer-ffmpeg`
* [docs] Improvements
### 2021.04.03
* Merge youtube-dl: Upto [commit/654b4f4](https://github.com/ytdl-org/youtube-dl/commit/654b4f4ff2718f38b3182c1188c5d569c14cc70a)

@@ -423,10 +808,10 @@

* [mildom] Update extractor with current proxy by [nao20010128nao](https://github.com/nao20010128nao)
* [ard:mediathek] Fix video id extraction
* [generic] Detect Invidious' link element
* [youtube] Show premium state in `availability` by [coletdjnz](https://github.com/coletdjnz)
* [viewsource] Add extractor to handle `view-source:`
* [sponskrub] Run before embedding thumbnail
* [docs] Improve `--parse-metadata` documentation
### 2021.03.24.1
@@ -458,8 +843,8 @@

* Use headers and cookies when downloading subtitles by [damianoamatruda](https://github.com/damianoamatruda)
* Parse resolution in info dictionary by [damianoamatruda](https://github.com/damianoamatruda)
* More consistent warning messages by [damianoamatruda](https://github.com/damianoamatruda) and [pukkandan](https://github.com/pukkandan)
* [docs] Add deprecated options and aliases in readme
* [docs] Fix some minor mistakes

* [niconico] Partial fix adapted from [animelover1984/youtube-dl@b5eff52](https://github.com/animelover1984/youtube-dl/commit/b5eff52dd9ed5565672ea1694b38c9296db3fade) (login and smile formats still don't work)
* [niconico] Add user extractor by [animelover1984](https://github.com/animelover1984)

@@ -468,7 +853,7 @@

* [stitcher] Merge from youtube-dl by [nixxo](https://github.com/nixxo)
* [rcs] Improved extraction by [nixxo](https://github.com/nixxo)
* [linuxacademy] Improve regex
* [youtube] Show if video is `private`, `unlisted` etc in info (`availability`) by [coletdjnz](https://github.com/coletdjnz) and [pukkandan](https://github.com/pukkandan)
* [youtube] Bugfix for channel playlist extraction
* [nbc] Improve metadata extraction by [2ShedsJackson](https://github.com/2ShedsJackson)

@@ -485,15 +870,15 @@

* [wimtv] Add extractor by [nixxo](https://github.com/nixxo)
* [mtv] Add mtv.it and extract series metadata by [nixxo](https://github.com/nixxo)
* [pluto.tv] Add extractor by [kevinoconnor7](https://github.com/kevinoconnor7)
* [youtube] Rewrite comment extraction by [coletdjnz](https://github.com/coletdjnz)
* [embedthumbnail] Set mtime correctly
* Refactor some postprocessor/downloader code by [pukkandan](https://github.com/pukkandan) and [shirt](https://github.com/shirt-dev)
### 2021.03.07
* [youtube] Fix history, mixes, community pages and trending by [pukkandan](https://github.com/pukkandan) and [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix private feeds/playlists on multi-channel accounts by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Extract alerts from continuation by [coletdjnz](https://github.com/coletdjnz)
* [cbs] Add support for ParamountPlus by [shirt](https://github.com/shirt-dev)
* [mxplayer] Rewrite extractor with show support by [pukkandan](https://github.com/pukkandan) and [Ashish0804](https://github.com/Ashish0804)
* [gedi] Improvements from youtube-dl by [nixxo](https://github.com/nixxo)

@@ -505,7 +890,7 @@

* [downloader] Fix bug for `ffmpeg`/`httpie`
* [update] Fix updater removing the executable bit on some UNIX distros
* [update] Fix current build hash for UNIX
* [docs] Include wget/curl/aria2c install instructions for Unix by [Ashish0804](https://github.com/Ashish0804)
* Fix some videos downloading with `m3u8` extension
* Remove "fixup is ignored" warning when fixup wasn't passed by user

@@ -514,7 +899,7 @@

* [build] Fix bug
### 2021.03.03
* [youtube] Use new browse API for continuation page extraction by [coletdjnz](https://github.com/coletdjnz) and [pukkandan](https://github.com/pukkandan)
* Fix HLS playlist downloading by [shirt](https://github.com/shirt-dev)
* Merge youtube-dl: Upto [2021.03.03](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.03.03)
* [mtv] Fix extractor

@@ -562,7 +947,7 @@

* [ffmpeg] Allow passing custom arguments before `-i` using `--ppa "ffmpeg_i1:ARGS"` syntax (see the sketch at the end of this section)
* Fix `--windows-filenames` removing `/` from UNIX paths
* [hls] Show warning if pycryptodome is not found
* [docs] Improvements
    * Fix documentation of `Extractor Options`
    * Document `all` in format selection
    * Document `playable_in_embed` in output templates

@@ -590,7 +975,7 @@

* Exclude `vcruntime140.dll` from UPX by [jbruchon](https://github.com/jbruchon)
* Set version number based on UTC time, not local time
* Publish on PyPi only if token is set
* [docs] Better document `--prefer-free-formats` and add `--no-prefer-free-format`
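When embedding, `--ppa "ffmpeg_i1:ARGS"` maps onto the `postprocessor_args` dict; a minimal sketch, assuming the CLI's `NAME:ARGS` keys carry over unchanged (the URL and arguments are illustrative):

```python
import yt_dlp

ydl_opts = {
    # Equivalent to: --ppa "ffmpeg_i1:-ss 30"
    # 'ffmpeg_i1' places the arguments before ffmpeg's first input;
    # a plain 'ffmpeg' key would place them before the output instead
    'postprocessor_args': {'ffmpeg_i1': ['-ss', '30']},
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```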
### 2021.02.15
@@ -633,7 +1018,7 @@

* [movefiles] Fix compatibility with python2
* [remuxvideo] Fix validation of conditional remux
* [sponskrub] Don't raise error when the video does not exist
* [docs] Crypto is an optional dependency
### 2021.02.04
@@ -694,10 +1079,10 @@

* Merge youtube-dl: Upto [2021.01.24](https://github.com/ytdl-org/youtube-dl/releases/tag/2021.01.16)
* Plugin support ([documentation](https://github.com/yt-dlp/yt-dlp#plugins))
* **Multiple paths**: New option `-P`/`--paths` to give different paths for different types of files (see the sketch at the end of this section)
    * The syntax is `-P "type:path" -P "type:path"`
    * Valid types are: home, temp, description, annotation, subtitle, infojson, thumbnail
    * Additionally, configuration file is taken from home directory or current directory
* Allow passing different arguments to different external downloaders
* [mildom] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
* Warn when using old style `--external-downloader-args` and `--post-processor-args`
* Fix `--no-overwrite` when using `--write-link`

@@ -732,9 +1117,9 @@

* [roosterteeth.com] Fix for bonus episodes by [Zocker1999NET](https://github.com/Zocker1999NET)
* [tiktok] Fix for when share_info is empty
* [EmbedThumbnail] Fix bug due to incorrect function name
* [docs] Changed sponskrub links to point to [yt-dlp/SponSkrub](https://github.com/yt-dlp/SponSkrub) since I am now providing both linux and windows releases
* [docs] Change all links to correctly point to new fork URL
* [docs] Fixes typos
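When embedding, the `-P` types map onto the `paths` option. A minimal sketch, assuming `paths` takes the same type keys listed above (the paths and URL are illustrative):

```python
import yt_dlp

ydl_opts = {
    # Equivalent to: -P "home:~/Videos" -P "temp:/tmp/yt-dlp"
    'paths': {'home': '~/Videos', 'temp': '/tmp/yt-dlp'},
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```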
### 2021.01.12
@@ -830,7 +1215,7 @@

* Redirect channel home to /video
* Print youtube's warning message
* Handle multiple pages for feeds better
* [youtube] Fix ytsearch not returning results sometimes due to promoted content by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Temporary fix for automatic captions - disable json3 by [blackjack4494](https://github.com/blackjack4494)
* Add `--break-on-existing` by [gergesh](https://github.com/gergesh)
* Pre-check video IDs in the archive before downloading by [pukkandan](https://github.com/pukkandan)
Collaborators.md (new file)

@@ -0,0 +1,39 @@
# Collaborators

This is a list of the collaborators of the project and their major contributions. See the [Changelog](Changelog.md) for more details.

You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [authors of youtube-dl](https://github.com/ytdl-org/youtube-dl/blob/master/AUTHORS)


## [pukkandan](https://github.com/pukkandan)

[](https://ko-fi.com/pukkandan)

* Owner of the fork


## [shirt](https://github.com/shirt-dev)

[](https://ko-fi.com/shirt)

* Multithreading (`-N`) and aria2c support for fragment downloads
* Support for media initialization and discontinuity in HLS
* The self-updater (`-U`)


## [coletdjnz](https://github.com/coletdjnz)

[](https://github.com/sponsors/coletdjnz)

* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements


## [Ashish0804](https://github.com/Ashish0804)

[](https://ko-fi.com/ashish0804)

* Added support for new websites Zee5, MXPlayer, DiscoveryPlusIndia, ShemarooMe, Utreon etc
* Added playlist/series downloads for TubiTv, SonyLIV, Voot, HotStar etc
Makefile
@@ -13,7 +13,9 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites com
.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites

clean-test:
        rm -rf *.3gp *.annotations.xml *.ape *.avi *.description *.dump *.flac *.flv *.frag *.frag.aria2 *.frag.urls \
        *.info.json *.jpeg *.jpg *.live_chat.json *.m4a *.m4v *.mkv *.mp3 *.mp4 *.ogg *.opus *.part* *.png *.sbv *.srt \
        *.swf *.swp *.ttml *.vtt *.wav *.webm *.webp *.ytdl test/testdata/player-*.js
clean-dist:
        rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
clean-cache:

@@ -110,7 +112,7 @@ _EXTRACTOR_FILES = $(shell find yt_dlp/extractor -iname '*.py' -and -not -iname

yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
        $(PYTHON) devscripts/make_lazy_extractors.py $@

yt-dlp.tar.gz: all
        @tar -czf $(DESTDIR)/yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
                --exclude '*.DS_Store' \
                --exclude '*.kate-swp' \

@@ -119,12 +121,12 @@ yt-dlp.tar.gz: README.md yt-dlp.1 completions Changelog.md AUTHORS

                --exclude '*~' \
                --exclude '__pycache__' \
                --exclude '.git' \
                --exclude 'docs/_build' \
                -- \
                README.md supportedsites.md Changelog.md LICENSE \
                CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
                Makefile MANIFEST.in yt-dlp.1 README.txt completions \
                setup.py setup.cfg yt-dlp yt_dlp requirements.txt \
                devscripts test tox.ini pytest.ini

AUTHORS: .mailmap
        git shortlog -s -n | cut -f2 | sort > AUTHORS
devscripts/cloud.ico (binary file removed, was 4.2 KiB)
devscripts/lazy_load_template.py

@@ -1,20 +1,31 @@

#!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals

import re

from ..utils import bug_reports_message, write_string


class LazyLoadMetaClass(type):
    def __getattr__(cls, name):
        if '_real_class' not in cls.__dict__:
            write_string(
                f'WARNING: Falling back to normal extractor since lazy extractor '
                f'{cls.__name__} does not have attribute {name}{bug_reports_message()}')
        return getattr(cls._get_real_class(), name)


class LazyLoadExtractor(metaclass=LazyLoadMetaClass):
    _module = None
    _WORKING = True

    @classmethod
    def _get_real_class(cls):
        # Import the real extractor class only once, on first use
        if '_real_class' not in cls.__dict__:
            mod = __import__(cls._module, fromlist=(cls.__name__,))
            cls._real_class = getattr(mod, cls.__name__)
        return cls._real_class

    def __new__(cls, *args, **kwargs):
        # Instantiating a lazy extractor transparently constructs the real one
        real_cls = cls._get_real_class()
        instance = real_cls.__new__(real_cls)
        instance.__init__(*args, **kwargs)
        return instance
devscripts/logo.ico (new binary file, 40 KiB)
devscripts/make_contributing.py

@@ -1,33 +1,34 @@

#!/usr/bin/env python3
from __future__ import unicode_literals

import io
import optparse
import re


def main():
    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error('Expected an input and an output filename')

    infile, outfile = args

    with io.open(infile, encoding='utf-8') as inf:
        readme = inf.read()

    bug_text = re.search(
        r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
    dev_text = re.search(
        r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING yt-dlp', readme).group(1)

    out = bug_text + dev_text

    with io.open(outfile, 'w', encoding='utf-8') as outf:
        outf.write(out)


if __name__ == '__main__':
    main()
devscripts/make_lazy_extractors.py

@@ -7,8 +7,6 @@ import os

from os.path import dirname as dirn
import sys

sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))

lazy_extractors_filename = sys.argv[1]

@@ -16,23 +14,28 @@ if os.path.exists(lazy_extractors_filename):

    os.remove(lazy_extractors_filename)

# Block plugins from loading
plugins_dirname = 'ytdlp_plugins'
plugins_blocked_dirname = 'ytdlp_plugins_blocked'
if os.path.exists(plugins_dirname):
    os.rename(plugins_dirname, plugins_blocked_dirname)

from yt_dlp.extractor import _ALL_CLASSES
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor

if os.path.exists(plugins_blocked_dirname):
    os.rename(plugins_blocked_dirname, plugins_dirname)

with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()

CLASS_PROPERTIES = ['ie_key', 'working', '_match_valid_url', 'suitable', '_match_id', 'get_temp_id']
module_contents = [
    module_template,
    *[getsource(getattr(InfoExtractor, k)) for k in CLASS_PROPERTIES],
    '\nclass LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n']

ie_template = '''
class {name}({bases}):
    _module = '{module}'
'''

@@ -53,14 +56,17 @@ def get_base_name(base):

def build_lazy_ie(ie, name):
    s = ie_template.format(
        name=name,
        bases=', '.join(map(get_base_name, ie.__bases__)),
        module=ie.__module__)
    valid_url = getattr(ie, '_VALID_URL', None)
    if valid_url:
        s += f'    _VALID_URL = {valid_url!r}\n'
    if not ie._WORKING:
        s += '    _WORKING = False\n'
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += f'\n{getsource(ie.suitable)}'
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())

@@ -98,7 +104,7 @@ for ie in ordered_cls:

    names.append(name)

module_contents.append(
    '\n_ALL_CLASSES = [{0}]'.format(', '.join(names)))

module_src = '\n'.join(module_contents) + '\n'
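Putting the two scripts together: for each extractor, `build_lazy_ie` emits a stub class that records only the module path, plus `_VALID_URL`/`suitable` where needed. A generated entry might look like the sketch below (the `_VALID_URL` pattern here is hypothetical; the real file is produced by the `yt_dlp/extractor/lazy_extractors.py` Makefile target above):

```python
# Illustrative excerpt of a generated yt_dlp/extractor/lazy_extractors.py
class FilmmoduIE(LazyLoadExtractor):
    _module = 'yt_dlp.extractor.filmmodu'
    _VALID_URL = r'https?://(?:www\.)?filmmodu\.example/(?P<id>\d+)'  # hypothetical pattern

# The real class is imported only when the stub is instantiated, or when an
# attribute missing from the stub is accessed (LazyLoadMetaClass.__getattr__)
```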
devscripts/run_tests.sh

@@ -11,5 +11,4 @@

    exit 1
fi

python3 -m pytest -k "$test_set"
devscripts/update-formulae.py (new file)

@@ -0,0 +1,37 @@
#!/usr/bin/env python3
from __future__ import unicode_literals

import json
import os
import re
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from yt_dlp.compat import compat_urllib_request


# usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
# version can be either 0-aligned (yt-dlp version) or normalized (PyPI version)

filename, version = sys.argv[1:]

normalized_version = '.'.join(str(int(x)) for x in version.split('.'))

pypi_release = json.loads(compat_urllib_request.urlopen(
    'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
).read().decode('utf-8'))

tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.tar.gz'))

sha256sum = tarball_file['digests']['sha256']
url = tarball_file['url']

with open(filename, 'r') as r:
    formulae_text = r.read()

formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text)
formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text)

with open(filename, 'w') as w:
    w.write(formulae_text)
docs/Collaborators.md (new file)

@@ -0,0 +1,5 @@
---
orphan: true
---
```{include} ../Collaborators.md
```
pyinst.py

@@ -3,7 +3,6 @@
from __future__ import unicode_literals
import sys
import platform

from PyInstaller.utils.hooks import collect_submodules

@@ -13,16 +12,21 @@ from PyInstaller.utils.win32.versioninfo import (

)
import PyInstaller.__main__

arch = platform.architecture()[0][:2]
assert arch in ('32', '64')
_x86 = '_x86' if arch == '32' else ''

# Compatibility with older arguments
opts = sys.argv[1:]
if opts[0:1] in (['32'], ['64']):
    if arch != opts[0]:
        raise Exception(f'{opts[0]}bit executable cannot be built on a {arch}bit system')
    opts = opts[1:]
opts = opts or ['--onefile']

print(f'Building {arch}bit version with options {opts}')

FILE_DESCRIPTION = 'yt-dlp%s' % (' (32 Bit)' if _x86 else '')

exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
VERSION = locals()['__version__']

@@ -67,16 +71,32 @@ VERSION_FILE = VSVersionInfo(

    ]
)


def pycryptodome_module():
    try:
        import Cryptodome  # noqa: F401
    except ImportError:
        try:
            import Crypto  # noqa: F401
            print('WARNING: Using Crypto since Cryptodome is not available. '
                  'Install with: pip install pycryptodomex', file=sys.stderr)
            return 'Crypto'
        except ImportError:
            pass
    return 'Cryptodome'


dependancies = [pycryptodome_module(), 'mutagen'] + collect_submodules('websockets')
excluded_modules = ['test', 'ytdlp_plugins', 'youtube-dl', 'youtube-dlc']

PyInstaller.__main__.run([
    '--name=yt-dlp%s' % _x86,
    '--icon=devscripts/logo.ico',
    *[f'--exclude-module={module}' for module in excluded_modules],
    *[f'--hidden-import={module}' for module in dependancies],
    '--upx-exclude=vcruntime140.dll',
    '--noconfirm',
    *opts,
    'yt_dlp/__main__.py',
])
SetVersion('dist/%syt-dlp%s.exe' % ('yt-dlp/' if '--onedir' in opts else '', _x86), VERSION_FILE)
requirements.txt

@@ -1,3 +1,3 @@

mutagen
pycryptodomex
websockets
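Note that `pycryptodomex` installs under the separate `Cryptodome` namespace, which is why `pycryptodome_module()` in `pyinst.py` above probes `Cryptodome` before falling back to `Crypto`. A quick sanity check:

```python
# pycryptodomex provides the Cryptodome package, avoiding clashes
# with the legacy PyCrypto/pycryptodome 'Crypto' namespace
from Cryptodome.Cipher import AES  # noqa: F401
print('pycryptodomex is available')
```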
setup.py

@@ -1,12 +1,16 @@
#!/usr/bin/env python3
# coding: utf-8

import os.path
import warnings
import sys

try:
    from setuptools import setup, Command, find_packages
    setuptools_available = True
except ImportError:
    from distutils.core import setup, Command
    setuptools_available = False
from distutils.spawn import spawn

# Get the version from yt_dlp/version.py without importing the package
exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))

@@ -19,34 +23,64 @@ LONG_DESCRIPTION = '\n\n'.join((

    '**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
    open('README.md', 'r', encoding='utf-8').read()))

REQUIREMENTS = ['mutagen', 'pycryptodomex', 'websockets']


if sys.argv[1:2] == ['py2exe']:
    import py2exe
    warnings.warn(
        'Building with py2exe is not officially supported. '
        'The recommended way is to use "pyinst.py" to build using pyinstaller')
    params = {
        'console': [{
            'script': './yt_dlp/__main__.py',
            'dest_base': 'yt-dlp',
            'version': __version__,
            'description': DESCRIPTION,
            'comments': LONG_DESCRIPTION.split('\n')[0],
            'product_name': 'yt-dlp',
            'product_version': __version__,
        }],
        'options': {
            'py2exe': {
                'bundle_files': 0,
                'compressed': 1,
                'optimize': 2,
                'dist_dir': './dist',
                'excludes': ['Crypto', 'Cryptodome'],  # py2exe cannot import Crypto
                'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
            }
        },
        'zipfile': None
    }

else:
    files_spec = [
        ('share/bash-completion/completions', ['completions/bash/yt-dlp']),
        ('share/zsh/site-functions', ['completions/zsh/_yt-dlp']),
        ('share/fish/vendor_completions.d', ['completions/fish/yt-dlp.fish']),
        ('share/doc/yt_dlp', ['README.txt']),
        ('share/man/man1', ['yt-dlp.1'])
    ]
    root = os.path.dirname(os.path.abspath(__file__))
    data_files = []
    for dirname, files in files_spec:
        resfiles = []
        for fn in files:
            if not os.path.exists(fn):
                warnings.warn('Skipping file %s since it is not present. Try running `make pypi-files` first' % fn)
            else:
                resfiles.append(fn)
        data_files.append((dirname, resfiles))

    params = {
        'data_files': data_files,
    }

    if setuptools_available:
        params['entry_points'] = {'console_scripts': ['yt-dlp = yt_dlp:main']}
    else:
        params['scripts'] = ['yt-dlp']


class build_lazy_extractors(Command):

@@ -64,7 +98,11 @@ class build_lazy_extractors(Command):

        dry_run=self.dry_run)


if setuptools_available:
    packages = find_packages(exclude=('youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins'))
else:
    packages = ['yt_dlp', 'yt_dlp.downloader', 'yt_dlp.extractor', 'yt_dlp.postprocessor']


setup(
    name='yt-dlp',

@@ -81,7 +119,7 @@ setup(

        'Documentation': 'https://yt-dlp.readthedocs.io',
        'Source': 'https://github.com/yt-dlp/yt-dlp',
        'Tracker': 'https://github.com/yt-dlp/yt-dlp/issues',
        'Funding': 'https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators',
    },
    classifiers=[
        'Topic :: Multimedia :: Video',
supportedsites.md

@@ -1,4 +1,6 @@
# Supported sites
- **17live**
- **17live:clip**
- **1tv**: Первый канал
- **20min**
- **220.ro**

@@ -50,6 +52,7 @@

- **AmericasTestKitchen**
- **AmericasTestKitchenSeason**
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
- **AnimalPlanet**
- **AnimeLab**
- **AnimeLabShows**
- **AnimeOnDemand**

@@ -95,7 +98,9 @@

- **Bandcamp**
- **Bandcamp:album**
- **Bandcamp:weekly**
- **BandcampMusic**
- **bangumi.bilibili.com**: BiliBili番剧
- **BannedVideo**
- **bbc**: BBC
- **bbc.co.uk**: BBC iPlayer
- **bbc.co.uk:article**: BBC articles

@@ -117,11 +122,14 @@

- **Bigflix**
- **Bild**: Bild.de
- **BiliBili**
- **Bilibili category extractor**
- **BilibiliAudio**
- **BilibiliAudioAlbum**
- **BilibiliChannel**
- **BiliBiliPlayer**
- **BiliBiliSearch**: Bilibili video search, "bilisearch" keyword
- **BiliIntl**
- **BiliIntlSeries**
- **BioBioChileTV**
- **Biography**
- **BIQLE**

@@ -129,6 +137,7 @@

- **BitChuteChannel**
- **bitwave:replay**
- **bitwave:stream**
- **BlackboardCollaborate**
- **BleacherReport**
- **BleacherReportCMS**
- **Bloomberg**

@@ -148,10 +157,10 @@

- **BusinessInsider**
- **BuzzFeed**
- **BYUtv**
- **CAM4**
- **Camdemy**
- **CamdemyFolder**
- **CamModels**
- **CamWithHer**
- **canalc2.tv**
- **Canalplus**: mycanal.fr and piwiplus.fr

@@ -161,10 +170,7 @@

- **CarambaTVPage**
- **CartoonNetwork**
- **cbc.ca**
- **cbc.ca:olympics**
- **cbc.ca:player**
- **cbc.ca:watch**
- **cbc.ca:watch:video**
- **CBS**
- **CBSInteractive**
- **CBSLocal**

@@ -179,10 +185,13 @@

- **CDA**
- **CeskaTelevize**
- **CeskaTelevizePorady**
- **CGTN**
- **channel9**: Channel 9
- **CharlieRose**
- **Chaturbate**
- **Chilloutzone**
- **Chingari**
- **ChingariUser**
- **chirbit**
- **chirbit:profile**
- **cielotv.it**

@@ -190,6 +199,7 @@

- **Cinemax**
- **CiscoLiveSearch**
- **CiscoLiveSession**
- **ciscowebex**: Cisco Webex
- **CJSW**
- **cliphunter**
- **Clippit**

@@ -222,7 +232,6 @@

- **CTV**
- **CTVNews**
- **cu.ntv.co.jp**: Nippon Television Network
- **Culturebox**
- **CultureUnplugged**
- **curiositystream**
- **curiositystream:collection**

@@ -232,6 +241,8 @@

- **dailymotion**
- **dailymotion:playlist**
- **dailymotion:user**
- **damtomo:record**
- **damtomo:video**
- **daum.net**
- **daum.net:clip**
- **daum.net:playlist**

@@ -255,6 +266,7 @@

- **DiscoveryPlusIndiaShow**
- **DiscoveryVR**
- **Disney**
- **DIYNetwork**
- **dlive:stream**
- **dlive:vod**
- **DoodStream**

@@ -293,8 +305,11 @@

- **Embedly**
- **EMPFlix**
- **Engadget**
- **Epicon**
- **EpiconSeries**
- **Eporner**
- **EroProfile**
- **EroProfile:album**
- **Escapist**
- **ESPN**
- **ESPNArticle**

@@ -313,6 +328,7 @@

- **fc2**
- **fc2:embed**
- **Fczenit**
- **Filmmodu**
- **filmon**
- **filmon:channel**
- **Filmweb**

@@ -329,13 +345,10 @@

- **foxnews**: Fox News and Fox Business Video
- **foxnews:article**
- **FoxSports**
- **france2.fr:generation-what**
- **FranceCulture**
- **FranceInter**
- **FranceTV**
- **FranceTVEmbed**
- **francetvinfo.fr**
- **FranceTVJeunesse**
- **FranceTVSite**
- **Freesound**
- **freespeech.org**

@@ -350,6 +363,7 @@

- **Funk**
- **Fusion**
- **Fux**
- **GabTV**
- **Gaia**
- **GameInformer**
- **GameSpot**

@@ -358,7 +372,11 @@

- **Gazeta**
- **GDCVault**
- **GediDigital**
- **gem.cbc.ca**
- **gem.cbc.ca:live**
- **gem.cbc.ca:playlist**
- **generic**: Generic downloader that works on some sites
- **Gettr**
- **Gfycat**
- **GiantBomb**
- **Giga**

@@ -372,7 +390,9 @@

- **google:podcasts**
- **google:podcasts:feed**
- **GoogleDrive**
- **GoPro**
- **Goshgay**
- **GoToStage**
- **GPUTechConf**
- **Groupon**
- **hbo**

@@ -405,6 +425,7 @@

- **Huajiao**: 花椒直播
- **HuffPost**: Huffington Post
- **Hungama**
- **HungamaAlbumPlaylist**
- **HungamaSong**
- **Hypem**
- **ign.com**

@@ -457,6 +478,7 @@

- **KinjaEmbed**
- **KinoPoisk**
- **KonserthusetPlay**
- **Koo**
- **KrasView**: Красвью
- **Ku6**
- **KUSI**

@@ -517,6 +539,9 @@

- **MallTV**
- **mangomolo:live**
- **mangomolo:video**
- **ManotoTV**: Manoto TV (Episode)
- **ManotoTVLive**: Manoto TV (Live)
- **ManotoTVShow**: Manoto TV (Show)
- **ManyVids**
- **MaoriTV**
- **Markiza**

@@ -527,6 +552,8 @@

- **MedalTV**
- **media.ccc.de**
- **media.ccc.de:lists**
- **Mediaite**
- **MediaKlikk**
- **Medialaan**
- **Mediaset**
- **Mediasite**

@@ -552,6 +579,8 @@

- **MinistryGrid**
- **Minoto**
- **miomio.tv**
- **mirrativ**
- **mirrativ:user**
- **MiTele**: mitele.es
- **mixcloud**
- **mixcloud:playlist**

@@ -583,6 +612,7 @@

- **mtvservices:embedded**
- **MTVUutisetArticle**
- **MuenchenTV**: münchen.tv
- **MuseScore**
- **mva**: Microsoft Virtual Academy videos
- **mva:course**: Microsoft Virtual Academy courses
- **Mwave**

@@ -599,6 +629,8 @@

- **MyviEmbed**
- **MyVisionTV**
- **n-tv.de**
- **N1Info:article**
- **N1InfoAsset**
- **natgeo:video**
- **NationalGeographicTV**
- **Naver**

@@ -632,7 +664,8 @@

- **NetPlus**
- **Netzkino**
- **Newgrounds**
- **Newgrounds:playlist**
- **Newgrounds:user**
- **Newstube**
- **NextMedia**: 蘋果日報
- **NextMediaActionNews**: 蘋果日報 - 動新聞

@@ -653,6 +686,9 @@

- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
- **NiconicoUser**
- **nicovideo:search**: Nico video searches
- **nicovideo:search:date**: Nico video searches, newest first
- **nicovideo:search_url**: Nico video search URLs
- **Nintendo**
- **Nitter**
- **njoy**: N-JOY

@@ -665,6 +701,7 @@

- **NosVideo**
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
- **NovaEmbed**
- **NovaPlay**
- **nowness**
- **nowness:playlist**
- **nowness:series**

@@ -690,11 +727,13 @@

- **NYTimes**
- **NYTimesArticle**
- **NYTimesCooking**
- **nzherald**
|
||||
- **NZZ**
|
||||
- **ocw.mit.edu**
|
||||
- **OdaTV**
|
||||
- **Odnoklassniki**
|
||||
- **OktoberfestTV**
|
||||
- **OlympicsReplay**
|
||||
- **OnDemandKorea**
|
||||
- **onet.pl**
|
||||
- **onet.tv**
|
||||
@@ -703,6 +742,8 @@
|
||||
- **OnionStudios**
|
||||
- **Ooyala**
|
||||
- **OoyalaExternal**
|
||||
- **openrec**
|
||||
- **openrec:capture**
|
||||
- **OraTV**
|
||||
- **orf:burgenland**: Radio Burgenland
|
||||
- **orf:fm4**: radio FM4
|
||||
@@ -728,12 +769,18 @@
|
||||
- **PalcoMP3:video**
|
||||
- **pandora.tv**: 판도라TV
|
||||
- **ParamountNetwork**
|
||||
- **ParamountPlus**
|
||||
- **ParamountPlusSeries**
|
||||
- **parliamentlive.tv**: UK parliament videos
|
||||
- **Parlview**
|
||||
- **Patreon**
|
||||
- **PatreonUser**
|
||||
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
||||
- **PearVideo**
|
||||
- **PeerTube**
|
||||
- **PeerTube:Playlist**
|
||||
- **peloton**
|
||||
- **peloton:live**: Peloton Live
|
||||
- **People**
|
||||
- **PerformGroup**
|
||||
- **periscope**: Periscope
|
||||
@@ -774,6 +821,7 @@
|
||||
- **PornHd**
|
||||
- **PornHub**: PornHub and Thumbzilla
|
||||
- **PornHubPagedVideoList**
|
||||
- **PornHubPlaylist**
|
||||
- **PornHubUser**
|
||||
- **PornHubUserVideosUpload**
|
||||
- **Pornotube**
|
||||
@@ -781,6 +829,7 @@
|
||||
- **PornoXO**
|
||||
- **PornTube**
|
||||
- **PressTV**
|
||||
- **ProjectVeritas**
|
||||
- **prosiebensat1**: ProSiebenSat.1 Digital
|
||||
- **puhutv**
|
||||
- **puhutv:serie**
|
||||
@@ -797,12 +846,17 @@
|
||||
- **QuicklineLive**
|
||||
- **R7**
|
||||
- **R7Article**
|
||||
- **Radiko**
|
||||
- **RadikoRadio**
|
||||
- **radio.de**
|
||||
- **radiobremen**
|
||||
- **radiocanada**
|
||||
- **radiocanada:audiovideo**
|
||||
- **radiofrance**
|
||||
- **RadioJavan**
|
||||
- **radlive**
|
||||
- **radlive:channel**
|
||||
- **radlive:season**
|
||||
- **Rai**
|
||||
- **RaiPlay**
|
||||
- **RaiPlayLive**
|
||||
@@ -815,6 +869,7 @@
|
||||
- **RCSVarious**
|
||||
- **RCTIPlus**
|
||||
- **RCTIPlusSeries**
|
||||
- **RCTIPlusTV**
|
||||
- **RDS**: RDS.ca
|
||||
- **RedBull**
|
||||
- **RedBullEmbed**
|
||||
@@ -852,6 +907,7 @@
|
||||
- **RTVNH**
|
||||
- **RTVS**
|
||||
- **RUHD**
|
||||
- **RumbleChannel**
|
||||
- **RumbleEmbed**
|
||||
- **rutube**: Rutube videos
|
||||
- **rutube:channel**: Rutube channels
|
||||
@@ -873,6 +929,7 @@
|
||||
- **savefrom.net**
|
||||
- **SBS**: sbs.com.au
|
||||
- **schooltv**
|
||||
- **ScienceChannel**
|
||||
- **screen.yahoo:search**: Yahoo screen search
|
||||
- **Screencast**
|
||||
- **ScreencastOMatic**
|
||||
@@ -925,11 +982,12 @@
|
||||
- **southpark.de**
|
||||
- **southpark.nl**
|
||||
- **southparkstudios.dk**
|
||||
- **SovietsCloset**
|
||||
- **SovietsClosetPlaylist**
|
||||
- **SpankBang**
|
||||
- **SpankBangPlaylist**
|
||||
- **Spankwire**
|
||||
- **Spiegel**
|
||||
- **sport.francetvinfo.fr**
|
||||
- **Sport5**
|
||||
- **SportBox**
|
||||
- **SportDeutschland**
|
||||
@@ -945,6 +1003,7 @@
|
||||
- **SRGSSR**
|
||||
- **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
|
||||
- **stanfordoc**: Stanford Open ClassRoom
|
||||
- **startv**
|
||||
- **Steam**
|
||||
- **Stitcher**
|
||||
- **StitcherShow**
|
||||
@@ -952,6 +1011,7 @@
|
||||
- **StoryFireSeries**
|
||||
- **StoryFireUser**
|
||||
- **Streamable**
|
||||
- **Streamanity**
|
||||
- **streamcloud.eu**
|
||||
- **StreamCZ**
|
||||
- **StreetVoice**
|
||||
@@ -1007,17 +1067,21 @@
|
||||
- **TheScene**
|
||||
- **TheStar**
|
||||
- **TheSun**
|
||||
- **ThetaStream**
|
||||
- **ThetaVideo**
|
||||
- **TheWeatherChannel**
|
||||
- **ThisAmericanLife**
|
||||
- **ThisAV**
|
||||
- **ThisOldHouse**
|
||||
- **ThisVid**
|
||||
- **TikTok**
|
||||
- **tiktok:user**
|
||||
- **tinypic**: tinypic.com videos
|
||||
- **TMZ**
|
||||
- **TNAFlix**
|
||||
- **TNAFlixNetworkEmbed**
|
||||
- **toggle**
|
||||
- **Tokentube**
|
||||
- **Tokentube:channel**
|
||||
- **ToonGoggles**
|
||||
- **tou.tv**
|
||||
- **Toypics**: Toypics video
|
||||
@@ -1040,10 +1104,11 @@
|
||||
- **Turbo**
|
||||
- **tv.dfb.de**
|
||||
- **TV2**
|
||||
- **tv2.hu**
|
||||
- **TV2Article**
|
||||
- **TV2DK**
|
||||
- **TV2DKBornholmPlay**
|
||||
- **tv2play.hu**
|
||||
- **tv2playseries.hu**
|
||||
- **TV4**: tv4.se and tv4play.se
|
||||
- **TV5MondePlus**: TV5MONDE+
|
||||
- **tv5unis**
|
||||
@@ -1108,9 +1173,11 @@
|
||||
- **ustream:channel**
|
||||
- **ustudio**
|
||||
- **ustudio:embed**
|
||||
- **Utreon**
|
||||
- **Varzesh3**
|
||||
- **Vbox7**
|
||||
- **VeeHD**
|
||||
- **Veo**
|
||||
- **Veoh**
|
||||
- **Vesti**: Вести.Ru
|
||||
- **Vevo**
|
||||
@@ -1139,9 +1206,6 @@
|
||||
- **VidioLive**
|
||||
- **VidioPremier**
|
||||
- **VidLii**
|
||||
- **vidme**
|
||||
- **vidme:user**
|
||||
- **vidme:user:likes**
|
||||
- **vier**: vier.be and vijf.be
|
||||
- **vier:videos**
|
||||
- **viewlift**
|
||||
@@ -1176,6 +1240,8 @@
|
||||
- **VODPl**
|
||||
- **VODPlatform**
|
||||
- **VoiceRepublic**
|
||||
- **voicy**
|
||||
- **voicy:channel**
|
||||
- **Voot**
|
||||
- **VootSeries**
|
||||
- **VoxMedia**
|
||||
@@ -1191,6 +1257,7 @@
|
||||
- **VTXTV**
|
||||
- **vube**: Vube.com
|
||||
- **VuClip**
|
||||
- **Vupload**
|
||||
- **VVVVID**
|
||||
- **VVVVIDShow**
|
||||
- **VyboryMos**
|
||||
@@ -1288,6 +1355,8 @@
|
||||
- **ZDFChannel**
|
||||
- **Zee5**
|
||||
- **zee5:series**
|
||||
- **ZenYandex**
|
||||
- **ZenYandexChannel**
|
||||
- **Zhihu**
|
||||
- **zingmp3**: mp3.zing.vn
|
||||
- **zingmp3:album**
|
||||
|
||||
test/helper.py
@@ -22,7 +22,7 @@ from yt_dlp.utils import (
 )
 
 
-if "pytest" in sys.modules:
+if 'pytest' in sys.modules:
     import pytest
     is_download_test = pytest.mark.download
 else:
@@ -32,9 +32,9 @@ else:
 
 def get_params(override=None):
     PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                   "parameters.json")
+                                   'parameters.json')
     LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                                         "local_parameters.json")
+                                         'local_parameters.json')
     with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
         parameters = json.load(pf)
     if os.path.exists(LOCAL_PARAMETERS_FILE):
@@ -198,7 +198,10 @@ def expect_info_dict(self, got_dict, expected_dict):
     expect_dict(self, got_dict, expected_dict)
     # Check for the presence of mandatory fields
     if got_dict.get('_type') not in ('playlist', 'multi_video'):
-        for key in ('id', 'url', 'title', 'ext'):
+        mandatory_fields = ['id', 'title']
+        if expected_dict.get('ext'):
+            mandatory_fields.extend(('url', 'ext'))
+        for key in mandatory_fields:
             self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
     # Check for mandatory fields that are automatically set by YoutubeDL
     for key in ['webpage_url', 'extractor', 'extractor_key']:

test/test_YoutubeDL.py
@@ -10,14 +10,15 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import copy
+import json
 
 from test.helper import FakeYDL, assertRegexpMatches
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_str, compat_urllib_error
+from yt_dlp.compat import compat_os_name, compat_setenv, compat_str, compat_urllib_error
 from yt_dlp.extractor import YoutubeIE
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.postprocessor.common import PostProcessor
-from yt_dlp.utils import ExtractorError, int_or_none, match_filter_func
+from yt_dlp.utils import ExtractorError, int_or_none, match_filter_func, LazyList
 
 TEST_URL = 'http://localhost/sample.mp4'
 
@@ -647,9 +648,12 @@ class TestYoutubeDL(unittest.TestCase):
         'title1': '$PATH',
         'title2': '%PATH%',
         'title3': 'foo/bar\\test',
+        'title4': 'foo "bar" test',
+        'title5': 'áéí 𝐀',
         'timestamp': 1618488000,
         'duration': 100000,
         'playlist_index': 1,
+        'playlist_autonumber': 2,
         '_last_playlist_index': 100,
         'n_entries': 10,
         'formats': [{'id': 'id1'}, {'id': 'id2'}, {'id': 'id3'}]
@@ -662,32 +666,45 @@ class TestYoutubeDL(unittest.TestCase):
             ydl._num_downloads = 1
             self.assertEqual(ydl.validate_outtmpl(tmpl), None)
 
-            outtmpl, tmpl_dict = ydl.prepare_outtmpl(tmpl, info or self.outtmpl_info)
-            out = outtmpl % tmpl_dict
+            out = ydl.evaluate_outtmpl(tmpl, info or self.outtmpl_info)
             fname = ydl.prepare_filename(info or self.outtmpl_info)
 
-            if callable(expected):
-                self.assertTrue(expected(out))
-                self.assertTrue(expected(fname))
-            elif isinstance(expected, compat_str):
-                self.assertEqual((out, fname), (expected, expected))
-            else:
-                self.assertEqual((out, fname), expected)
+            if not isinstance(expected, (list, tuple)):
+                expected = (expected, expected)
+            for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected):
+                if callable(expect):
+                    self.assertTrue(expect(got), f'Wrong {name} from {tmpl}')
+                else:
+                    self.assertEqual(got, expect, f'Wrong {name} from {tmpl}')
+
+        # Side-effects
+        original_infodict = dict(self.outtmpl_info)
+        test('foo.bar', 'foo.bar')
+        original_infodict['epoch'] = self.outtmpl_info.get('epoch')
+        self.assertTrue(isinstance(original_infodict['epoch'], int))
+        test('%(epoch)d', int_or_none)
+        self.assertEqual(original_infodict, self.outtmpl_info)
 
         # Auto-generated fields
         test('%(id)s.%(ext)s', '1234.mp4')
         test('%(duration_string)s', ('27:46:40', '27-46-40'))
-        test('%(epoch)d', int_or_none)
         test('%(resolution)s', '1080p')
         test('%(playlist_index)s', '001')
+        test('%(playlist_autonumber)s', '02')
         test('%(autonumber)s', '00001')
         test('%(autonumber+2)03d', '005', autonumber_start=3)
        test('%(autonumber)s', '001', autonumber_size=3)
 
         # Escaping %
         test('%', '%')
         test('%%', '%')
         test('%%%%', '%%')
+        test('%s', '%s')
+        test('%%%s', '%%s')
+        test('%d', '%d')
+        test('%abc%', '%abc%')
+        test('%%(width)06d.%(ext)s', '%(width)06d.mp4')
+        test('%%%(height)s', '%1080')
         test('%(width)06d.%(ext)s', 'NA.mp4')
         test('%(width)06d.%%(ext)s', 'NA.%(ext)s')
         test('%%(width)06d.%(ext)s', '%(width)06d.mp4')
@@ -702,12 +719,18 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(id)s', ('ab:cd', 'ab -cd'), info={'id': 'ab:cd'})
 
         # Invalid templates
-        self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%'), ValueError))
         self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
         test('%(invalid@tmpl|def)s', 'none', outtmpl_na_placeholder='none')
         test('%()s', 'NA')
-        test('%s', '%s')
-        test('%d', '%d')
+        test('%(..)s', 'NA')
+
+        # Entire info_dict
+        def expect_same_infodict(out):
+            got_dict = json.loads(out)
+            for info_field, expected in self.outtmpl_info.items():
+                self.assertEqual(got_dict.get(info_field), expected, info_field)
+            return True
+
+        test('%()j', (expect_same_infodict, str))
 
         # NA placeholder
         NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(x|def)s-%(id)s.%(ext)s'
@@ -738,13 +761,32 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(width|0)04d', '0000')
         test('a%(width|)d', 'a', outtmpl_na_placeholder='none')
 
-        # Internal formatting
         FORMATS = self.outtmpl_info['formats']
+        sanitize = lambda x: x.replace(':', ' -').replace('"', "'")
 
+        # Custom type casting
+        test('%(formats.:.id)l', 'id1, id2, id3')
+        test('%(formats.:.id)#l', ('id1\nid2\nid3', 'id1 id2 id3'))
+        test('%(ext)l', 'mp4')
+        test('%(formats.:.id) 15l', '  id1, id2, id3')
+        test('%(formats)j', (json.dumps(FORMATS), sanitize(json.dumps(FORMATS))))
+        test('%(title5).3B', 'á')
+        test('%(title5)U', 'áéí 𝐀')
+        test('%(title5)#U', 'a\u0301e\u0301i\u0301 𝐀')
+        test('%(title5)+U', 'áéí A')
+        test('%(title5)+#U', 'a\u0301e\u0301i\u0301 A')
+        if compat_os_name == 'nt':
+            test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
+        else:
+            test('%(title4)q', ('\'foo "bar" test\'', "'foo 'bar' test'"))
+
+        # Internal formatting
+        test('%(timestamp-1000>%H-%M-%S)s', '11-43-20')
+        test('%(title|%)s %(title|%%)s', '% %%')
         test('%(id+1-height+3)05d', '00158')
         test('%(width+100)05d', 'NA')
-        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], '% 15s' % str(FORMATS[0]).replace(':', ' -')))
-        test('%(formats.0)r', (repr(FORMATS[0]), repr(FORMATS[0]).replace(':', ' -')))
+        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], '% 15s' % sanitize(str(FORMATS[0]))))
+        test('%(formats.0)r', (repr(FORMATS[0]), sanitize(repr(FORMATS[0]))))
         test('%(height.0)03d', '001')
         test('%(-height.0)04d', '-001')
         test('%(formats.-1.id)s', FORMATS[-1]['id'])
@@ -754,11 +796,28 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(formats.0.id.-1+id)f', '1235.000000')
         test('%(formats.0.id.-1+formats.1.id.-1)d', '3')
 
+        # Alternates
+        test('%(title,id)s', '1234')
+        test('%(width-100,height+20|def)d', '1100')
+        test('%(width-100,height+width|def)s', 'def')
+        test('%(timestamp-x>%H\\,%M\\,%S,timestamp>%H\\,%M\\,%S)s', '12,00,00')
+
+        # Laziness
+        def gen():
+            yield from range(5)
+            raise self.assertTrue(False, 'LazyList should not be evaluated till here')
+        test('%(key.4)s', '4', info={'key': LazyList(gen())})
+
         # Empty filename
         test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
         # test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
         # test('%(foo|)s', ('', '_')) # fixme
 
+        # Environment variable expansion for prepare_filename
+        compat_setenv('__yt_dlp_var', 'expanded')
+        envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
+        test(envvar, (envvar, 'expanded'))
+
         # Path expansion and escaping
         test('Hello %(title1)s', 'Hello $PATH')
         test('Hello %(title2)s', 'Hello %PATH%')
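
The hunks above switch the test helper to `evaluate_outtmpl` and pin down the newer template features: dot-path traversal with slices, the list (`l`) cast, alternate fields, and lazily evaluated values. A minimal sketch of how those templates behave through the same API, using a made-up info dict rather than anything from this diff:

```python
# Sketch only: the info dict below is an invented example
from yt_dlp import YoutubeDL

ydl = YoutubeDL()
info = {'id': '1234', 'formats': [{'id': 'id1'}, {'id': 'id2'}, {'id': 'id3'}]}

# Dot-path traversal plus the list ('l') cast joins all format ids
print(ydl.evaluate_outtmpl('%(formats.:.id)l', info))   # id1, id2, id3
# Negative indices work on nested lists
print(ydl.evaluate_outtmpl('%(formats.-1.id)s', info))  # id3
# Alternate fields: 'title' is missing here, so it falls back to 'id'
print(ydl.evaluate_outtmpl('%(title,id)s', info))       # 1234
```
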
@@ -933,54 +992,32 @@ class TestYoutubeDL(unittest.TestCase):
             ydl.process_ie_result(copy.deepcopy(playlist))
             return ydl.downloaded_info_dicts
 
-        def get_ids(params):
-            return [int(v['id']) for v in get_downloaded_info_dicts(params)]
+        def test_selection(params, expected_ids):
+            results = [
+                (v['playlist_autonumber'] - 1, (int(v['id']), v['playlist_index']))
+                for v in get_downloaded_info_dicts(params)]
+            self.assertEqual(results, list(enumerate(zip(expected_ids, expected_ids))))
 
-        result = get_ids({})
-        self.assertEqual(result, [1, 2, 3, 4])
-
-        result = get_ids({'playlistend': 10})
-        self.assertEqual(result, [1, 2, 3, 4])
-
-        result = get_ids({'playlistend': 2})
-        self.assertEqual(result, [1, 2])
-
-        result = get_ids({'playliststart': 10})
-        self.assertEqual(result, [])
-
-        result = get_ids({'playliststart': 2})
-        self.assertEqual(result, [2, 3, 4])
-
-        result = get_ids({'playlist_items': '2-4'})
-        self.assertEqual(result, [2, 3, 4])
-
-        result = get_ids({'playlist_items': '2,4'})
-        self.assertEqual(result, [2, 4])
-
-        result = get_ids({'playlist_items': '10'})
-        self.assertEqual(result, [])
-
-        result = get_ids({'playlist_items': '3-10'})
-        self.assertEqual(result, [3, 4])
-
-        result = get_ids({'playlist_items': '2-4,3-4,3'})
-        self.assertEqual(result, [2, 3, 4])
+        test_selection({}, [1, 2, 3, 4])
+        test_selection({'playlistend': 10}, [1, 2, 3, 4])
+        test_selection({'playlistend': 2}, [1, 2])
+        test_selection({'playliststart': 10}, [])
+        test_selection({'playliststart': 2}, [2, 3, 4])
+        test_selection({'playlist_items': '2-4'}, [2, 3, 4])
+        test_selection({'playlist_items': '2,4'}, [2, 4])
+        test_selection({'playlist_items': '10'}, [])
+        test_selection({'playlist_items': '0'}, [])
 
         # Tests for https://github.com/ytdl-org/youtube-dl/issues/10591
-        # @{
-        result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
-        self.assertEqual(result[0]['playlist_index'], 2)
-        self.assertEqual(result[1]['playlist_index'], 3)
+        test_selection({'playlist_items': '2-4,3-4,3'}, [2, 3, 4])
+        test_selection({'playlist_items': '4,2'}, [4, 2])
 
-        result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
-        self.assertEqual(result[0]['playlist_index'], 2)
-        self.assertEqual(result[1]['playlist_index'], 3)
-        self.assertEqual(result[2]['playlist_index'], 4)
-
-        result = get_downloaded_info_dicts({'playlist_items': '4,2'})
-        self.assertEqual(result[0]['playlist_index'], 4)
-        self.assertEqual(result[1]['playlist_index'], 2)
-        # @}
+        # Tests for https://github.com/yt-dlp/yt-dlp/issues/720
+        # https://github.com/yt-dlp/yt-dlp/issues/302
+        test_selection({'playlistreverse': True}, [4, 3, 2, 1])
+        test_selection({'playliststart': 2, 'playlistreverse': True}, [4, 3, 2])
+        test_selection({'playlist_items': '2,4', 'playlistreverse': True}, [4, 2])
+        test_selection({'playlist_items': '4,2'}, [4, 2])
 
     def test_urlopen_no_file_protocol(self):
         # see https://github.com/ytdl-org/youtube-dl/issues/8227

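For context, the same selection semantics the rewritten tests pin down are reachable through ordinary `YoutubeDL` params; a hedged sketch (the URL is a placeholder):

```python
from yt_dlp import YoutubeDL

# 'playlist_items' accepts indices and ranges; overlapping ranges like
# '2-4,3-4,3' download each entry once, and an explicit order such as
# '4,2' is kept. 'playlistreverse' then reverses whatever was selected.
ydl = YoutubeDL({'playlist_items': '2-4,3-4,3', 'playlistreverse': True})
# ydl.download(['https://example.com/some-playlist'])
```
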
test/test_aes.py
@@ -7,7 +7,19 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from yt_dlp.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
+from yt_dlp.aes import (
+    aes_decrypt,
+    aes_encrypt,
+    aes_cbc_decrypt,
+    aes_cbc_decrypt_bytes,
+    aes_cbc_encrypt,
+    aes_ctr_decrypt,
+    aes_ctr_encrypt,
+    aes_gcm_decrypt_and_verify,
+    aes_gcm_decrypt_and_verify_bytes,
+    aes_decrypt_text
+)
+from yt_dlp.compat import compat_pycrypto_AES
 from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes
 import base64
@@ -27,18 +39,43 @@ class TestAES(unittest.TestCase):
         self.assertEqual(decrypted, msg)
 
     def test_cbc_decrypt(self):
-        data = bytes_to_intlist(
-            b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
-        )
-        decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
+        data = b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\x27\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd'
+        decrypted = intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist(data), self.key, self.iv))
         self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
+        if compat_pycrypto_AES:
+            decrypted = aes_cbc_decrypt_bytes(data, intlist_to_bytes(self.key), intlist_to_bytes(self.iv))
+            self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
 
     def test_cbc_encrypt(self):
         data = bytes_to_intlist(self.secret_msg)
         encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
         self.assertEqual(
             encrypted,
-            b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")
+            b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd')
+
+    def test_ctr_decrypt(self):
+        data = bytes_to_intlist(b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08')
+        decrypted = intlist_to_bytes(aes_ctr_decrypt(data, self.key, self.iv))
+        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
+
+    def test_ctr_encrypt(self):
+        data = bytes_to_intlist(self.secret_msg)
+        encrypted = intlist_to_bytes(aes_ctr_encrypt(data, self.key, self.iv))
+        self.assertEqual(
+            encrypted,
+            b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08')
+
+    def test_gcm_decrypt(self):
+        data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f.\x08\xb4T\xe4/\x17\xbd'
+        authentication_tag = b'\xe8&I\x80rI\x07\x9d}YWuU@:e'
+
+        decrypted = intlist_to_bytes(aes_gcm_decrypt_and_verify(
+            bytes_to_intlist(data), self.key, bytes_to_intlist(authentication_tag), self.iv[:12]))
+        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
+        if compat_pycrypto_AES:
+            decrypted = aes_gcm_decrypt_and_verify_bytes(
+                data, intlist_to_bytes(self.key), authentication_tag, intlist_to_bytes(self.iv[:12]))
+            self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
 
     def test_decrypt_text(self):
         password = intlist_to_bytes(self.key).decode('utf-8')

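As a usage note, the pure-Python helpers exercised above round-trip cleanly on their own; a self-contained sketch with arbitrary example key and IV values:

```python
from yt_dlp.aes import aes_ctr_decrypt, aes_ctr_encrypt
from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes

key = bytes_to_intlist(bytes(range(16)))  # 128-bit key as a list of ints
iv = bytes_to_intlist(bytes(16))          # 16-byte initial counter block
msg = b'Secret message!!'                 # exactly one 16-byte block

encrypted = intlist_to_bytes(aes_ctr_encrypt(bytes_to_intlist(msg), key, iv))
decrypted = intlist_to_bytes(aes_ctr_decrypt(bytes_to_intlist(encrypted), key, iv))
assert decrypted == msg
```
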
test/test_cookies.py
@@ -3,16 +3,28 @@ from datetime import datetime, timezone
 
 from yt_dlp import cookies
 from yt_dlp.cookies import (
+    CRYPTO_AVAILABLE,
     LinuxChromeCookieDecryptor,
     MacChromeCookieDecryptor,
     WindowsChromeCookieDecryptor,
-    YDLLogger,
     parse_safari_cookies,
     pbkdf2_sha1,
 )
 
 
+class Logger:
+    def debug(self, message):
+        print(f'[verbose] {message}')
+
+    def info(self, message):
+        print(message)
+
+    def warning(self, message, only_once=False):
+        self.error(message)
+
+    def error(self, message):
+        raise Exception(message)
+
+
 class MonkeyPatch:
     def __init__(self, module, temporary_values):
         self._module = module
@@ -42,7 +54,7 @@ class TestCookies(unittest.TestCase):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
            encrypted_value = b'v10\xccW%\xcd\xe6\xe6\x9fM" \xa7\xb0\xca\xe4\x07\xd6'
            value = 'USD'
-           decryptor = LinuxChromeCookieDecryptor('Chrome', YDLLogger())
+           decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)
 
    def test_chrome_cookie_decryptor_linux_v11(self):
@@ -50,24 +62,23 @@ class TestCookies(unittest.TestCase):
                                   'KEYRING_AVAILABLE': True}):
            encrypted_value = b'v11#\x81\x10>`w\x8f)\xc0\xb2\xc1\r\xf4\x1al\xdd\x93\xfd\xf8\xf8N\xf2\xa9\x83\xf1\xe9o\x0elVQd'
            value = 'tz=Europe.London'
-           decryptor = LinuxChromeCookieDecryptor('Chrome', YDLLogger())
+           decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)
 
+   @unittest.skipIf(not CRYPTO_AVAILABLE, 'cryptography library not available')
    def test_chrome_cookie_decryptor_windows_v10(self):
        with MonkeyPatch(cookies, {
            '_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&'
        }):
            encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
            value = '32101439'
-           decryptor = WindowsChromeCookieDecryptor('', YDLLogger())
+           decryptor = WindowsChromeCookieDecryptor('', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)
 
    def test_chrome_cookie_decryptor_mac_v10(self):
        with MonkeyPatch(cookies, {'_get_mac_keyring_password': lambda *args, **kwargs: b'6eIDUdtKAacvlHwBVwvg/Q=='}):
            encrypted_value = b'v10\xb3\xbe\xad\xa1[\x9fC\xa1\x98\xe0\x9a\x01\xd9\xcf\xbfc'
            value = '2021-06-01-22'
-           decryptor = MacChromeCookieDecryptor('', YDLLogger())
+           decryptor = MacChromeCookieDecryptor('', Logger())
            self.assertEqual(decryptor.decrypt(encrypted_value), value)
 
    def test_safari_cookie_parsing(self):

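The `MonkeyPatch` helper is only partially visible in the first hunk above (just its `__init__`); a generic version of the pattern it implements, assumed rather than copied from the file, looks like this:

```python
class MonkeyPatch:
    """Temporarily replace attributes of a module, restoring them on exit."""

    def __init__(self, module, temporary_values):
        self._module = module
        self._temporary_values = temporary_values
        self._backup_values = {}

    def __enter__(self):
        for name, temp_value in self._temporary_values.items():
            self._backup_values[name] = getattr(self._module, name)
            setattr(self._module, name, temp_value)

    def __exit__(self, *exc_info):
        for name, backup_value in self._backup_values.items():
            setattr(self._module, name, backup_value)
```
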
test/test_download.py (45 changes; Normal file → Executable file)
@@ -73,6 +73,8 @@ class TestDownload(unittest.TestCase):
 
     maxDiff = None
 
+    COMPLETED_TESTS = {}
+
     def __str__(self):
         """Identify each test with the `add_ie` attribute, if available."""
 
@@ -94,6 +96,9 @@
 def generator(test_case, tname):
 
     def test_template(self):
+        if self.COMPLETED_TESTS.get(tname):
+            return
+        self.COMPLETED_TESTS[tname] = True
         ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
         other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
         is_playlist = any(k.startswith('playlist') for k in test_case)
@@ -108,8 +113,13 @@ def generator(test_case, tname):
 
         for tc in test_cases:
             info_dict = tc.get('info_dict', {})
-            if not (info_dict.get('id') and info_dict.get('ext')):
-                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
+            params = tc.get('params', {})
+            if not info_dict.get('id'):
+                raise Exception('Test definition incorrect. \'id\' key is not present')
+            elif not info_dict.get('ext'):
+                if params.get('skip_download') and params.get('ignore_no_formats_error'):
+                    continue
+                raise Exception('Test definition incorrect. The output file cannot be known. \'ext\' key is not present')
 
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
@@ -137,7 +147,7 @@ def generator(test_case, tname):
         expect_warnings(ydl, test_case.get('expected_warnings', []))
 
         def get_tc_filename(tc):
-            return ydl.prepare_filename(tc.get('info_dict', {}))
+            return ydl.prepare_filename(dict(tc.get('info_dict', {})))
 
         res_dict = None
 
@@ -250,12 +260,12 @@
 
 
 # And add them to TestDownload
-for n, test_case in enumerate(defs):
-    tname = 'test_' + str(test_case['name'])
-    i = 1
-    while hasattr(TestDownload, tname):
-        tname = 'test_%s_%d' % (test_case['name'], i)
-        i += 1
+tests_counter = {}
+for test_case in defs:
+    name = test_case['name']
+    i = tests_counter.get(name, 0)
+    tests_counter[name] = i + 1
+    tname = f'test_{name}_{i}' if i else f'test_{name}'
    test_method = generator(test_case, tname)
    test_method.__name__ = str(tname)
    ie_list = test_case.get('add_ie')
@@ -264,5 +274,22 @@ for n, test_case in enumerate(defs):
    del test_method
 
 
+def batch_generator(name, num_tests):
+
+    def test_template(self):
+        for i in range(num_tests):
+            getattr(self, f'test_{name}_{i}' if i else f'test_{name}')()
+
+    return test_template
+
+
+for name, num_tests in tests_counter.items():
+    test_method = batch_generator(name, num_tests)
+    test_method.__name__ = f'test_{name}_all'
+    test_method.add_ie = ''
+    setattr(TestDownload, test_method.__name__, test_method)
+    del test_method
+
+
 if __name__ == '__main__':
     unittest.main()

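The rewritten registration loop above is an instance of a common unittest idiom: generate one test method per definition, give each a unique name, and attach it to the TestCase class at import time. A self-contained sketch of the idiom (class and method names here are illustrative):

```python
import unittest


class TestDynamic(unittest.TestCase):
    pass


def make_test(value):
    # Close over `value` so each generated method checks its own case
    def test_template(self):
        self.assertGreaterEqual(value, 0)
    return test_template


for i, value in enumerate([3, 1, 4]):
    test_method = make_test(value)
    test_method.__name__ = f'test_value_{i}' if i else 'test_value'
    setattr(TestDynamic, test_method.__name__, test_method)
    del test_method


if __name__ == '__main__':
    unittest.main()
```
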
test/test_overwrites.py
@@ -8,13 +8,14 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import try_rm
+from test.helper import is_download_test, try_rm
 
 
 root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 download_file = join(root_dir, 'test.webm')
 
 
+@is_download_test
 class TestOverwrites(unittest.TestCase):
     def setUp(self):
         # create an empty file

test/test_postprocessor.py
@@ -6,37 +6,38 @@ from __future__ import unicode_literals
 import os
 import sys
 import unittest
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from yt_dlp import YoutubeDL
 from yt_dlp.compat import compat_shlex_quote
 from yt_dlp.postprocessor import (
-    ExecAfterDownloadPP,
+    ExecPP,
     FFmpegThumbnailsConvertorPP,
     MetadataFromFieldPP,
-    MetadataFromTitlePP,
+    MetadataParserPP,
+    ModifyChaptersPP
 )
 
 
 class TestMetadataFromField(unittest.TestCase):
 
     def test_format_to_regex(self):
-        pp = MetadataFromFieldPP(None, ['title:%(title)s - %(artist)s'])
-        self.assertEqual(pp._data[0]['regex'], r'(?P<title>.+)\ \-\ (?P<artist>.+)')
+        self.assertEqual(
+            MetadataParserPP.format_to_regex('%(title)s - %(artist)s'),
+            r'(?P<title>.+)\ \-\ (?P<artist>.+)')
+        self.assertEqual(MetadataParserPP.format_to_regex(r'(?P<x>.+)'), r'(?P<x>.+)')
 
-    def test_field_to_outtmpl(self):
-        pp = MetadataFromFieldPP(None, ['title:%(title)s : %(artist)s'])
-        self.assertEqual(pp._data[0]['tmpl'], '%(title)s')
+    def test_field_to_template(self):
+        self.assertEqual(MetadataParserPP.field_to_template('title'), '%(title)s')
+        self.assertEqual(MetadataParserPP.field_to_template('1'), '1')
+        self.assertEqual(MetadataParserPP.field_to_template('foo bar'), 'foo bar')
+        self.assertEqual(MetadataParserPP.field_to_template(' literal'), ' literal')
 
-    def test_in_out_seperation(self):
-        pp = MetadataFromFieldPP(None, ['%(title)s \\: %(artist)s:%(title)s : %(artist)s'])
-        self.assertEqual(pp._data[0]['in'], '%(title)s : %(artist)s')
-        self.assertEqual(pp._data[0]['out'], '%(title)s : %(artist)s')
-
-
-class TestMetadataFromTitle(unittest.TestCase):
-    def test_format_to_regex(self):
-        pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
-        self.assertEqual(pp._titleregex, r'(?P<title>.+)\ \-\ (?P<artist>.+)')
+    def test_metadatafromfield(self):
+        self.assertEqual(
+            MetadataFromFieldPP.to_action('%(title)s \\: %(artist)s:%(title)s : %(artist)s'),
+            (MetadataParserPP.Actions.INTERPRET, '%(title)s : %(artist)s', '%(title)s : %(artist)s'))
 
 
 class TestConvertThumbnail(unittest.TestCase):
||||
@@ -60,12 +61,502 @@ class TestConvertThumbnail(unittest.TestCase):
|
||||
os.remove(file.format(out))
|
||||
|
||||
|
||||
class TestExecAfterDownload(unittest.TestCase):
|
||||
class TestExec(unittest.TestCase):
|
||||
def test_parse_cmd(self):
|
||||
pp = ExecAfterDownloadPP(YoutubeDL(), '')
|
||||
pp = ExecPP(YoutubeDL(), '')
|
||||
info = {'filepath': 'file name'}
|
||||
quoted_filepath = compat_shlex_quote(info['filepath'])
|
||||
cmd = 'echo %s' % compat_shlex_quote(info['filepath'])
|
||||
|
||||
self.assertEqual(pp.parse_cmd('echo', info), 'echo %s' % quoted_filepath)
|
||||
self.assertEqual(pp.parse_cmd('echo.{}', info), 'echo.%s' % quoted_filepath)
|
||||
self.assertEqual(pp.parse_cmd('echo "%(filepath)s"', info), 'echo "%s"' % info['filepath'])
|
||||
self.assertEqual(pp.parse_cmd('echo', info), cmd)
|
||||
self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
|
||||
self.assertEqual(pp.parse_cmd('echo %(filepath)q', info), cmd)
|
||||
|
||||
|
||||
class TestModifyChaptersPP(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self._pp = ModifyChaptersPP(YoutubeDL())
|
||||
|
||||
@staticmethod
|
||||
def _sponsor_chapter(start, end, cat, remove=False):
|
||||
c = {'start_time': start, 'end_time': end, '_categories': [(cat, start, end)]}
|
||||
if remove:
|
||||
c['remove'] = True
|
||||
return c
|
||||
|
||||
@staticmethod
|
||||
def _chapter(start, end, title=None, remove=False):
|
||||
c = {'start_time': start, 'end_time': end}
|
||||
if title is not None:
|
||||
c['title'] = title
|
||||
if remove:
|
||||
c['remove'] = True
|
||||
return c
|
||||
|
||||
def _chapters(self, ends, titles):
|
||||
self.assertEqual(len(ends), len(titles))
|
||||
start = 0
|
||||
chapters = []
|
||||
for e, t in zip(ends, titles):
|
||||
chapters.append(self._chapter(start, e, t))
|
||||
start = e
|
||||
return chapters
|
||||
|
||||
def _remove_marked_arrange_sponsors_test_impl(
|
||||
self, chapters, expected_chapters, expected_removed):
|
||||
actual_chapters, actual_removed = (
|
||||
self._pp._remove_marked_arrange_sponsors(chapters))
|
||||
for c in actual_removed:
|
||||
c.pop('title', None)
|
||||
c.pop('_categories', None)
|
||||
actual_chapters = [{
|
||||
'start_time': c['start_time'],
|
||||
'end_time': c['end_time'],
|
||||
'title': c['title'],
|
||||
} for c in actual_chapters]
|
||||
self.assertSequenceEqual(expected_chapters, actual_chapters)
|
||||
self.assertSequenceEqual(expected_removed, actual_removed)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
|
||||
chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(30, 40, 'preview'),
|
||||
self._sponsor_chapter(50, 60, 'sponsor')]
|
||||
expected = self._chapters(
|
||||
[10, 20, 30, 40, 50, 60, 70],
|
||||
['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Preview/Recap',
|
||||
'c', '[SponsorBlock]: Sponsor', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
|
||||
chapters = self._chapters([120], ['c']) + [
|
||||
self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
|
||||
self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'),
|
||||
self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')]
|
||||
expected = self._chapters(
|
||||
[10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
|
||||
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||
'[SponsorBlock]: Sponsor',
|
||||
'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||
'[SponsorBlock]: Unpaid/Self Promotion',
|
||||
'c', '[SponsorBlock]: Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion, Sponsor',
|
||||
'[SponsorBlock]: Unpaid/Self Promotion'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
|
||||
cuts = [self._chapter(10, 20, remove=True),
|
||||
self._sponsor_chapter(30, 40, 'sponsor', remove=True),
|
||||
self._chapter(50, 60, remove=True)]
|
||||
chapters = self._chapters([70], ['c']) + cuts
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([40], ['c']), cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
|
||||
self._sponsor_chapter(50, 60, 'interaction')]
|
||||
expected = self._chapters([10, 20, 40, 50, 60],
|
||||
['c', '[SponsorBlock]: Sponsor', 'c',
|
||||
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, expected, [self._chapter(30, 40, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
|
||||
cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
|
||||
self._chapter(40, 50, remove=True)]
|
||||
chapters = self._chapters([70], ['c']) + [self._sponsor_chapter(10, 60, 'sponsor')] + cuts
|
||||
expected = self._chapters(
|
||||
[10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
|
||||
cuts = [self._sponsor_chapter(20, 50, 'selpromo', remove=True)]
|
||||
chapters = self._chapters([60], ['c']) + [
|
||||
self._sponsor_chapter(10, 20, 'intro'),
|
||||
self._sponsor_chapter(30, 40, 'sponsor'),
|
||||
self._sponsor_chapter(50, 60, 'outro'),
|
||||
] + cuts
|
||||
expected = self._chapters(
|
||||
[10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(20, 30, 'selfpromo'),
|
||||
self._sponsor_chapter(30, 40, 'interaction')]
|
||||
expected = self._chapters(
|
||||
[10, 20, 30, 40, 70],
|
||||
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||
self._sponsor_chapter(20, 30, 'interaction', remove=True),
|
||||
self._chapter(30, 40, remove=True),
|
||||
self._sponsor_chapter(40, 50, 'selpromo', remove=True),
|
||||
self._sponsor_chapter(50, 60, 'interaction')]
|
||||
expected = self._chapters([10, 20, 30, 40],
|
||||
['c', '[SponsorBlock]: Sponsor',
|
||||
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, expected, [self._chapter(20, 50, remove=True)])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
self._sponsor_chapter(10, 30, 'sponsor'),
|
||||
self._sponsor_chapter(20, 50, 'selfpromo'),
|
||||
self._sponsor_chapter(40, 60, 'interaction')]
|
||||
expected = self._chapters(
|
||||
[10, 20, 30, 40, 50, 60, 70],
|
||||
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||
'[SponsorBlock]: Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
|
||||
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
|
||||
chapters = self._chapters([70], ['c']) + [
|
||||
self._sponsor_chapter(10, 30, 'sponsor', remove=True),
|
||||
self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
|
||||
self._sponsor_chapter(40, 60, 'interaction', remove=True)]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])
|
||||
|
||||
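Stepping outside the diff for a moment: the expectation just above (cuts at 10-30, 20-50 and 40-60 collapsing into a single removal spanning 10-60) is plain interval merging. A minimal standalone sketch of that logic, not ModifyChaptersPP's actual implementation:

```python
def merge_cuts(cuts):
    """Merge overlapping or touching (start, end) ranges marked for removal."""
    merged = []
    for start, end in sorted(cuts):
        if merged and start <= merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], end)  # extend the previous cut
        else:
            merged.append([start, end])              # start a new cut
    return [tuple(cut) for cut in merged]


assert merge_cuts([(10, 30), (20, 50), (40, 60)]) == [(10, 60)]
```
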
+    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
+        chapters = self._chapters([170], ['c']) + [
+            self._sponsor_chapter(0, 30, 'intro'),
+            self._sponsor_chapter(20, 50, 'sponsor'),
+            self._sponsor_chapter(40, 60, 'selfpromo'),
+            self._sponsor_chapter(70, 90, 'sponsor'),
+            self._sponsor_chapter(80, 100, 'sponsor'),
+            self._sponsor_chapter(90, 110, 'sponsor'),
+            self._sponsor_chapter(120, 140, 'selfpromo'),
+            self._sponsor_chapter(130, 160, 'interaction'),
+            self._sponsor_chapter(150, 170, 'outro')]
+        expected = self._chapters(
+            [20, 30, 40, 50, 60, 70, 110, 120, 130, 140, 150, 160, 170],
+            ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Intermission/Intro Animation, Sponsor', '[SponsorBlock]: Sponsor',
+             '[SponsorBlock]: Sponsor, Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion', 'c',
+             '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
+             '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
+             '[SponsorBlock]: Interaction Reminder',
+             '[SponsorBlock]: Interaction Reminder, Endcards/Credits', '[SponsorBlock]: Endcards/Credits'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
+        chapters = self._chapters([170], ['c']) + [
+            self._chapter(0, 30, remove=True),
+            self._sponsor_chapter(20, 50, 'sponsor', remove=True),
+            self._chapter(40, 60, remove=True),
+            self._sponsor_chapter(70, 90, 'sponsor', remove=True),
+            self._chapter(80, 100, remove=True),
+            self._chapter(90, 110, remove=True),
+            self._sponsor_chapter(120, 140, 'sponsor', remove=True),
+            self._sponsor_chapter(130, 160, 'selfpromo', remove=True),
+            self._chapter(150, 170, remove=True)]
+        expected_cuts = [self._chapter(0, 60, remove=True),
+                         self._chapter(70, 110, remove=True),
+                         self._chapter(120, 170, remove=True)]
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, self._chapters([20], ['c']), expected_cuts)
+
+    def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
+        chapters = self._chapters([60], ['c']) + [
+            self._sponsor_chapter(10, 60, 'sponsor'),
+            self._sponsor_chapter(10, 40, 'intro'),
+            self._sponsor_chapter(30, 50, 'interaction'),
+            self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
+            self._sponsor_chapter(40, 50, 'interaction'),
+            self._sponsor_chapter(50, 60, 'outro')]
+        expected = self._chapters(
+            [10, 30, 40], ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation', '[SponsorBlock]: Sponsor, Endcards/Credits'])
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, expected, [self._chapter(30, 50, remove=True)])
+
+    def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
+        chapters = self._chapters([70], ['c']) + [
+            self._sponsor_chapter(10, 30, 'sponsor'),
+            self._sponsor_chapter(20, 50, 'interaction'),
+            self._sponsor_chapter(30, 50, 'selpromo', remove=True),
+            self._sponsor_chapter(40, 60, 'sponsor'),
+            self._sponsor_chapter(50, 60, 'interaction')]
+        expected = self._chapters(
+            [10, 20, 40, 50], ['c', '[SponsorBlock]: Sponsor',
+                               '[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, expected, [self._chapter(30, 50, remove=True)])
+
+    def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
+        chapters = self._chapters([70], ['c']) + [
+            self._sponsor_chapter(10, 60, 'sponsor'),
+            self._sponsor_chapter(20, 60, 'interaction'),
+            self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
+        expected = self._chapters(
+            [10, 20, 40, 50], ['c', '[SponsorBlock]: Sponsor',
+                               '[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, expected, [self._chapter(30, 50, remove=True)])
+
+    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
+        chapters = self._chapters([200], ['c']) + [
+            self._sponsor_chapter(10, 40, 'sponsor'),
+            self._sponsor_chapter(10, 30, 'intro'),
+            self._chapter(20, 30, remove=True),
+            self._sponsor_chapter(30, 40, 'selfpromo'),
+            self._sponsor_chapter(50, 70, 'sponsor'),
+            self._sponsor_chapter(60, 80, 'interaction'),
+            self._chapter(70, 80, remove=True),
+            self._sponsor_chapter(70, 90, 'sponsor'),
+            self._sponsor_chapter(80, 100, 'interaction'),
+            self._sponsor_chapter(120, 170, 'selfpromo'),
+            self._sponsor_chapter(130, 180, 'outro'),
+            self._chapter(140, 150, remove=True),
+            self._chapter(150, 160, remove=True)]
+        expected = self._chapters(
+            [10, 20, 30, 40, 50, 70, 80, 100, 110, 130, 140, 160],
+            ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
+             'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Interaction Reminder',
+             '[SponsorBlock]: Interaction Reminder', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
+             '[SponsorBlock]: Unpaid/Self Promotion, Endcards/Credits', '[SponsorBlock]: Endcards/Credits', 'c'])
+        expected_cuts = [self._chapter(20, 30, remove=True),
+                         self._chapter(70, 80, remove=True),
+                         self._chapter(140, 160, remove=True)]
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
+        chapters = (self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5'])
+                    + [self._sponsor_chapter(10, 90, 'sponsor')])
+        expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
+        cuts = [self._chapter(10, 90, remove=True)]
+        chapters = self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']) + cuts
+        expected = self._chapters([10, 20], ['c1', 'c5'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
+        chapters = (self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4'])
+                    + [self._sponsor_chapter(20, 30, 'sponsor'),
+                       self._sponsor_chapter(50, 70, 'selfpromo')])
+        expected = self._chapters([10, 20, 30, 40, 50, 70, 80],
+                                  ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
+                                   '[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthers(self):
+        cuts = [self._chapter(20, 30, remove=True), self._chapter(50, 70, remove=True)]
+        chapters = self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']) + cuts
+        expected = self._chapters([10, 30, 40, 50], ['c1', 'c2', 'c3', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
+        chapters = (self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4'])
+                    + [self._sponsor_chapter(10, 30, 'music_offtopic')])
+        expected = self._chapters(
+            [10, 30, 40, 50, 60],
+            ['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
+        cuts = [self._chapter(10, 30, remove=True)]
+        chapters = self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']) + cuts
+        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
+        chapters = (self._chapters([10, 20, 40], ['c1', 'c2', 'c3'])
+                    + [self._sponsor_chapter(20, 30, 'sponsor')])
+        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
+        cuts = [self._chapter(20, 30, remove=True)]
+        chapters = self._chapters([10, 20, 40], ['c1', 'c2', 'c3']) + cuts
+        expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
+        chapters = (self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
+                    + [self._sponsor_chapter(20, 30, 'sponsor')])
+        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
+        cuts = [self._chapter(20, 30, remove=True)]
+        chapters = self._chapters([10, 30, 40], ['c1', 'c2', 'c3']) + cuts
+        expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
+        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
+                    + [self._sponsor_chapter(10, 30, 'sponsor')])
+        expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
+        cuts = [self._chapter(10, 30, remove=True)]
+        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
+        expected = self._chapters([10, 20], ['c1', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
+        chapters = (self._chapters([20, 40, 60], ['c1', 'c2', 'c3'])
+                    + [self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')])
+        expected = self._chapters(
+            [10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
+        cuts = [self._chapter(0, 10, remove=True), self._chapter(50, 60, remove=True)]
+        chapters = self._chapters([20, 40, 60], ['c1', 'c2', 'c3']) + cuts
+        expected = self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
+        chapters = (self._chapters([10, 40, 50], ['c1', 'c2', 'c3'])
+                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')])
+        expected = self._chapters(
+            [20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(self):
+        cuts = [self._chapter(0, 20, remove=True), self._chapter(30, 50, remove=True)]
+        chapters = self._chapters([10, 40, 50], ['c1', 'c2', 'c3']) + cuts
+        expected = self._chapters([10], ['c2'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
+
+    def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
+        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
+                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')])
+        expected = self._chapters([20, 40], ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
+
+    def test_remove_marked_arrange_sponsors_EverythingCut(self):
+        cuts = [self._chapter(0, 20, remove=True), self._chapter(20, 40, remove=True)]
+        chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, [], [self._chapter(0, 40, remove=True)])
+
+    def test_remove_marked_arrange_sponsors_TinyChaptersInTheOriginalArePreserved(self):
+        chapters = self._chapters([0.1, 0.2, 0.3, 0.4], ['c1', 'c2', 'c3', 'c4'])
+        self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])
+
+    def test_remove_marked_arrange_sponsors_TinySponsorsAreIgnored(self):
+        chapters = [self._sponsor_chapter(0, 0.1, 'intro'), self._chapter(0.1, 0.2, 'c1'),
+                    self._sponsor_chapter(0.2, 0.3, 'sponsor'), self._chapter(0.3, 0.4, 'c2'),
+                    self._sponsor_chapter(0.4, 0.5, 'outro')]
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, self._chapters([0.3, 0.5], ['c1', 'c2']), [])
+
+    def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromCutsAreIgnored(self):
+        cuts = [self._chapter(1.5, 2.5, remove=True)]
+        chapters = self._chapters([2, 3, 3.5], ['c1', 'c2', 'c3']) + cuts
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, self._chapters([2, 2.5], ['c1', 'c3']), cuts)
+
+    def test_remove_marked_arrange_sponsors_SingleTinyChapterIsPreserved(self):
+        cuts = [self._chapter(0.5, 2, remove=True)]
+        chapters = self._chapters([2], ['c']) + cuts
+        self._remove_marked_arrange_sponsors_test_impl(
+            chapters, self._chapters([0.5], ['c']), cuts)
+
+    def test_remove_marked_arrange_sponsors_TinyChapterAtTheStartPrependedToTheNext(self):
+        cuts = [self._chapter(0.5, 2, remove=True)]
+        chapters = self._chapters([2, 4], ['c1', 'c2']) + cuts
+        self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([2.5], ['c2']), cuts)
|
||||
|
||||
def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
|
||||
chapters = self._chapters([1, 3, 4], ['c1', 'c2', 'c3']) + [
|
||||
self._sponsor_chapter(1.5, 2.5, 'sponsor')]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([1.5, 2.5, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
|
||||
chapters = self._chapters([2, 3, 5], ['c1', 'c2', 'c3']) + [
|
||||
self._sponsor_chapter(1, 3, 'sponsor'),
|
||||
self._sponsor_chapter(2.5, 4, 'selfpromo')
|
||||
]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([1, 3, 4, 5], [
|
||||
'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_TinySponsorsPrependedToTheNextSponsor(self):
|
||||
chapters = self._chapters([4], ['c']) + [
|
||||
self._sponsor_chapter(1.5, 2, 'sponsor'),
|
||||
self._sponsor_chapter(2, 4, 'selfpromo')
|
||||
]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([1.5, 4], ['c', '[SponsorBlock]: Unpaid/Self Promotion']), [])
|
||||
|
||||
def test_remove_marked_arrange_sponsors_SmallestSponsorInTheOverlapGetsNamed(self):
|
||||
self._pp._sponsorblock_chapter_title = '[SponsorBlock]: %(name)s'
|
||||
chapters = self._chapters([10], ['c']) + [
|
||||
self._sponsor_chapter(2, 8, 'sponsor'),
|
||||
self._sponsor_chapter(4, 6, 'selfpromo')
|
||||
]
|
||||
self._remove_marked_arrange_sponsors_test_impl(
|
||||
chapters, self._chapters([2, 4, 6, 8, 10], [
|
||||
'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||
'[SponsorBlock]: Sponsor', 'c'
|
||||
]), [])
|
||||
|
||||
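    # Helper shape (sketch; an assumption about the omitted test fixtures):
    # `_chapter`/`_chapters` are taken to build the plain dicts ModifyChapters
    # operates on, e.g.
    #   self._chapters([10, 20], ['c1', 'c2']) ==
    #       [{'start_time': 0, 'end_time': 10, 'title': 'c1'},
    #        {'start_time': 10, 'end_time': 20, 'title': 'c2'}]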
    def test_make_concat_opts_CommonCase(self):
        sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
outpoint 1.000000
file 'file:test'
inpoint 2.000000
outpoint 10.000000
file 'file:test'
inpoint 20.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 30)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))
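    # Reading the expected spec above: each `file` entry replays the same source,
    # and the ffmpeg concat-demuxer directives `inpoint`/`outpoint` select the span
    # to keep -- here [0, 1), [2, 10) and [20, 30), i.e. everything except s1 and s2.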
    def test_make_concat_opts_NoZeroDurationChunkAtVideoStart(self):
        sponsor_chapters = [self._chapter(0, 1, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
inpoint 1.000000
outpoint 10.000000
file 'file:test'
inpoint 20.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 30)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))

    def test_make_concat_opts_NoZeroDurationChunkAtVideoEnd(self):
        sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
        expected = '''ffconcat version 1.0
file 'file:test'
outpoint 1.000000
file 'file:test'
inpoint 2.000000
outpoint 10.000000
'''
        opts = self._pp._make_concat_opts(sponsor_chapters, 20)
        self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))
    def test_quote_for_concat_RunsOfQuotes(self):
        self.assertEqual(
            r"'special '\'' '\'\''characters'\'\'\''galore'",
            self._pp._quote_for_ffmpeg("special ' ''characters'''galore"))

    def test_quote_for_concat_QuotesAtStart(self):
        self.assertEqual(
            r"\'\'\''special '\'' characters '\'' galore'",
            self._pp._quote_for_ffmpeg("'''special ' characters ' galore"))

    def test_quote_for_concat_QuotesAtEnd(self):
        self.assertEqual(
            r"'special '\'' characters '\'' galore'\'\'\'",
            self._pp._quote_for_ffmpeg("special ' characters ' galore'''"))
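# Quoting rule exercised above (annotation, not in the original file): ffconcat
# specs use single-quoted strings, and a literal ' cannot occur inside one, so
# each quote is written as '\'' (close, escaped quote, reopen), e.g. a'b -> 'a'\''b'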
@@ -19,6 +19,7 @@ from yt_dlp.extractor import (
    CeskaTelevizeIE,
    LyndaIE,
    NPOIE,
    PBSIE,
    ComedyCentralIE,
    NRKTVIE,
    RaiPlayIE,

@@ -372,5 +373,42 @@ class TestDemocracynowSubtitles(BaseTestSubtitles):
        self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')


@is_download_test
class TestPBSSubtitles(BaseTestSubtitles):
    url = 'https://www.pbs.org/video/how-fantasy-reflects-our-world-picecq/'
    IE = PBSIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['allsubtitles'] = True
        subtitles = self.getSubtitles()
        self.assertEqual(set(subtitles.keys()), set(['en']))

    def test_subtitles_dfxp_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'dfxp'
        subtitles = self.getSubtitles()
        self.assertIn(md5(subtitles['en']), ['643b034254cdc3768ff1e750b6b5873b'])

    def test_subtitles_vtt_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'vtt'
        subtitles = self.getSubtitles()
        self.assertIn(
            md5(subtitles['en']), ['937a05711555b165d4c55a9667017045', 'f49ea998d6824d94959c8152a368ff73'])

    def test_subtitles_srt_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'srt'
        subtitles = self.getSubtitles()
        self.assertIn(md5(subtitles['en']), ['2082c21b43759d9bf172931b2f2ca371'])

    def test_subtitles_sami_format(self):
        self.DL.params['writesubtitles'] = True
        self.DL.params['subtitlesformat'] = 'sami'
        subtitles = self.getSubtitles()
        self.assertIn(md5(subtitles['en']), ['4256b16ac7da6a6780fafd04294e85cd'])


if __name__ == '__main__':
    unittest.main()
@@ -62,6 +62,7 @@ from yt_dlp.utils import (
    parse_iso8601,
    parse_resolution,
    parse_bitrate,
    parse_qs,
    pkcs1pad,
    read_batch_urls,
    sanitize_filename,

@@ -117,8 +118,6 @@ from yt_dlp.compat import (
    compat_getenv,
    compat_os_name,
    compat_setenv,
    compat_urlparse,
    compat_parse_qs,
)


@@ -688,38 +687,36 @@ class TestUtil(unittest.TestCase):
        self.assertTrue(isinstance(data, bytes))

    def test_update_url_query(self):
        def query_dict(url):
            return compat_parse_qs(compat_urlparse.urlparse(url).query)
        self.assertEqual(query_dict(update_url_query(
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
            query_dict('http://example.com/path?quality=HD&format=mp4'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?quality=HD&format=mp4'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'fields': 'id,formats,subtitles'})),
            query_dict('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path?manifest=f4m', {'manifest': []})),
            query_dict('http://example.com/path'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
            query_dict('http://example.com/path?system=LINUX'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?system=LINUX'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'fields': b'id,formats,subtitles'})),
            query_dict('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?fields=id,formats,subtitles'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'width': 1080, 'height': 720})),
            query_dict('http://example.com/path?width=1080&height=720'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?width=1080&height=720'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'bitrate': 5020.43})),
            query_dict('http://example.com/path?bitrate=5020.43'))
        self.assertEqual(query_dict(update_url_query(
            parse_qs('http://example.com/path?bitrate=5020.43'))
        self.assertEqual(parse_qs(update_url_query(
            'http://example.com/path', {'test': '第二行тест'})),
            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
            parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))

    def test_multipart_encode(self):
        self.assertEqual(
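# Helper note (sketch; an assumption about yt_dlp.utils.parse_qs): the rewritten
# asserts above rely on parse_qs(url) behaving like
#     compat_parse_qs(compat_urlparse.urlparse(url).query)
# e.g. parse_qs('http://example.com/path?a=1&a=2') == {'a': ['1', '2']}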
@@ -1207,35 +1204,12 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
            '9999 51')

    def test_match_str(self):
        self.assertRaises(ValueError, match_str, 'xy>foobar', {})
        # Unary
        self.assertFalse(match_str('xy', {'x': 1200}))
        self.assertTrue(match_str('!xy', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 1200}))
        self.assertFalse(match_str('!x', {'x': 1200}))
        self.assertTrue(match_str('x', {'x': 0}))
        self.assertFalse(match_str('x>0', {'x': 0}))
        self.assertFalse(match_str('x>0', {}))
        self.assertTrue(match_str('x>?0', {}))
        self.assertTrue(match_str('x>1K', {'x': 1200}))
        self.assertFalse(match_str('x>2K', {'x': 1200}))
        self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
        self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
        self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
        self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 90, 'description': 'foo'}))
        self.assertTrue(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 10}))
        self.assertTrue(match_str('is_live', {'is_live': True}))
        self.assertFalse(match_str('is_live', {'is_live': False}))
        self.assertFalse(match_str('is_live', {'is_live': None}))

@@ -1249,6 +1223,75 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
        self.assertFalse(match_str('!title', {'title': 'abc'}))
        self.assertFalse(match_str('!title', {'title': ''}))

        # Numeric
        self.assertFalse(match_str('x>0', {'x': 0}))
        self.assertFalse(match_str('x>0', {}))
        self.assertTrue(match_str('x>?0', {}))
        self.assertTrue(match_str('x>1K', {'x': 1200}))
        self.assertFalse(match_str('x>2K', {'x': 1200}))
        self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
        self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))

        # String
        self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
        self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
        self.assertTrue(match_str('y^=foo', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!^=foo', {'y': 'foobar42'}))
        self.assertFalse(match_str('y^=bar', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!^=bar', {'y': 'foobar42'}))
        self.assertRaises(ValueError, match_str, 'x^=42', {'x': 42})
        self.assertTrue(match_str('y*=bar', {'y': 'foobar42'}))
        self.assertFalse(match_str('y!*=bar', {'y': 'foobar42'}))
        self.assertFalse(match_str('y*=baz', {'y': 'foobar42'}))
        self.assertTrue(match_str('y!*=baz', {'y': 'foobar42'}))
        self.assertTrue(match_str('y$=42', {'y': 'foobar42'}))
        self.assertFalse(match_str('y$=43', {'y': 'foobar42'}))

        # And
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 90, 'description': 'foo'}))
        self.assertTrue(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
        self.assertFalse(match_str(
            'like_count > 100 & dislike_count <? 50 & description',
            {'like_count': 190, 'dislike_count': 10}))

        # Regex
        self.assertTrue(match_str(r'x~=\bbar', {'x': 'foo bar'}))
        self.assertFalse(match_str(r'x~=\bbar.+', {'x': 'foo bar'}))
        self.assertFalse(match_str(r'x~=^FOO', {'x': 'foo bar'}))
        self.assertTrue(match_str(r'x~=(?i)^FOO', {'x': 'foo bar'}))

        # Quotes
        self.assertTrue(match_str(r'x^="foo"', {'x': 'foo "bar"'}))
        self.assertFalse(match_str(r'x^="foo "', {'x': 'foo "bar"'}))
        self.assertFalse(match_str(r'x$="bar"', {'x': 'foo "bar"'}))
        self.assertTrue(match_str(r'x$=" \"bar\""', {'x': 'foo "bar"'}))

        # Escaping &
        self.assertFalse(match_str(r'x=foo & bar', {'x': 'foo & bar'}))
        self.assertTrue(match_str(r'x=foo \& bar', {'x': 'foo & bar'}))
        self.assertTrue(match_str(r'x=foo \& bar & x^=foo', {'x': 'foo & bar'}))
        self.assertTrue(match_str(r'x="foo \& bar" & x^=foo', {'x': 'foo & bar'}))

        # Example from docs
        self.assertTrue(match_str(
            r"!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'",
            {'description': 'Raining Cats & Dogs'}))

        # Incomplete
        self.assertFalse(match_str('id!=foo', {'id': 'foo'}, True))
        self.assertTrue(match_str('x', {'id': 'foo'}, True))
        self.assertTrue(match_str('!x', {'id': 'foo'}, True))
        self.assertFalse(match_str('x', {'id': 'foo'}, False))
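    # Operator cheat-sheet for the asserts above (summary, not in the original file):
    #   = / !=  equality;  ^= / $= / *=  starts-with / ends-with / contains
    #   ~=  regex search;  > < >= <=  numeric comparison (K/M suffixes allowed)
    #   a trailing ? (e.g. >?) also matches when the field is missing;
    #   &  joins clauses;  !  negates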
    def test_parse_dfxp_time_expr(self):
        self.assertEqual(parse_dfxp_time_expr(None), None)
        self.assertEqual(parse_dfxp_time_expr(''), None)

@@ -1537,8 +1580,11 @@ Line 1
        self.assertEqual(LazyList(it).exhaust(), it)
        self.assertEqual(LazyList(it)[5], it[5])

        self.assertEqual(LazyList(it)[5:], it[5:])
        self.assertEqual(LazyList(it)[:5], it[:5])
        self.assertEqual(LazyList(it)[::2], it[::2])
        self.assertEqual(LazyList(it)[1::2], it[1::2])
        self.assertEqual(LazyList(it)[5::-1], it[5::-1])
        self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
        self.assertEqual(LazyList(it)[::-1], it[::-1])
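# Annotation (not in the original file): these asserts exercise LazyList's
# list-like indexing and slicing over a plain iterable `it` (built in the
# omitted setup), including negative steps; items are drawn from the underlying
# iterator only as far as each access requires (fully, for negative indices).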
@@ -1550,6 +1596,7 @@ Line 1

        self.assertEqual(list(LazyList(it).reverse()), it[::-1])
        self.assertEqual(list(LazyList(it).reverse()[1:3:7]), it[::-1][1:3:7])
        self.assertEqual(list(LazyList(it).reverse()[::-1]), it)

    def test_LazyList_laziness(self):
1000	yt_dlp/YoutubeDL.py (file diff suppressed because it is too large)
@@ -1,23 +1,24 @@
#!/usr/bin/env python3
# coding: utf-8

from __future__ import unicode_literals
f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp'  # noqa: F541

__license__ = 'Public Domain'

import codecs
import io
import itertools
import os
import random
import re
import sys

from .options import (
    parseOpts,
)
from .compat import (
    compat_getpass,
    compat_shlex_quote,
    workaround_optparse_bug9161,
)
from .cookies import SUPPORTED_BROWSERS

@@ -46,14 +47,15 @@ from .downloader import (
from .extractor import gen_extractors, list_extractors
from .extractor.common import InfoExtractor
from .extractor.adobepass import MSO_INFO
from .postprocessor.ffmpeg import (
from .postprocessor import (
    FFmpegExtractAudioPP,
    FFmpegSubtitlesConvertorPP,
    FFmpegThumbnailsConvertorPP,
    FFmpegVideoConvertorPP,
    FFmpegVideoRemuxerPP,
    MetadataFromFieldPP,
    MetadataParserPP,
)
from .postprocessor.metadatafromfield import MetadataFromFieldPP
from .YoutubeDL import YoutubeDL
@@ -107,14 +109,14 @@ def _real_main(argv=None):

    if opts.list_extractors:
        for ie in list_extractors(opts.age_limit):
            write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
            write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n', out=sys.stdout)
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                write_string('  ' + mu + '\n', out=sys.stdout)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in list_extractors(opts.age_limit):
            if not ie._WORKING:
            if not ie.working():
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:

@@ -246,7 +248,7 @@ def _real_main(argv=None):
    if opts.cookiesfrombrowser is not None:
        opts.cookiesfrombrowser = [
            part.strip() or None for part in opts.cookiesfrombrowser.split(':', 1)]
        if opts.cookiesfrombrowser[0] not in SUPPORTED_BROWSERS:
        if opts.cookiesfrombrowser[0].lower() not in SUPPORTED_BROWSERS:
            parser.error('unsupported browser specified for cookies')

    if opts.date is not None:

@@ -254,35 +256,7 @@ def _real_main(argv=None):
    else:
        date = DateRange(opts.dateafter, opts.datebefore)

    def parse_compat_opts():
        parsed_compat_opts, compat_opts = set(), opts.compat_opts[::-1]
        while compat_opts:
            actual_opt = opt = compat_opts.pop().lower()
            if opt == 'youtube-dl':
                compat_opts.extend(['-multistreams', 'all'])
            elif opt == 'youtube-dlc':
                compat_opts.extend(['-no-youtube-channel-redirect', '-no-live-chat', 'all'])
            elif opt == 'all':
                parsed_compat_opts.update(all_compat_opts)
            elif opt == '-all':
                parsed_compat_opts = set()
            else:
                if opt[0] == '-':
                    opt = opt[1:]
                    parsed_compat_opts.discard(opt)
                else:
                    parsed_compat_opts.update([opt])
                if opt not in all_compat_opts:
                    parser.error('Invalid compatibility option %s' % actual_opt)
        return parsed_compat_opts

    all_compat_opts = [
        'filename', 'format-sort', 'abort-on-error', 'format-spec', 'no-playlist-metafiles',
        'multistreams', 'no-live-chat', 'playlist-index', 'list-formats', 'no-direct-merge',
        'no-youtube-channel-redirect', 'no-youtube-unavailable-videos', 'no-attach-info-json',
        'embed-thumbnail-atomicparsley', 'seperate-video-versions',
    ]
    compat_opts = parse_compat_opts()
    compat_opts = opts.compat_opts

    def _unused_compat_opt(name):
        if name not in compat_opts:

@@ -291,7 +265,7 @@ def _real_main(argv=None):
        compat_opts.update(['*%s' % name])
        return True

    def set_default_compat(compat_name, opt_name, default=True, remove_compat=False):
    def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
        attr = getattr(opts, opt_name)
        if compat_name in compat_opts:
            if attr is None:

@@ -305,8 +279,9 @@ def _real_main(argv=None):
            setattr(opts, opt_name, default)
        return None

    set_default_compat('abort-on-error', 'ignoreerrors')
    set_default_compat('abort-on-error', 'ignoreerrors', 'only_download')
    set_default_compat('no-playlist-metafiles', 'allow_playlist_files')
    set_default_compat('no-clean-infojson', 'clean_infojson')
    if 'format-sort' in compat_opts:
        opts.format_sort.extend(InfoExtractor.FormatSort.ytdl_default)
    _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)

@@ -316,7 +291,7 @@ def _real_main(argv=None):
    outtmpl_default = opts.outtmpl.get('default')
    if 'filename' in compat_opts:
        if outtmpl_default is None:
            outtmpl_default = '%(title)s.%(id)s.%(ext)s'
            outtmpl_default = '%(title)s-%(id)s.%(ext)s'
            opts.outtmpl.update({'default': outtmpl_default})
        else:
            _unused_compat_opt('filename')

@@ -327,9 +302,14 @@ def _real_main(argv=None):
        parser.error('invalid %s %r: %s' % (msg, tmpl, error_to_compat_str(err)))

    for k, tmpl in opts.outtmpl.items():
        validate_outtmpl(tmpl, '%s output template' % k)
    for tmpl in opts.forceprint:
        validate_outtmpl(tmpl, f'{k} output template')
    opts.forceprint = opts.forceprint or []
    for tmpl in opts.forceprint or []:
        validate_outtmpl(tmpl, 'print template')
    validate_outtmpl(opts.sponsorblock_chapter_title, 'SponsorBlock chapter title')
    for k, tmpl in opts.progress_template.items():
        k = f'{k[:-6]} console title' if '-title' in k else f'{k} progress'
        validate_outtmpl(tmpl, f'{k} template')

    if opts.extractaudio and not opts.keepvideo and opts.format is None:
        opts.format = 'bestaudio/best'

@@ -343,13 +323,29 @@ def _real_main(argv=None):
        if re.match(InfoExtractor.FormatSort.regex, f) is None:
            parser.error('invalid format sort string "%s" specified' % f)

    if opts.metafromfield is None:
        opts.metafromfield = []
    def metadataparser_actions(f):
        if isinstance(f, str):
            cmd = '--parse-metadata %s' % compat_shlex_quote(f)
            try:
                actions = [MetadataFromFieldPP.to_action(f)]
            except Exception as err:
                parser.error(f'{cmd} is invalid; {err}')
        else:
            cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
            actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))

        for action in actions:
            try:
                MetadataParserPP.validate_action(*action)
            except Exception as err:
                parser.error(f'{cmd} is invalid; {err}')
            yield action

    if opts.parse_metadata is None:
        opts.parse_metadata = []
    if opts.metafromtitle is not None:
        opts.metafromfield.append('title:%s' % opts.metafromtitle)
    for f in opts.metafromfield:
        if re.match(MetadataFromFieldPP.regex, f) is None:
            parser.error('invalid format string "%s" specified for --parse-metadata' % f)
        opts.parse_metadata.append('title:%s' % opts.metafromtitle)
    opts.parse_metadata = list(itertools.chain(*map(metadataparser_actions, opts.parse_metadata)))

    any_getting = opts.forceprint or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
    any_printing = opts.print_json
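# Illustrative invocations feeding metadataparser_actions (example values,
# not from this diff):
#   --parse-metadata 'description:(?P<meta_comment>.+)'  -> one FROM:TO interpret action
#   --replace-in-metadata 'title,uploader' 'foo' 'bar'   -> one REPLACE action per listed field
# Every action is checked with MetadataParserPP.validate_action before use.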
@@ -360,15 +356,34 @@ def _real_main(argv=None):
    if opts.getcomments and not printing_json:
        opts.writeinfojson = True

    if opts.no_sponsorblock:
        opts.sponsorblock_mark = set()
        opts.sponsorblock_remove = set()
    sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove

    if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None:
        opts.addchapters = True
    opts.remove_chapters = opts.remove_chapters or []

    def report_conflict(arg1, arg2):
        warnings.append('%s is ignored since %s was given' % (arg2, arg1))

    if (opts.remove_chapters or sponsorblock_query) and opts.sponskrub is not False:
    if opts.sponskrub:
        if opts.remove_chapters:
            report_conflict('--remove-chapters', '--sponskrub')
        if opts.sponsorblock_mark:
            report_conflict('--sponsorblock-mark', '--sponskrub')
        if opts.sponsorblock_remove:
            report_conflict('--sponsorblock-remove', '--sponskrub')
        opts.sponskrub = False
    if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
        report_conflict('--split-chapter', '--sponskrub-cut')
        opts.sponskrub_cut = False

    if opts.remuxvideo and opts.recodevideo:
        report_conflict('--recode-video', '--remux-video')
        opts.remuxvideo = False
    if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
        report_conflict('--split-chapter', '--sponskrub-cut')
        opts.sponskrub_cut = False

    if opts.allow_unplayable_formats:
        if opts.extractaudio:

@@ -395,16 +410,30 @@ def _real_main(argv=None):
        if opts.fixup and opts.fixup.lower() not in ('never', 'ignore'):
            report_conflict('--allow-unplayable-formats', '--fixup')
        opts.fixup = 'never'
        if opts.remove_chapters:
            report_conflict('--allow-unplayable-formats', '--remove-chapters')
            opts.remove_chapters = []
        if opts.sponsorblock_remove:
            report_conflict('--allow-unplayable-formats', '--sponsorblock-remove')
            opts.sponsorblock_remove = set()
        if opts.sponskrub:
            report_conflict('--allow-unplayable-formats', '--sponskrub')
            opts.sponskrub = False

    # PostProcessors
    postprocessors = []
    if opts.metafromfield:
    postprocessors = list(opts.add_postprocessors)
    if sponsorblock_query:
        postprocessors.append({
            'key': 'MetadataFromField',
            'formats': opts.metafromfield,
            'key': 'SponsorBlock',
            'categories': sponsorblock_query,
            'api': opts.sponsorblock_api,
            # Run this immediately after extraction is complete
            'when': 'pre_process'
        })
    if opts.parse_metadata:
        postprocessors.append({
            'key': 'MetadataParser',
            'actions': opts.parse_metadata,
            # Run this immediately after extraction is complete
            'when': 'pre_process'
        })
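# Embedding sketch (hypothetical values, mirroring the dicts built above):
#   from yt_dlp import YoutubeDL
#   YoutubeDL({'postprocessors': [
#       {'key': 'SponsorBlock', 'categories': ['sponsor'], 'when': 'pre_process'},
#       {'key': 'MetadataParser', 'actions': [...], 'when': 'pre_process'},
#   ]})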
@@ -425,7 +454,7 @@ def _real_main(argv=None):
    # Must be after all other before_dl
    if opts.exec_before_dl_cmd:
        postprocessors.append({
            'key': 'ExecAfterDownload',
            'key': 'Exec',
            'exec_cmd': opts.exec_before_dl_cmd,
            'when': 'before_dl'
        })

@@ -446,29 +475,48 @@ def _real_main(argv=None):
            'key': 'FFmpegVideoConvertor',
            'preferedformat': opts.recodevideo,
        })
    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
    # FFmpegExtractAudioPP as containers before conversion may not support
    # metadata (3gp, webm, etc.)
    # And this post-processor should be placed before other metadata
    # manipulating post-processors (FFmpegEmbedSubtitle) to prevent loss of
    # extra metadata. By default ffmpeg preserves metadata applicable for both
    # source and target containers. From this point the container won't change,
    # so metadata can be added here.
    if opts.addmetadata:
        postprocessors.append({'key': 'FFmpegMetadata'})
    # If ModifyChapters is going to remove chapters, subtitles must already be in the container.
    if opts.embedsubtitles:
        already_have_subtitle = opts.writesubtitles
        already_have_subtitle = opts.writesubtitles and 'no-keep-subs' not in compat_opts
        postprocessors.append({
            'key': 'FFmpegEmbedSubtitle',
            # already_have_subtitle = True prevents the file from being deleted after embedding
            'already_have_subtitle': already_have_subtitle
        })
        if not already_have_subtitle:
        if not opts.writeautomaticsub and 'no-keep-subs' not in compat_opts:
            opts.writesubtitles = True
    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and not opts.writeautomaticsub:
        opts.writesubtitles = True
    # ModifyChapters must run before FFmpegMetadataPP
    remove_chapters_patterns = []
    for regex in opts.remove_chapters:
        try:
            remove_chapters_patterns.append(re.compile(regex))
        except re.error as err:
            parser.error(f'invalid --remove-chapters regex {regex!r} - {err}')
    if opts.remove_chapters or sponsorblock_query:
        postprocessors.append({
            'key': 'ModifyChapters',
            'remove_chapters_patterns': remove_chapters_patterns,
            'remove_sponsor_segments': opts.sponsorblock_remove,
            'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
            'force_keyframes': opts.force_keyframes_at_cuts
        })
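    # Note (not part of the original source): ModifyChapters is the post-processor
    # that performs the actual SponsorBlock cuts; 'force_keyframes' maps to
    # --force-keyframes-at-cuts, which re-encodes around each cut so removals are
    # frame-accurate at the cost of speed.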
    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
    # FFmpegExtractAudioPP as containers before conversion may not support
    # metadata (3gp, webm, etc.)
    # By default ffmpeg preserves metadata applicable for both
    # source and target containers. From this point the container won't change,
    # so metadata can be added here.
    if opts.addmetadata or opts.addchapters:
        postprocessors.append({
            'key': 'FFmpegMetadata',
            'add_chapters': opts.addchapters,
            'add_metadata': opts.addmetadata,
        })
    # Note: Deprecated
    # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
    # but must be below EmbedSubtitle and FFmpegMetadata
    # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29

@@ -491,15 +539,19 @@ def _real_main(argv=None):
        })
        if not already_have_thumbnail:
            opts.writethumbnail = True
            opts.outtmpl['pl_thumbnail'] = ''
    if opts.split_chapters:
        postprocessors.append({'key': 'FFmpegSplitChapters'})
        postprocessors.append({
            'key': 'FFmpegSplitChapters',
            'force_keyframes': opts.force_keyframes_at_cuts,
        })
    # XAttrMetadataPP should be run after post-processors that may change file contents
    if opts.xattrs:
        postprocessors.append({'key': 'XAttrMetadata'})
    # ExecAfterDownload must be the last PP
    # Exec must be the last PP
    if opts.exec_cmd:
        postprocessors.append({
            'key': 'ExecAfterDownload',
            'key': 'Exec',
            'exec_cmd': opts.exec_cmd,
            # Run this only after the files have been moved to their final locations
            'when': 'after_move'
        })

@@ -528,6 +580,7 @@ def _real_main(argv=None):

    ydl_opts = {
        'usenetrc': opts.usenetrc,
        'netrc_location': opts.netrc_location,
        'username': opts.username,
        'password': opts.password,
        'twofactor': opts.twofactor,

@@ -549,7 +602,7 @@ def _real_main(argv=None):
        'forcejson': opts.dumpjson or opts.print_json,
        'dump_single_json': opts.dump_single_json,
        'force_write_download_archive': opts.force_write_download_archive,
        'simulate': opts.simulate or any_getting,
        'simulate': (any_getting or None) if opts.simulate is None else opts.simulate,
        'skip_download': opts.skip_download,
        'format': opts.format,
        'allow_unplayable_formats': opts.allow_unplayable_formats,

@@ -583,8 +636,9 @@ def _real_main(argv=None):
        'noresizebuffer': opts.noresizebuffer,
        'http_chunk_size': opts.http_chunk_size,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'progress_template': opts.progress_template,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'playlistreverse': opts.playlist_reverse,

@@ -681,10 +735,6 @@ def _real_main(argv=None):
        'geo_bypass_ip_block': opts.geo_bypass_ip_block,
        'warnings': warnings,
        'compat_opts': compat_opts,
        # just for deprecation check
        'autonumber': opts.autonumber or None,
        'usetitle': opts.usetitle or None,
        'useid': opts.useid or None,
    }

    with YoutubeDL(ydl_opts) as ydl:

@@ -733,6 +783,11 @@ def main(argv=None):
        sys.exit('ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit('\nERROR: Interrupted by user')
    except BrokenPipeError:
        # https://docs.python.org/3/library/signal.html#note-on-sigpipe
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, sys.stdout.fileno())
        sys.exit(r'\nERROR: {err}')


__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
269	yt_dlp/aes.py
@@ -2,36 +2,68 @@ from __future__ import unicode_literals

from math import ceil

from .compat import compat_b64decode
from .compat import compat_b64decode, compat_pycrypto_AES
from .utils import bytes_to_intlist, intlist_to_bytes


if compat_pycrypto_AES:
    def aes_cbc_decrypt_bytes(data, key, iv):
        """ Decrypt bytes with AES-CBC using pycryptodome """
        return compat_pycrypto_AES.new(key, compat_pycrypto_AES.MODE_CBC, iv).decrypt(data)

    def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
        """ Decrypt bytes with AES-GCM using pycryptodome """
        return compat_pycrypto_AES.new(key, compat_pycrypto_AES.MODE_GCM, nonce).decrypt_and_verify(data, tag)

else:
    def aes_cbc_decrypt_bytes(data, key, iv):
        """ Decrypt bytes with AES-CBC using native implementation since pycryptodome is unavailable """
        return intlist_to_bytes(aes_cbc_decrypt(*map(bytes_to_intlist, (data, key, iv))))

    def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
        """ Decrypt bytes with AES-GCM using native implementation since pycryptodome is unavailable """
        return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce))))
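# Usage sketch (annotation, not in the original file): both helpers take and
# return `bytes`, using pycryptodome when importable and the pure-Python
# fallback otherwise:
#   plaintext = aes_cbc_decrypt_bytes(ciphertext, key, iv)
#   plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, tag, nonce)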
BLOCK_SIZE_BYTES = 16


def aes_ctr_decrypt(data, key, counter):
def aes_ctr_decrypt(data, key, iv):
    """
    Decrypt with aes in counter mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {instance} counter  Instance whose next_value function (@returns {int[]} 16-Byte block)
                               returns the next counter block
    @param {int[]} iv          16-Byte initialization vector
    @returns {int[]}           decrypted data
    """
    return aes_ctr_encrypt(data, key, iv)


def aes_ctr_encrypt(data, key, iv):
    """
    Encrypt with aes in counter mode

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte initialization vector
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    counter = iter_vector(iv)

    decrypted_data = []
    encrypted_data = []
    for i in range(block_count):
        counter_block = counter.next_value()
        counter_block = next(counter)
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        cipher_counter_block = aes_encrypt(counter_block, expanded_key)
        decrypted_data += xor(block, cipher_counter_block)
    decrypted_data = decrypted_data[:len(data)]
        encrypted_data += xor(block, cipher_counter_block)
    encrypted_data = encrypted_data[:len(data)]

    return decrypted_data
    return encrypted_data
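# Annotation (not in the original file): CTR mode XORs the data with a keystream
# of encrypted counter blocks; since XOR is its own inverse, decryption is the
# same operation as encryption, which is why aes_ctr_decrypt simply delegates.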
def aes_cbc_decrypt(data, key, iv):

@@ -88,39 +120,47 @@ def aes_cbc_encrypt(data, key, iv):
    return encrypted_data


def key_expansion(data):
def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
    """
    Generate key schedule
    Decrypt with aes in GBM mode and checks authenticity using tag

    @param {int[]} data        16/24/32-Byte cipher key
    @returns {int[]}           176/208/240-Byte expanded key
    @param {int[]} data        cipher
    @param {int[]} key         16-Byte cipher key
    @param {int[]} tag         authentication tag
    @param {int[]} nonce       IV (recommended 12-Byte)
    @returns {int[]}           decrypted data
    """
    data = data[:]  # copy
    rcon_iteration = 1
    key_size_bytes = len(data)
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

    while len(data) < expanded_key_size_bytes:
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    # XXX: check aes, gcm param

    for _ in range(3):
        temp = data[-4:]
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))

    if key_size_bytes == 32:
        temp = data[-4:]
        temp = sub_bytes(temp)
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    if len(nonce) == 12:
        j0 = nonce + [0, 0, 0, 1]
    else:
        fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
        ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
        j0 = ghash(hash_subkey, ghash_in)

    for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
        temp = data[-4:]
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    data = data[:expanded_key_size_bytes]
    # TODO: add nonce support to aes_ctr_decrypt

    return data
    # nonce_ctr = j0[:12]
    iv_ctr = inc(j0)

    decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr)))
    pad_len = len(data) // 16 * 16
    s_tag = ghash(
        hash_subkey,
        data
        + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len)  # pad
        + bytes_to_intlist((0 * 8).to_bytes(8, 'big')  # length of associated data
                           + ((len(data) * 8).to_bytes(8, 'big')))  # length of data
    )

    if tag != aes_ctr_encrypt(s_tag, key, j0):
        raise ValueError("Mismatching authentication tag")

    return decrypted_data
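# Annotation (not in the original file): per NIST SP 800-38D, j0 is the
# pre-counter block derived from the nonce; the payload is CTR-decrypted
# starting from inc(j0), and the tag is verified as E(K, j0) XOR GHASH(H, ...)
# via the aes_ctr_encrypt(s_tag, key, j0) comparison above.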
def aes_encrypt(data, expanded_key):

@@ -138,7 +178,7 @@ def aes_encrypt(data, expanded_key):
        data = sub_bytes(data)
        data = shift_rows(data)
        if i != rounds:
            data = mix_columns(data)
            data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX))
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

    return data

@@ -157,7 +197,7 @@ def aes_decrypt(data, expanded_key):
    for i in range(rounds, 0, -1):
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
        if i != rounds:
            data = mix_columns_inv(data)
            data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
        data = shift_rows_inv(data)
        data = sub_bytes_inv(data)
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

@@ -189,15 +229,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
    nonce = data[:NONCE_LENGTH_BYTES]
    cipher = data[NONCE_LENGTH_BYTES:]

    class Counter(object):
        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)

        def next_value(self):
            temp = self.__value
            self.__value = inc(self.__value)
            return temp

    decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
    decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
    plaintext = intlist_to_bytes(decrypted_data)

    return plaintext

@@ -278,6 +310,47 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                      0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)


def key_expansion(data):
    """
    Generate key schedule

    @param {int[]} data        16/24/32-Byte cipher key
    @returns {int[]}           176/208/240-Byte expanded key
    """
    data = data[:]  # copy
    rcon_iteration = 1
    key_size_bytes = len(data)
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

    while len(data) < expanded_key_size_bytes:
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

    for _ in range(3):
        temp = data[-4:]
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

    if key_size_bytes == 32:
        temp = data[-4:]
        temp = sub_bytes(temp)
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

    for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
        temp = data[-4:]
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    data = data[:expanded_key_size_bytes]

    return data


def iter_vector(iv):
    while True:
        yield iv
        iv = inc(iv)


def sub_bytes(data):
    return [SBOX[x] for x in data]

@@ -302,48 +375,36 @@ def xor(data1, data2):
    return [x ^ y for x, y in zip(data1, data2)]


def rijndael_mul(a, b):
    if(a == 0 or b == 0):
        return 0
    return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]


def mix_column(data, matrix):
    data_mixed = []
    for row in range(4):
        mixed = 0
        for column in range(4):
            # xor is (+) and (-)
            mixed ^= rijndael_mul(data[column], matrix[row][column])
        data_mixed.append(mixed)
    return data_mixed


def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
    data_mixed = []
    for i in range(4):
        column = data[i * 4: (i + 1) * 4]
        data_mixed += mix_column(column, matrix)
    return data_mixed


def mix_columns_inv(data):
    return mix_columns(data, MIX_COLUMN_MATRIX_INV)
def iter_mix_columns(data, matrix):
    for i in (0, 4, 8, 12):
        for row in matrix:
            mixed = 0
            for j in range(4):
                # xor is (+) and (-)
                mixed ^= (0 if data[i:i + 4][j] == 0 or row[j] == 0 else
                          RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[data[i + j]] + RIJNDAEL_LOG_TABLE[row[j]]) % 0xFF])
            yield mixed


def shift_rows(data):
    data_shifted = []
    for column in range(4):
        for row in range(4):
            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
    return data_shifted
    return [data[((column + row) & 0b11) * 4 + row] for column in range(4) for row in range(4)]


def shift_rows_inv(data):
    return [data[((column - row) & 0b11) * 4 + row] for column in range(4) for row in range(4)]


def shift_block(data):
    data_shifted = []
    for column in range(4):
        for row in range(4):
            data_shifted.append(data[((column - row) & 0b11) * 4 + row])

    bit = 0
    for n in data:
        if bit:
            n |= 0x100
        bit = n & 1
        n >>= 1
        data_shifted.append(n)

    return data_shifted

@@ -358,4 +419,50 @@ def inc(data):
    return data


__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
def block_product(block_x, block_y):
    # NIST SP 800-38D, Algorithm 1

    if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
        raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES)

    block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
    block_v = block_y[:]
    block_z = [0] * BLOCK_SIZE_BYTES

    for i in block_x:
        for bit in range(7, -1, -1):
            if i & (1 << bit):
                block_z = xor(block_z, block_v)

            do_xor = block_v[-1] & 1
            block_v = shift_block(block_v)
            if do_xor:
                block_v = xor(block_v, block_r)

    return block_z


def ghash(subkey, data):
    # NIST SP 800-38D, Algorithm 2

    if len(data) % BLOCK_SIZE_BYTES:
        raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES)

    last_y = [0] * BLOCK_SIZE_BYTES
    for i in range(0, len(data), BLOCK_SIZE_BYTES):
        block = data[i : i + BLOCK_SIZE_BYTES]  # noqa: E203
        last_y = block_product(xor(last_y, block), subkey)

    return last_y
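# Math annotation (not in the original file): GHASH chains the blocks through
# multiplication in GF(2^128) reduced by x^128 + x^7 + x^2 + x + 1 (the 0xE1
# constant in block_product): Y_i = (Y_{i-1} XOR X_i) * H.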
__all__ = [
    'aes_ctr_decrypt',
    'aes_cbc_decrypt',
    'aes_cbc_decrypt_bytes',
    'aes_decrypt_text',
    'aes_encrypt',
    'aes_gcm_decrypt_and_verify',
    'aes_gcm_decrypt_and_verify_bytes',
    'key_expansion'
]
@@ -50,6 +50,7 @@ class Cache(object):
            except OSError as ose:
                if ose.errno != errno.EEXIST:
                    raise
            self._ydl.write_debug(f'Saving {section}.{key} to cache')
            write_json_file(data, fn)
        except Exception:
            tb = traceback.format_exc()

@@ -66,6 +67,7 @@ class Cache(object):
        try:
            try:
                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
                    self._ydl.write_debug(f'Loading {section}.{key} from cache')
                    return json.load(cachef)
            except ValueError:
                try:
@@ -33,6 +33,8 @@ class compat_HTMLParseError(Exception):
    pass


# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
    return ctypes.WINFUNCTYPE(*args, **kwargs)

@@ -130,6 +132,39 @@ except AttributeError:
    asyncio.run = compat_asyncio_run


# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
# See https://github.com/yt-dlp/yt-dlp/issues/792
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
if compat_os_name in ('nt', 'ce') and 'HOME' in os.environ:
    _userhome = os.environ['HOME']

    def compat_expanduser(path):
        if not path.startswith('~'):
            return path
        i = path.replace('\\', '/', 1).find('/')  # ~user
        if i < 0:
            i = len(path)
        userhome = os.path.join(os.path.dirname(_userhome), path[1:i]) if i > 1 else _userhome
        return userhome + path[i:]
else:
    compat_expanduser = os.path.expanduser
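# Behaviour sketch (illustrative, assuming HOME=C:\Users\me on Windows):
#   compat_expanduser('~/yt-dlp')  == 'C:\\Users\\me/yt-dlp'
#   compat_expanduser('~other/x')  == 'C:\\Users\\other/x'
# i.e. unlike os.path.expanduser on Python 3.8+, %HOME% is honored.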
try:
    from Cryptodome.Cipher import AES as compat_pycrypto_AES
except ImportError:
    try:
        from Crypto.Cipher import AES as compat_pycrypto_AES
    except ImportError:
        compat_pycrypto_AES = None


def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.python.org/issue30075
    if compat_os_name != 'nt':
        return
    os.system('')


# Deprecated

compat_basestring = str

@@ -152,7 +187,6 @@ compat_cookies = http.cookies
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_etree_Element = etree.Element
compat_etree_register_namespace = etree.register_namespace
compat_expanduser = os.path.expanduser
compat_get_terminal_size = shutil.get_terminal_size
compat_getenv = os.getenv
compat_getpass = getpass.getpass

@@ -224,6 +258,7 @@ __all__ = [
    'compat_os_name',
    'compat_parse_qs',
    'compat_print',
    'compat_pycrypto_AES',
    'compat_realpath',
    'compat_setenv',
    'compat_shlex_quote',

@@ -252,5 +287,6 @@ __all__ = [
    'compat_xml_parse_error',
    'compat_xpath',
    'compat_zip',
    'windows_enable_vt_mode',
    'workaround_optparse_bug9161',
]
@@ -9,16 +9,14 @@ import tempfile
from datetime import datetime, timedelta, timezone
from hashlib import pbkdf2_hmac

from yt_dlp.aes import aes_cbc_decrypt
from yt_dlp.compat import (
from .aes import aes_cbc_decrypt_bytes, aes_gcm_decrypt_and_verify_bytes
from .compat import (
    compat_b64decode,
    compat_cookiejar_Cookie,
)
from yt_dlp.utils import (
from .utils import (
    bug_reports_message,
    bytes_to_intlist,
    expand_path,
    intlist_to_bytes,
    process_communicate_or_kill,
    YoutubeDLCookieJar,
)

@@ -32,12 +30,6 @@ except ImportError:
    SQLITE_AVAILABLE = False


try:
    from Crypto.Cipher import AES
    CRYPTO_AVAILABLE = True
except ImportError:
    CRYPTO_AVAILABLE = False

try:
    import keyring
    KEYRING_AVAILABLE = True

@@ -123,7 +115,7 @@ def _extract_firefox_cookies(profile, logger):
    cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite')
    if cookie_database_path is None:
        raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
    logger.debug('extracting from: "{}"'.format(cookie_database_path))
    logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))

    with tempfile.TemporaryDirectory(prefix='youtube_dl') as tmpdir:
        cursor = None

@@ -240,7 +232,7 @@ def _extract_chrome_cookies(browser_name, profile, logger):
    cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies')
    if cookie_database_path is None:
        raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
    logger.debug('extracting from: "{}"'.format(cookie_database_path))
    logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))

    decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger)

@@ -361,7 +353,7 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_keyring_name, logger):
        self._logger = logger
        password = _get_mac_keyring_password(browser_keyring_name)
        password = _get_mac_keyring_password(browser_keyring_name, logger)
        self._v10_key = None if password is None else self.derive_key(password)

    @staticmethod

@@ -400,11 +392,6 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
        if self._v10_key is None:
            self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
            return None
        elif not CRYPTO_AVAILABLE:
            self._logger.warning('cannot decrypt cookie as the `pycryptodome` module is not installed. '
                                 'Please install by running `python3 -m pip install pycryptodome`',
                                 only_once=True)
            return None

        # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
        # kNonceLength

@@ -559,7 +546,7 @@ def _parse_safari_cookies_record(data, jar, logger):
            p.skip_to(value_offset)
            value = p.read_cstring()
        except UnicodeDecodeError:
            logger.warning('failed to parse cookie because UTF-8 decoding failed')
            logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
            return record_size

    p.skip_to(record_size, 'space at the end of the record')

@@ -605,11 +592,13 @@ def _get_linux_keyring_password(browser_keyring_name):
    return password.encode('utf-8')


def _get_mac_keyring_password(browser_keyring_name):
def _get_mac_keyring_password(browser_keyring_name, logger):
    if KEYRING_AVAILABLE:
        logger.debug('using keyring to obtain password')
        password = keyring.get_password('{} Safe Storage'.format(browser_keyring_name), browser_keyring_name)
        return password.encode('utf-8')
    else:
        logger.debug('using find-generic-password to obtain password')
        proc = subprocess.Popen(['security', 'find-generic-password',
                                 '-w',  # write password to stdout
                                 '-a', browser_keyring_name,  # match 'account'

@@ -618,8 +607,11 @@ def _get_mac_keyring_password(browser_keyring_name):
                                stderr=subprocess.DEVNULL)
        try:
            stdout, stderr = process_communicate_or_kill(proc)
            if stdout[-1:] == b'\n':
                stdout = stdout[:-1]
            return stdout
        except BaseException:
        except BaseException as e:
            logger.warning(f'exception running find-generic-password: {type(e).__name__}({e})')
            return None


@@ -648,29 +640,26 @@ def pbkdf2_sha1(password, salt, iterations, key_length):


def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
    plaintext = aes_cbc_decrypt(bytes_to_intlist(ciphertext),
                                bytes_to_intlist(key),
                                bytes_to_intlist(initialization_vector))
    plaintext = aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector)
    padding_length = plaintext[-1]
    try:
        return intlist_to_bytes(plaintext[:-padding_length]).decode('utf-8')
        return plaintext[:-padding_length].decode('utf-8')
    except UnicodeDecodeError:
        logger.warning('failed to decrypt cookie because UTF-8 decoding failed. Possibly the key is wrong?')
        logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
        return None


def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
    cipher = AES.new(key, AES.MODE_GCM, nonce)
    try:
        plaintext = cipher.decrypt_and_verify(ciphertext, authentication_tag)
        plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
    except ValueError:
        logger.warning('failed to decrypt cookie because the MAC check failed. Possibly the key is wrong?')
        logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
        return None

    try:
        return plaintext.decode('utf-8')
    except UnicodeDecodeError:
        logger.warning('failed to decrypt cookie because UTF-8 decoding failed. Possibly the key is wrong?')
        logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
        return None
|
||||
|
||||
@@ -698,7 +687,7 @@ def _decrypt_windows_dpapi(ciphertext, logger):
|
||||
ctypes.byref(blob_out) # pDataOut
|
||||
)
|
||||
if not ret:
|
||||
logger.warning('failed to decrypt with DPAPI')
|
||||
logger.warning('failed to decrypt with DPAPI', only_once=True)
|
||||
return None
|
||||
|
||||
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
|
||||
@@ -748,6 +737,7 @@ def _is_path(value):
|
||||
|
||||
|
||||
def _parse_browser_specification(browser_name, profile=None):
|
||||
browser_name = browser_name.lower()
|
||||
if browser_name not in SUPPORTED_BROWSERS:
|
||||
raise ValueError(f'unsupported browser: "{browser_name}"')
|
||||
if profile is not None and _is_path(profile):
|
||||
|
||||
@@ -3,17 +3,20 @@ from __future__ import unicode_literals
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
determine_protocol,
|
||||
NO_DEFAULT
|
||||
)
|
||||
|
||||
|
||||
def _get_real_downloader(info_dict, protocol=None, *args, **kwargs):
|
||||
def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=None, to_stdout=False):
|
||||
info_dict['protocol'] = determine_protocol(info_dict)
|
||||
info_copy = info_dict.copy()
|
||||
if protocol:
|
||||
info_copy['protocol'] = protocol
|
||||
return get_suitable_downloader(info_copy, *args, **kwargs)
|
||||
info_copy['to_stdout'] = to_stdout
|
||||
return _get_suitable_downloader(info_copy, params, default)
|
||||
|
||||
|
||||
# Some of these require _get_real_downloader
|
||||
# Some of these require get_suitable_downloader
|
||||
from .common import FileDownloader
|
||||
from .dash import DashSegmentsFD
|
||||
from .f4m import F4mFD
|
||||
@@ -69,32 +72,39 @@ def shorten_protocol_name(proto, simplify=False):
|
||||
return short_protocol_names.get(proto, proto)
|
||||
|
||||
|
||||
def get_suitable_downloader(info_dict, params={}, default=HttpFD):
|
||||
def _get_suitable_downloader(info_dict, params, default):
|
||||
"""Get the downloader class that can handle the info dict."""
|
||||
protocol = determine_protocol(info_dict)
|
||||
info_dict['protocol'] = protocol
|
||||
if default is NO_DEFAULT:
|
||||
default = HttpFD
|
||||
|
||||
# if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
|
||||
# return FFmpegFD
|
||||
|
||||
protocol = info_dict['protocol']
|
||||
downloaders = params.get('external_downloader')
|
||||
external_downloader = (
|
||||
downloaders if isinstance(downloaders, compat_str) or downloaders is None
|
||||
else downloaders.get(shorten_protocol_name(protocol, True), downloaders.get('default')))
|
||||
if external_downloader and external_downloader.lower() == 'native':
|
||||
external_downloader = 'native'
|
||||
|
||||
if external_downloader not in (None, 'native'):
|
||||
if external_downloader is None:
|
||||
if info_dict['to_stdout'] and FFmpegFD.can_merge_formats(info_dict, params):
|
||||
return FFmpegFD
|
||||
elif external_downloader.lower() != 'native':
|
||||
ed = get_external_downloader(external_downloader)
|
||||
if ed.can_download(info_dict, external_downloader):
|
||||
return ed
|
||||
|
||||
if protocol == 'http_dash_segments':
|
||||
if info_dict.get('is_live') and (external_downloader or '').lower() != 'native':
|
||||
return FFmpegFD
|
||||
|
||||
if protocol in ('m3u8', 'm3u8_native'):
|
||||
if info_dict.get('is_live'):
|
||||
return FFmpegFD
|
||||
elif external_downloader == 'native':
|
||||
elif (external_downloader or '').lower() == 'native':
|
||||
return HlsFD
|
||||
elif _get_real_downloader(info_dict, 'm3u8_frag_urls', params, None):
|
||||
elif get_suitable_downloader(
|
||||
info_dict, params, None, protocol='m3u8_frag_urls', to_stdout=info_dict['to_stdout']):
|
||||
return HlsFD
|
||||
elif params.get('hls_prefer_native') is True:
|
||||
return HlsFD
|
||||
|
||||
@@ -3,11 +3,9 @@ from __future__ import division, unicode_literals
|
||||
import copy
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import random
|
||||
|
||||
from ..compat import compat_os_name
|
||||
from ..utils import (
|
||||
decodeArgument,
|
||||
encodeFilename,
|
||||
@@ -16,6 +14,12 @@ from ..utils import (
|
||||
shell_quote,
|
||||
timeconvert,
|
||||
)
|
||||
from ..minicurses import (
|
||||
MultilineLogger,
|
||||
MultilinePrinter,
|
||||
QuietMultilinePrinter,
|
||||
BreaklineStatusPrinter
|
||||
)
|
||||
|
||||
|
||||
class FileDownloader(object):
|
||||
@@ -39,20 +43,22 @@ class FileDownloader(object):
|
||||
noresizebuffer: Do not automatically resize the download buffer.
|
||||
continuedl: Try to continue downloads if possible.
|
||||
noprogress: Do not print the progress bar.
|
||||
logtostderr: Log messages to stderr instead of stdout.
|
||||
consoletitle: Display progress in console window's titlebar.
|
||||
nopart: Do not use temporary .part files.
|
||||
updatetime: Use the Last-modified header to set output file timestamps.
|
||||
test: Download only first bytes to test the downloader.
|
||||
min_filesize: Skip files smaller than this size
|
||||
max_filesize: Skip files larger than this size
|
||||
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
|
||||
external_downloader_args: A list of additional command-line arguments for the
|
||||
external downloader.
|
||||
external_downloader_args: A dictionary of downloader keys (in lower case)
|
||||
and a list of additional command-line arguments for the
|
||||
executable. Use 'default' as the name for arguments to be
|
||||
passed to all downloaders. For compatibility with youtube-dl,
|
||||
a single list of args can also be used
|
||||
hls_use_mpegts: Use the mpegts container for HLS videos.
|
||||
http_chunk_size: Size of a chunk for chunk-based HTTP downloading. May be
|
||||
useful for bypassing bandwidth throttling imposed by
|
||||
a webserver (experimental)
|
||||
progress_template: See YoutubeDL.py
|
||||
|
||||
Subclasses of this one must re-define the real_download method.
|
||||
"""
|
||||
@@ -65,6 +71,7 @@ class FileDownloader(object):
|
||||
self.ydl = ydl
|
||||
self._progress_hooks = []
|
||||
self.params = params
|
||||
self._prepare_multiline_status()
|
||||
self.add_progress_hook(self.report_progress)
|
||||
|
||||
@staticmethod
|
||||
@@ -201,12 +208,12 @@ class FileDownloader(object):
|
||||
return filename + '.ytdl'
|
||||
|
||||
def try_rename(self, old_filename, new_filename):
|
||||
if old_filename == new_filename:
|
||||
return
|
||||
try:
|
||||
if old_filename == new_filename:
|
||||
return
|
||||
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
|
||||
os.replace(old_filename, new_filename)
|
||||
except (IOError, OSError) as err:
|
||||
self.report_error('unable to rename file: %s' % error_to_compat_str(err))
|
||||
self.report_error(f'unable to rename file: {err}')
|
||||
|
||||
def try_utime(self, filename, last_modified_hdr):
|
||||
"""Try to set the last-modified time of the given file."""
|
||||
@@ -233,39 +240,46 @@ class FileDownloader(object):
|
||||
"""Report destination filename."""
|
||||
self.to_screen('[download] Destination: ' + filename)
|
||||
|
||||
def _report_progress_status(self, msg, is_last_line=False):
|
||||
fullmsg = '[download] ' + msg
|
||||
if self.params.get('progress_with_newline', False):
|
||||
self.to_screen(fullmsg)
|
||||
def _prepare_multiline_status(self, lines=1):
|
||||
if self.params.get('noprogress'):
|
||||
self._multiline = QuietMultilinePrinter()
|
||||
elif self.ydl.params.get('logger'):
|
||||
self._multiline = MultilineLogger(self.ydl.params['logger'], lines)
|
||||
elif self.params.get('progress_with_newline'):
|
||||
self._multiline = BreaklineStatusPrinter(self.ydl._screen_file, lines)
|
||||
else:
|
||||
if compat_os_name == 'nt':
|
||||
prev_len = getattr(self, '_report_progress_prev_line_length',
|
||||
0)
|
||||
if prev_len > len(fullmsg):
|
||||
fullmsg += ' ' * (prev_len - len(fullmsg))
|
||||
self._report_progress_prev_line_length = len(fullmsg)
|
||||
clear_line = '\r'
|
||||
else:
|
||||
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
|
||||
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
|
||||
self.to_console_title('yt-dlp ' + msg)
|
||||
self._multiline = MultilinePrinter(self.ydl._screen_file, lines, not self.params.get('quiet'))
|
||||
|
||||
def _finish_multiline_status(self):
|
||||
self._multiline.end()
|
||||
|
||||
def _report_progress_status(self, s):
|
||||
progress_dict = s.copy()
|
||||
progress_dict.pop('info_dict')
|
||||
progress_dict = {'info': s['info_dict'], 'progress': progress_dict}
|
||||
|
||||
progress_template = self.params.get('progress_template', {})
|
||||
self._multiline.print_at_line(self.ydl.evaluate_outtmpl(
|
||||
progress_template.get('download') or '[download] %(progress._default_template)s',
|
||||
progress_dict), s.get('progress_idx') or 0)
|
||||
self.to_console_title(self.ydl.evaluate_outtmpl(
|
||||
progress_template.get('download-title') or 'yt-dlp %(progress._default_template)s',
|
||||
progress_dict))
|
||||
|
||||
def report_progress(self, s):
|
||||
if s['status'] == 'finished':
|
||||
if self.params.get('noprogress', False):
|
||||
if self.params.get('noprogress'):
|
||||
self.to_screen('[download] Download completed')
|
||||
else:
|
||||
msg_template = '100%%'
|
||||
if s.get('total_bytes') is not None:
|
||||
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
|
||||
msg_template += ' of %(_total_bytes_str)s'
|
||||
if s.get('elapsed') is not None:
|
||||
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
||||
msg_template += ' in %(_elapsed_str)s'
|
||||
self._report_progress_status(
|
||||
msg_template % s, is_last_line=True)
|
||||
|
||||
if self.params.get('noprogress'):
|
||||
msg_template = '100%%'
|
||||
if s.get('total_bytes') is not None:
|
||||
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
|
||||
msg_template += ' of %(_total_bytes_str)s'
|
||||
if s.get('elapsed') is not None:
|
||||
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
||||
msg_template += ' in %(_elapsed_str)s'
|
||||
s['_percent_str'] = self.format_percent(100)
|
||||
s['_default_template'] = msg_template % s
|
||||
self._report_progress_status(s)
|
||||
return
|
||||
|
||||
if s['status'] != 'downloading':
|
||||
@@ -307,8 +321,8 @@ class FileDownloader(object):
|
||||
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
|
||||
else:
|
||||
msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
|
||||
|
||||
self._report_progress_status(msg_template % s)
|
||||
s['_default_template'] = msg_template % s
|
||||
self._report_progress_status(s)
|
||||
|
||||
def report_resuming_byte(self, resume_len):
|
||||
"""Report attempt to resume at given byte."""
|
||||
@@ -320,12 +334,9 @@ class FileDownloader(object):
|
||||
'[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
|
||||
% (error_to_compat_str(err), count, self.format_retries(retries)))
|
||||
|
||||
def report_file_already_downloaded(self, file_name):
|
||||
def report_file_already_downloaded(self, *args, **kwargs):
|
||||
"""Report file has already been fully downloaded."""
|
||||
try:
|
||||
self.to_screen('[download] %s has already been downloaded' % file_name)
|
||||
except UnicodeEncodeError:
|
||||
self.to_screen('[download] The file has already been downloaded')
|
||||
return self.ydl.report_file_already_downloaded(*args, **kwargs)
|
||||
|
||||
def report_unable_to_resume(self):
|
||||
"""Report it was impossible to resume download."""
|
||||
@@ -343,7 +354,7 @@ class FileDownloader(object):
|
||||
"""
|
||||
|
||||
nooverwrites_and_exists = (
|
||||
not self.params.get('overwrites', subtitle)
|
||||
not self.params.get('overwrites', True)
|
||||
and os.path.exists(encodeFilename(filename))
|
||||
)
|
||||
|
||||
@@ -383,7 +394,9 @@ class FileDownloader(object):
|
||||
'[download] Sleeping %s seconds ...' % (
|
||||
sleep_interval_sub))
|
||||
time.sleep(sleep_interval_sub)
|
||||
return self.real_download(filename, info_dict), True
|
||||
ret = self.real_download(filename, info_dict)
|
||||
self._finish_multiline_status()
|
||||
return ret, True
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
"""Real download process. Redefine in subclasses."""
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..downloader import _get_real_downloader
|
||||
from ..downloader import get_suitable_downloader
|
||||
from .fragment import FragmentFD
|
||||
|
||||
from ..utils import urljoin
|
||||
@@ -15,11 +15,15 @@ class DashSegmentsFD(FragmentFD):
|
||||
FD_NAME = 'dashsegments'
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
if info_dict.get('is_live'):
|
||||
self.report_error('Live DASH videos are not supported')
|
||||
|
||||
fragment_base_url = info_dict.get('fragment_base_url')
|
||||
fragments = info_dict['fragments'][:1] if self.params.get(
|
||||
'test', False) else info_dict['fragments']
|
||||
|
||||
real_downloader = _get_real_downloader(info_dict, 'dash_frag_urls', self.params, None)
|
||||
real_downloader = get_suitable_downloader(
|
||||
info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))
|
||||
|
||||
ctx = {
|
||||
'filename': filename,
|
||||
@@ -54,9 +58,6 @@ class DashSegmentsFD(FragmentFD):
|
||||
info_copy = info_dict.copy()
|
||||
info_copy['fragments'] = fragments_to_download
|
||||
fd = real_downloader(self.ydl, self.params)
|
||||
# TODO: Make progress updates work without hooking twice
|
||||
# for ph in self._progress_hooks:
|
||||
# fd.add_progress_hook(ph)
|
||||
return fd.real_download(filename, info_copy)
|
||||
|
||||
return self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
|
||||
|
||||
@@ -6,13 +6,7 @@ import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
try:
|
||||
from Crypto.Cipher import AES
|
||||
can_decrypt_frag = True
|
||||
except ImportError:
|
||||
can_decrypt_frag = False
|
||||
|
||||
from .common import FileDownloader
|
||||
from .fragment import FragmentFD
|
||||
from ..compat import (
|
||||
compat_setenv,
|
||||
compat_str,
|
||||
@@ -22,20 +16,20 @@ from ..utils import (
|
||||
cli_option,
|
||||
cli_valueless_option,
|
||||
cli_bool_option,
|
||||
cli_configuration_args,
|
||||
_configuration_args,
|
||||
encodeFilename,
|
||||
encodeArgument,
|
||||
handle_youtubedl_headers,
|
||||
check_executable,
|
||||
is_outdated_version,
|
||||
process_communicate_or_kill,
|
||||
sanitized_Request,
|
||||
sanitize_open,
|
||||
)
|
||||
|
||||
|
||||
class ExternalFD(FileDownloader):
|
||||
class ExternalFD(FragmentFD):
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps')
|
||||
can_download_to_stdout = False
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
self.report_destination(filename)
|
||||
@@ -93,7 +87,9 @@ class ExternalFD(FileDownloader):
|
||||
|
||||
@classmethod
|
||||
def supports(cls, info_dict):
|
||||
return info_dict['protocol'] in cls.SUPPORTED_PROTOCOLS
|
||||
return (
|
||||
(cls.can_download_to_stdout or not info_dict.get('to_stdout'))
|
||||
and info_dict['protocol'] in cls.SUPPORTED_PROTOCOLS)
|
||||
|
||||
@classmethod
|
||||
def can_download(cls, info_dict, path=None):
|
||||
@@ -108,11 +104,10 @@ class ExternalFD(FileDownloader):
|
||||
def _valueless_option(self, command_option, param, expected_value=True):
|
||||
return cli_valueless_option(self.params, command_option, param, expected_value)
|
||||
|
||||
def _configuration_args(self, *args, **kwargs):
|
||||
return cli_configuration_args(
|
||||
self.params.get('external_downloader_args'),
|
||||
[self.get_basename(), 'default'],
|
||||
*args, **kwargs)
|
||||
def _configuration_args(self, keys=None, *args, **kwargs):
|
||||
return _configuration_args(
|
||||
self.get_basename(), self.params.get('external_downloader_args'), self.get_basename(),
|
||||
keys, *args, **kwargs)
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
""" Either overwrite this or implement _make_cmd """
|
||||
@@ -144,6 +139,7 @@ class ExternalFD(FileDownloader):
|
||||
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
||||
return -1
|
||||
|
||||
decrypt_fragment = self.decrypter(info_dict)
|
||||
dest, _ = sanitize_open(tmpfilename, 'wb')
|
||||
for frag_index, fragment in enumerate(info_dict['fragments']):
|
||||
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
|
||||
@@ -155,22 +151,7 @@ class ExternalFD(FileDownloader):
|
||||
continue
|
||||
self.report_error('Unable to open fragment %d' % frag_index)
|
||||
return -1
|
||||
decrypt_info = fragment.get('decrypt_info')
|
||||
if decrypt_info:
|
||||
if decrypt_info['METHOD'] == 'AES-128':
|
||||
iv = decrypt_info.get('IV')
|
||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
||||
self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
|
||||
encrypted_data = src.read()
|
||||
decrypted_data = AES.new(
|
||||
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(encrypted_data)
|
||||
dest.write(decrypted_data)
|
||||
else:
|
||||
fragment_data = src.read()
|
||||
dest.write(fragment_data)
|
||||
else:
|
||||
fragment_data = src.read()
|
||||
dest.write(fragment_data)
|
||||
dest.write(decrypt_fragment(fragment, src.read()))
|
||||
src.close()
|
||||
if not self.params.get('keep_fragments', False):
|
||||
os.remove(encodeFilename(fragment_filename))
|
||||
@@ -184,10 +165,6 @@ class ExternalFD(FileDownloader):
|
||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||
return p.returncode
|
||||
|
||||
def _prepare_url(self, info_dict, url):
|
||||
headers = info_dict.get('http_headers')
|
||||
return sanitized_Request(url, None, headers) if headers else url
|
||||
|
||||
|
||||
class CurlFD(ExternalFD):
|
||||
AVAILABLE_OPT = '-V'
|
||||
@@ -286,6 +263,7 @@ class Aria2cFD(ExternalFD):
|
||||
if info_dict.get('http_headers') is not None:
|
||||
for key, val in info_dict['http_headers'].items():
|
||||
cmd += ['--header', '%s: %s' % (key, val)]
|
||||
cmd += self._option('--max-overall-download-limit', 'ratelimit')
|
||||
cmd += self._option('--interface', 'source_address')
|
||||
cmd += self._option('--all-proxy', 'proxy')
|
||||
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
|
||||
@@ -340,17 +318,28 @@ class HttpieFD(ExternalFD):
|
||||
|
||||
|
||||
class FFmpegFD(ExternalFD):
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms')
|
||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms', 'http_dash_segments')
|
||||
can_download_to_stdout = True
|
||||
|
||||
@classmethod
|
||||
def available(cls, path=None):
|
||||
# TODO: Fix path for ffmpeg
|
||||
# Fixme: This may be wrong when --ffmpeg-location is used
|
||||
return FFmpegPostProcessor().available
|
||||
|
||||
def on_process_started(self, proc, stdin):
|
||||
""" Override this in subclasses """
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def can_merge_formats(cls, info_dict, params):
|
||||
return (
|
||||
info_dict.get('requested_formats')
|
||||
and info_dict.get('protocol')
|
||||
and not params.get('allow_unplayable_formats')
|
||||
and 'no-direct-merge' not in params.get('compat_opts', [])
|
||||
and cls.can_download(info_dict))
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
urls = [f['url'] for f in info_dict.get('requested_formats', [])] or [info_dict['url']]
|
||||
ffpp = FFmpegPostProcessor(downloader=self)
|
||||
@@ -368,6 +357,9 @@ class FFmpegFD(ExternalFD):
|
||||
if not self.params.get('verbose'):
|
||||
args += ['-hide_banner']
|
||||
|
||||
args += info_dict.get('_ffmpeg_args', [])
|
||||
|
||||
# This option exists only for compatibility. Extractors should use `_ffmpeg_args` instead
|
||||
seekable = info_dict.get('_seekable')
|
||||
if seekable is not None:
|
||||
# setting -seekable prevents ffmpeg from guessing if the server
|
||||
@@ -442,20 +434,20 @@ class FFmpegFD(ExternalFD):
|
||||
elif isinstance(conn, compat_str):
|
||||
args += ['-rtmp_conn', conn]
|
||||
|
||||
for url in urls:
|
||||
args += ['-i', url]
|
||||
for i, url in enumerate(urls):
|
||||
args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', url]
|
||||
|
||||
args += self._configuration_args() + ['-c', 'copy']
|
||||
if info_dict.get('requested_formats'):
|
||||
for (i, fmt) in enumerate(info_dict['requested_formats']):
|
||||
if fmt.get('acodec') != 'none':
|
||||
args.extend(['-map', '%d:a:0' % i])
|
||||
if fmt.get('vcodec') != 'none':
|
||||
args.extend(['-map', '%d:v:0' % i])
|
||||
args += ['-c', 'copy']
|
||||
if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
|
||||
for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
|
||||
stream_number = fmt.get('manifest_stream_number', 0)
|
||||
a_or_v = 'a' if fmt.get('acodec') != 'none' else 'v'
|
||||
args.extend(['-map', f'{i}:{a_or_v}:{stream_number}'])
|
||||
|
||||
if self.params.get('test', False):
|
||||
args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
|
||||
|
||||
ext = info_dict['ext']
|
||||
if protocol in ('m3u8', 'm3u8_native'):
|
||||
use_mpegts = (tmpfilename == '-') or self.params.get('hls_use_mpegts')
|
||||
if use_mpegts is None:
|
||||
@@ -468,12 +460,15 @@ class FFmpegFD(ExternalFD):
|
||||
args += ['-bsf:a', 'aac_adtstoasc']
|
||||
elif protocol == 'rtmp':
|
||||
args += ['-f', 'flv']
|
||||
elif ext == 'mp4' and tmpfilename == '-':
|
||||
args += ['-f', 'mpegts']
|
||||
else:
|
||||
args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
|
||||
args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]
|
||||
|
||||
args += self._configuration_args(('_o1', '_o', ''))
|
||||
|
||||
args = [encodeArgument(opt) for opt in args]
|
||||
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
|
||||
|
||||
self._debug_cmd(args)
|
||||
|
||||
proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
|
||||
@@ -503,7 +498,7 @@ class AVconvFD(FFmpegFD):
|
||||
_BY_NAME = dict(
|
||||
(klass.get_basename(), klass)
|
||||
for name, klass in globals().items()
|
||||
if name.endswith('FD') and name != 'ExternalFD'
|
||||
if name.endswith('FD') and name not in ('ExternalFD', 'FragmentFD')
|
||||
)
|
||||
|
||||
|
||||
|
||||
@@ -3,12 +3,7 @@ from __future__ import division, unicode_literals
|
||||
import os
|
||||
import time
|
||||
import json
|
||||
|
||||
try:
|
||||
from Crypto.Cipher import AES
|
||||
can_decrypt_frag = True
|
||||
except ImportError:
|
||||
can_decrypt_frag = False
|
||||
from math import ceil
|
||||
|
||||
try:
|
||||
import concurrent.futures
|
||||
@@ -18,6 +13,7 @@ except ImportError:
|
||||
|
||||
from .common import FileDownloader
|
||||
from .http import HttpFD
|
||||
from ..aes import aes_cbc_decrypt_bytes
|
||||
from ..compat import (
|
||||
compat_urllib_error,
|
||||
compat_struct_pack,
|
||||
@@ -105,17 +101,19 @@ class FragmentFD(FileDownloader):
|
||||
|
||||
def _write_ytdl_file(self, ctx):
|
||||
frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
|
||||
downloader = {
|
||||
'current_fragment': {
|
||||
'index': ctx['fragment_index'],
|
||||
},
|
||||
}
|
||||
if 'extra_state' in ctx:
|
||||
downloader['extra_state'] = ctx['extra_state']
|
||||
if ctx.get('fragment_count') is not None:
|
||||
downloader['fragment_count'] = ctx['fragment_count']
|
||||
frag_index_stream.write(json.dumps({'downloader': downloader}))
|
||||
frag_index_stream.close()
|
||||
try:
|
||||
downloader = {
|
||||
'current_fragment': {
|
||||
'index': ctx['fragment_index'],
|
||||
},
|
||||
}
|
||||
if 'extra_state' in ctx:
|
||||
downloader['extra_state'] = ctx['extra_state']
|
||||
if ctx.get('fragment_count') is not None:
|
||||
downloader['fragment_count'] = ctx['fragment_count']
|
||||
frag_index_stream.write(json.dumps({'downloader': downloader}))
|
||||
finally:
|
||||
frag_index_stream.close()
|
||||
|
||||
def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
|
||||
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
|
||||
@@ -123,6 +121,7 @@ class FragmentFD(FileDownloader):
|
||||
'url': frag_url,
|
||||
'http_headers': headers or info_dict.get('http_headers'),
|
||||
'request_data': request_data,
|
||||
'ctx_id': ctx.get('ctx_id'),
|
||||
}
|
||||
success = ctx['dl'].download(fragment_filename, fragment_info_dict)
|
||||
if not success:
|
||||
@@ -222,6 +221,7 @@ class FragmentFD(FileDownloader):
|
||||
def _start_frag_download(self, ctx, info_dict):
|
||||
resume_len = ctx['complete_frags_downloaded_bytes']
|
||||
total_frags = ctx['total_frags']
|
||||
ctx_id = ctx.get('ctx_id')
|
||||
# This dict stores the download progress, it's updated by the progress
|
||||
# hook
|
||||
state = {
|
||||
@@ -245,6 +245,12 @@ class FragmentFD(FileDownloader):
|
||||
if s['status'] not in ('downloading', 'finished'):
|
||||
return
|
||||
|
||||
if ctx_id is not None and s.get('ctx_id') != ctx_id:
|
||||
return
|
||||
|
||||
state['max_progress'] = ctx.get('max_progress')
|
||||
state['progress_idx'] = ctx.get('progress_idx')
|
||||
|
||||
time_now = time.time()
|
||||
state['elapsed'] = time_now - start
|
||||
frag_total_bytes = s.get('total_bytes') or 0
|
||||
@@ -304,6 +310,9 @@ class FragmentFD(FileDownloader):
|
||||
'filename': ctx['filename'],
|
||||
'status': 'finished',
|
||||
'elapsed': elapsed,
|
||||
'ctx_id': ctx.get('ctx_id'),
|
||||
'max_progress': ctx.get('max_progress'),
|
||||
'progress_idx': ctx.get('progress_idx'),
|
||||
}, info_dict)
|
||||
|
||||
def _prepare_external_frag_download(self, ctx):
|
||||
@@ -327,7 +336,66 @@ class FragmentFD(FileDownloader):
|
||||
'fragment_index': 0,
|
||||
})
|
||||
|
||||
def download_and_append_fragments(self, ctx, fragments, info_dict, pack_func=None):
|
||||
def decrypter(self, info_dict):
|
||||
_key_cache = {}
|
||||
|
||||
def _get_key(url):
|
||||
if url not in _key_cache:
|
||||
_key_cache[url] = self.ydl.urlopen(self._prepare_url(info_dict, url)).read()
|
||||
return _key_cache[url]
|
||||
|
||||
def decrypt_fragment(fragment, frag_content):
|
||||
decrypt_info = fragment.get('decrypt_info')
|
||||
if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
|
||||
return frag_content
|
||||
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
|
||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
|
||||
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
|
||||
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
|
||||
# not what it decrypts to.
|
||||
if self.params.get('test', False):
|
||||
return frag_content
|
||||
decrypted_data = aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv)
|
||||
return decrypted_data[:-decrypted_data[-1]]
|
||||
|
||||
return decrypt_fragment
|
||||
|
||||
def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
|
||||
'''
|
||||
@params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
|
||||
all args must be either tuple or list
|
||||
'''
|
||||
max_progress = len(args)
|
||||
if max_progress == 1:
|
||||
return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
|
||||
max_workers = self.params.get('concurrent_fragment_downloads', max_progress)
|
||||
self._prepare_multiline_status(max_progress)
|
||||
|
||||
def thread_func(idx, ctx, fragments, info_dict, tpe):
|
||||
ctx['max_progress'] = max_progress
|
||||
ctx['progress_idx'] = idx
|
||||
return self.download_and_append_fragments(ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe)
|
||||
|
||||
class FTPE(concurrent.futures.ThreadPoolExecutor):
|
||||
# has to stop this or it's going to wait on the worker thread itself
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
pass
|
||||
|
||||
spins = []
|
||||
for idx, (ctx, fragments, info_dict) in enumerate(args):
|
||||
tpe = FTPE(ceil(max_workers / max_progress))
|
||||
job = tpe.submit(thread_func, idx, ctx, fragments, info_dict, tpe)
|
||||
spins.append((tpe, job))
|
||||
|
||||
result = True
|
||||
for tpe, job in spins:
|
||||
try:
|
||||
result = result and job.result()
|
||||
finally:
|
||||
tpe.shutdown(wait=True)
|
||||
return result
|
||||
|
||||
def download_and_append_fragments(self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None, tpe=None):
|
||||
fragment_retries = self.params.get('fragment_retries', 0)
|
||||
is_fatal = (lambda idx: idx == 0) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)
|
||||
if not pack_func:
|
||||
@@ -335,7 +403,7 @@ class FragmentFD(FileDownloader):
|
||||
|
||||
def download_fragment(fragment, ctx):
|
||||
frag_index = ctx['fragment_index'] = fragment['frag_index']
|
||||
headers = info_dict.get('http_headers', {})
|
||||
headers = info_dict.get('http_headers', {}).copy()
|
||||
byte_range = fragment.get('byte_range')
|
||||
if byte_range:
|
||||
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
||||
@@ -372,20 +440,6 @@ class FragmentFD(FileDownloader):
|
||||
return False, frag_index
|
||||
return frag_content, frag_index
|
||||
|
||||
def decrypt_fragment(fragment, frag_content):
|
||||
decrypt_info = fragment.get('decrypt_info')
|
||||
if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
|
||||
return frag_content
|
||||
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
|
||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
||||
self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
|
||||
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
|
||||
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
|
||||
# not what it decrypts to.
|
||||
if self.params.get('test', False):
|
||||
return frag_content
|
||||
return AES.new(decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
||||
|
||||
def append_fragment(frag_content, frag_index, ctx):
|
||||
if not frag_content:
|
||||
if not is_fatal(frag_index - 1):
|
||||
@@ -399,6 +453,8 @@ class FragmentFD(FileDownloader):
|
||||
self._append_fragment(ctx, pack_func(frag_content, frag_index))
|
||||
return True
|
||||
|
||||
decrypt_fragment = self.decrypter(info_dict)
|
||||
|
||||
max_workers = self.params.get('concurrent_fragment_downloads', 1)
|
||||
if can_threaded_download and max_workers > 1:
|
||||
|
||||
@@ -408,7 +464,7 @@ class FragmentFD(FileDownloader):
|
||||
return fragment, frag_content, frag_index, ctx_copy.get('fragment_filename_sanitized')
|
||||
|
||||
self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
|
||||
with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
|
||||
for fragment, frag_content, frag_index, frag_filename in pool.map(_download_fragment, fragments):
|
||||
ctx['fragment_filename_sanitized'] = frag_filename
|
||||
ctx['fragment_index'] = frag_index
|
||||
@@ -422,5 +478,8 @@ class FragmentFD(FileDownloader):
|
||||
if not result:
|
||||
return False
|
||||
|
||||
if finish_func is not None:
|
||||
ctx['dest_stream'].write(finish_func())
|
||||
ctx['dest_stream'].flush()
|
||||
self._finish_frag_download(ctx, info_dict)
|
||||
return True
|
||||
|
||||
@@ -4,11 +4,12 @@ import re
|
||||
import io
|
||||
import binascii
|
||||
|
||||
from ..downloader import _get_real_downloader
|
||||
from .fragment import FragmentFD, can_decrypt_frag
|
||||
from ..downloader import get_suitable_downloader
|
||||
from .fragment import FragmentFD
|
||||
from .external import FFmpegFD
|
||||
|
||||
from ..compat import (
|
||||
compat_pycrypto_AES,
|
||||
compat_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
@@ -29,7 +30,7 @@ class HlsFD(FragmentFD):
|
||||
FD_NAME = 'hlsnative'
|
||||
|
||||
@staticmethod
|
||||
def can_download(manifest, info_dict, allow_unplayable_formats=False, with_crypto=can_decrypt_frag):
|
||||
def can_download(manifest, info_dict, allow_unplayable_formats=False):
|
||||
UNSUPPORTED_FEATURES = [
|
||||
# r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
||||
|
||||
@@ -56,9 +57,6 @@ class HlsFD(FragmentFD):
|
||||
|
||||
def check_results():
|
||||
yield not info_dict.get('is_live')
|
||||
is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
|
||||
yield with_crypto or not is_aes128_enc
|
||||
yield not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest)
|
||||
for feature in UNSUPPORTED_FEATURES:
|
||||
yield not re.search(feature, manifest)
|
||||
return all(check_results())
|
||||
@@ -71,25 +69,27 @@ class HlsFD(FragmentFD):
|
||||
man_url = urlh.geturl()
|
||||
s = urlh.read().decode('utf-8', 'ignore')
|
||||
|
||||
if not self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')):
|
||||
if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):
|
||||
self.report_error('pycryptodome not found. Please install')
|
||||
return False
|
||||
if self.can_download(s, info_dict, with_crypto=True):
|
||||
self.report_warning('pycryptodome is needed to download this file natively')
|
||||
can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None
|
||||
if can_download and not compat_pycrypto_AES and '#EXT-X-KEY:METHOD=AES-128' in s:
|
||||
if FFmpegFD.available():
|
||||
can_download, message = False, 'The stream has AES-128 encryption and pycryptodomex is not available'
|
||||
else:
|
||||
message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
|
||||
'Decryption will be performed natively, but will be extremely slow')
|
||||
if not can_download:
|
||||
message = message or 'Unsupported features have been detected'
|
||||
fd = FFmpegFD(self.ydl, self.params)
|
||||
self.report_warning(
|
||||
'%s detected unsupported features; extraction will be delegated to %s' % (self.FD_NAME, fd.get_basename()))
|
||||
# TODO: Make progress updates work without hooking twice
|
||||
# for ph in self._progress_hooks:
|
||||
# fd.add_progress_hook(ph)
|
||||
self.report_warning(f'{message}; extraction will be delegated to {fd.get_basename()}')
|
||||
return fd.real_download(filename, info_dict)
|
||||
elif message:
|
||||
self.report_warning(message)
|
||||
|
||||
is_webvtt = info_dict['ext'] == 'vtt'
|
||||
if is_webvtt:
|
||||
real_downloader = None # Packing the fragments is not currently supported for external downloader
|
||||
else:
|
||||
real_downloader = _get_real_downloader(info_dict, 'm3u8_frag_urls', self.params, None)
|
||||
real_downloader = get_suitable_downloader(
|
||||
info_dict, self.params, None, protocol='m3u8_frag_urls', to_stdout=(filename == '-'))
|
||||
if real_downloader and not real_downloader.supports_manifest(s):
|
||||
real_downloader = None
|
||||
if real_downloader:
|
||||
@@ -174,6 +174,7 @@ class HlsFD(FragmentFD):
|
||||
'byte_range': byte_range,
|
||||
'media_sequence': media_sequence,
|
||||
})
|
||||
media_sequence += 1
|
||||
|
||||
elif line.startswith('#EXT-X-MAP'):
|
||||
if format_index and discontinuity_count != format_index:
|
||||
@@ -198,6 +199,7 @@ class HlsFD(FragmentFD):
|
||||
'byte_range': byte_range,
|
||||
'media_sequence': media_sequence
|
||||
})
|
||||
media_sequence += 1
|
||||
|
||||
if map_info.get('BYTERANGE'):
|
||||
splitted_byte_range = map_info.get('BYTERANGE').split('@')
|
||||
@@ -237,7 +239,6 @@ class HlsFD(FragmentFD):
|
||||
elif line.startswith('#EXT-X-DISCONTINUITY'):
|
||||
discontinuity_count += 1
|
||||
i += 1
|
||||
media_sequence += 1
|
||||
|
||||
# We only download the first fragment during the test
|
||||
if self.params.get('test', False):
|
||||
@@ -256,35 +257,47 @@ class HlsFD(FragmentFD):
|
||||
def pack_fragment(frag_content, frag_index):
|
||||
output = io.StringIO()
|
||||
adjust = 0
|
||||
overflow = False
|
||||
mpegts_last = None
|
||||
for block in webvtt.parse_fragment(frag_content):
|
||||
if isinstance(block, webvtt.CueBlock):
|
||||
extra_state['webvtt_mpegts_last'] = mpegts_last
|
||||
if overflow:
|
||||
extra_state['webvtt_mpegts_adjust'] += 1
|
||||
overflow = False
|
||||
block.start += adjust
|
||||
block.end += adjust
|
||||
|
||||
dedup_window = extra_state.setdefault('webvtt_dedup_window', [])
|
||||
cue = block.as_json
|
||||
|
||||
# skip the cue if an identical one appears
|
||||
# in the window of potential duplicates
|
||||
# and prune the window of unviable candidates
|
||||
ready = []
|
||||
|
||||
i = 0
|
||||
skip = True
|
||||
is_new = True
|
||||
while i < len(dedup_window):
|
||||
window_cue = dedup_window[i]
|
||||
if window_cue == cue:
|
||||
break
|
||||
if window_cue['end'] >= cue['start']:
|
||||
i += 1
|
||||
wcue = dedup_window[i]
|
||||
wblock = webvtt.CueBlock.from_json(wcue)
|
||||
i += 1
|
||||
if wblock.hinges(block):
|
||||
wcue['end'] = block.end
|
||||
is_new = False
|
||||
continue
|
||||
if wblock == block:
|
||||
is_new = False
|
||||
continue
|
||||
if wblock.end > block.start:
|
||||
continue
|
||||
ready.append(wblock)
|
||||
i -= 1
|
||||
del dedup_window[i]
|
||||
else:
|
||||
skip = False
|
||||
|
||||
if skip:
|
||||
continue
|
||||
if is_new:
|
||||
dedup_window.append(block.as_json)
|
||||
for block in ready:
|
||||
block.write_into(output)
|
||||
|
||||
# add the cue to the window
|
||||
dedup_window.append(cue)
|
||||
# we only emit cues once they fall out of the duplicate window
|
||||
continue
|
||||
elif isinstance(block, webvtt.Magic):
|
||||
# take care of MPEG PES timestamp overflow
|
||||
if block.mpegts is None:
|
||||
@@ -292,9 +305,9 @@ class HlsFD(FragmentFD):
|
||||
extra_state.setdefault('webvtt_mpegts_adjust', 0)
|
||||
block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
|
||||
if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
|
||||
extra_state['webvtt_mpegts_adjust'] += 1
|
||||
overflow = True
|
||||
block.mpegts += 1 << 33
|
||||
extra_state['webvtt_mpegts_last'] = block.mpegts
|
||||
mpegts_last = block.mpegts
|
||||
|
||||
if frag_index == 1:
|
||||
extra_state['webvtt_mpegts'] = block.mpegts or 0
|
||||
@@ -319,6 +332,19 @@ class HlsFD(FragmentFD):
|
||||
block.write_into(output)
|
||||
|
||||
return output.getvalue().encode('utf-8')
|
||||
|
||||
def fin_fragments():
|
||||
dedup_window = extra_state.get('webvtt_dedup_window')
|
||||
if not dedup_window:
|
||||
return b''
|
||||
|
||||
output = io.StringIO()
|
||||
for cue in dedup_window:
|
||||
webvtt.CueBlock.from_json(cue).write_into(output)
|
||||
|
||||
return output.getvalue().encode('utf-8')
|
||||
|
||||
self.download_and_append_fragments(
|
||||
ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments)
|
||||
else:
|
||||
pack_fragment = None
|
||||
return self.download_and_append_fragments(ctx, fragments, info_dict, pack_fragment)
|
||||
return self.download_and_append_fragments(ctx, fragments, info_dict)
|
||||
|
||||
@@ -48,8 +48,9 @@ class HttpFD(FileDownloader):
|
||||
|
||||
is_test = self.params.get('test', False)
|
||||
chunk_size = self._TEST_FILE_SIZE if is_test else (
|
||||
info_dict.get('downloader_options', {}).get('http_chunk_size')
|
||||
or self.params.get('http_chunk_size') or 0)
|
||||
self.params.get('http_chunk_size')
|
||||
or info_dict.get('downloader_options', {}).get('http_chunk_size')
|
||||
or 0)
|
||||
|
||||
ctx.open_mode = 'wb'
|
||||
ctx.resume_len = 0
|
||||
@@ -57,6 +58,7 @@ class HttpFD(FileDownloader):
|
||||
ctx.block_size = self.params.get('buffersize', 1024)
|
||||
ctx.start_time = time.time()
|
||||
ctx.chunk_size = None
|
||||
throttle_start = None
|
||||
|
||||
if self.params.get('continuedl', True):
|
||||
# Establish possible resume length
|
||||
@@ -196,6 +198,7 @@ class HttpFD(FileDownloader):
|
||||
raise RetryDownload(err)
|
||||
|
||||
def download():
|
||||
nonlocal throttle_start
|
||||
data_len = ctx.data.info().get('Content-length', None)
|
||||
|
||||
# Range HTTP header may be ignored/unsupported by a webserver
|
||||
@@ -224,7 +227,6 @@ class HttpFD(FileDownloader):
|
||||
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
|
||||
now = None # needed for slow_down() in the first loop run
|
||||
before = start # start measuring
|
||||
throttle_start = None
|
||||
|
||||
def retry(e):
|
||||
to_stdout = ctx.tmpfilename == '-'
|
||||
@@ -238,7 +240,7 @@ class HttpFD(FileDownloader):
|
||||
while True:
|
||||
try:
|
||||
# Download and write
|
||||
data_block = ctx.data.read(block_size if data_len is None else min(block_size, data_len - byte_counter))
|
||||
data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
|
||||
# socket.timeout is a subclass of socket.error but may not have
|
||||
# errno set
|
||||
except socket.timeout as e:
|
||||
@@ -310,6 +312,7 @@ class HttpFD(FileDownloader):
|
||||
'eta': eta,
|
||||
'speed': speed,
|
||||
'elapsed': now - ctx.start_time,
|
||||
'ctx_id': info_dict.get('ctx_id'),
|
||||
}, info_dict)
|
||||
|
||||
if data_len is not None and byte_counter == data_len:
|
||||
@@ -324,7 +327,7 @@ class HttpFD(FileDownloader):
|
||||
if ctx.stream is not None and ctx.tmpfilename != '-':
|
||||
ctx.stream.close()
|
||||
raise ThrottledDownload()
|
||||
else:
|
||||
elif speed:
|
||||
throttle_start = None
|
||||
|
||||
if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
|
||||
@@ -357,6 +360,7 @@ class HttpFD(FileDownloader):
|
||||
'filename': ctx.filename,
|
||||
'status': 'finished',
|
||||
'elapsed': time.time() - ctx.start_time,
|
||||
'ctx_id': info_dict.get('ctx_id'),
|
||||
}, info_dict)
|
||||
|
||||
return True
|
||||
|
||||
@@ -4,9 +4,9 @@ from __future__ import unicode_literals
|
||||
import threading
|
||||
|
||||
from .common import FileDownloader
|
||||
from ..downloader import _get_real_downloader
|
||||
from ..downloader import get_suitable_downloader
|
||||
from ..extractor.niconico import NiconicoIE
|
||||
from ..compat import compat_urllib_request
|
||||
from ..utils import sanitized_Request
|
||||
|
||||
|
||||
class NiconicoDmcFD(FileDownloader):
|
||||
@@ -20,7 +20,7 @@ class NiconicoDmcFD(FileDownloader):
|
||||
ie = NiconicoIE(self.ydl)
|
||||
info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
|
||||
|
||||
fd = _get_real_downloader(info_dict, params=self.params)(self.ydl, self.params)
|
||||
fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)
|
||||
|
||||
success = download_complete = False
|
||||
timer = [None]
|
||||
@@ -29,9 +29,11 @@ class NiconicoDmcFD(FileDownloader):
|
||||
heartbeat_data = heartbeat_info_dict['data'].encode()
|
||||
heartbeat_interval = heartbeat_info_dict.get('interval', 30)
|
||||
|
||||
request = sanitized_Request(heartbeat_url, heartbeat_data)
|
||||
|
||||
def heartbeat():
|
||||
try:
|
||||
compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
|
||||
self.ydl.urlopen(request).read()
|
||||
except Exception:
|
||||
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
|
||||
|
||||
|
||||
@@ -183,7 +183,7 @@ class YoutubeLiveChatFD(FragmentFD):
|
||||
request_data['currentPlayerState'] = {'playerOffsetMs': str(max(offset - 5000, 0))}
|
||||
if click_tracking_params:
|
||||
request_data['context']['clickTracking'] = {'clickTrackingParams': click_tracking_params}
|
||||
headers = ie.generate_api_headers(ytcfg, visitor_data=visitor_data)
|
||||
headers = ie.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data)
|
||||
headers.update({'content-type': 'application/json'})
|
||||
fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode('utf-8') + b'\n'
|
||||
success, continuation_id, offset, click_tracking_params = download_and_parse_fragment(
|
||||
|
||||
@@ -6,7 +6,7 @@ try:
|
||||
from .lazy_extractors import *
|
||||
from .lazy_extractors import _ALL_CLASSES
|
||||
_LAZY_LOADER = True
|
||||
_PLUGIN_CLASSES = []
|
||||
_PLUGIN_CLASSES = {}
|
||||
except ImportError:
|
||||
_LAZY_LOADER = False
|
||||
|
||||
@@ -20,7 +20,7 @@ if not _LAZY_LOADER:
|
||||
_ALL_CLASSES.append(GenericIE)
|
||||
|
||||
_PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals())
|
||||
_ALL_CLASSES = _PLUGIN_CLASSES + _ALL_CLASSES
|
||||
_ALL_CLASSES = list(_PLUGIN_CLASSES.values()) + _ALL_CLASSES
|
||||
|
||||
|
||||
def gen_extractor_classes():
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .amp import AMPIE
|
||||
from .common import InfoExtractor
|
||||
@@ -59,7 +58,7 @@ class AbcNewsVideoIE(AMPIE):
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
mobj = self._match_valid_url(url)
|
||||
display_id = mobj.group('display_id')
|
||||
video_id = mobj.group('id')
|
||||
info_dict = self._extract_feed_info(
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
@@ -55,7 +54,7 @@ class ABCOTVSIE(InfoExtractor):
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
site, display_id, video_id = re.match(self._VALID_URL, url).groups()
|
||||
site, display_id, video_id = self._match_valid_url(url).groups()
|
||||
display_id = display_id or video_id
|
||||
station = self._SITE_MAP[site]
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
@@ -80,7 +79,7 @@ class ACastIE(ACastBaseIE):
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
channel, display_id = re.match(self._VALID_URL, url).groups()
|
||||
channel, display_id = self._match_valid_url(url).groups()
|
||||
episode = self._call_api(
|
||||
'%s/episodes/%s' % (channel, display_id),
|
||||
display_id, {'showInfo': 'true'})
|
||||
|
||||
@@ -37,6 +37,11 @@ MSO_INFO = {
|
||||
'username_field': 'email',
|
||||
'password_field': 'loginpassword',
|
||||
},
|
||||
'RCN': {
|
||||
'name': 'RCN',
|
||||
'username_field': 'UserName',
|
||||
'password_field': 'UserPassword',
|
||||
},
|
||||
'Rogers': {
|
||||
'name': 'Rogers',
|
||||
'username_field': 'UserName',
|
||||
@@ -76,6 +81,11 @@ MSO_INFO = {
|
||||
'username_field': 'IDToken1',
|
||||
'password_field': 'IDToken2',
|
||||
},
|
||||
'Cablevision': {
|
||||
'name': 'Optimum/Cablevision',
|
||||
'username_field': 'j_username',
|
||||
'password_field': 'j_password',
|
||||
},
|
||||
'thr030': {
|
||||
'name': '3 Rivers Communications'
|
||||
},
|
||||
@@ -1330,6 +1340,11 @@ MSO_INFO = {
|
||||
'cou060': {
|
||||
'name': 'Zito Media'
|
||||
},
|
||||
'slingtv': {
|
||||
'name': 'Sling TV',
|
||||
'username_field': 'username',
|
||||
'password_field': 'password',
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -1498,7 +1513,8 @@ class AdobePassIE(InfoExtractor):
|
||||
# In general, if you're connecting from a Verizon-assigned IP,
|
||||
# you will not actually pass your credentials.
|
||||
provider_redirect_page, urlh = provider_redirect_page_res
|
||||
if 'Please wait ...' in provider_redirect_page:
|
||||
# From non-Verizon IP, still gave 'Please wait', but noticed N==Y; will need to try on Verizon IP
|
||||
if 'Please wait ...' in provider_redirect_page and '\'N\'== "Y"' not in provider_redirect_page:
|
||||
saml_redirect_url = self._html_search_regex(
|
||||
r'self\.parent\.location=(["\'])(?P<url>.+?)\1',
|
||||
provider_redirect_page,
|
||||
@@ -1506,7 +1522,8 @@ class AdobePassIE(InfoExtractor):
|
||||
saml_login_page = self._download_webpage(
|
||||
saml_redirect_url, video_id,
|
||||
'Downloading SAML Login Page')
|
||||
else:
|
||||
elif 'Verizon FiOS - sign in' in provider_redirect_page:
|
||||
# FXNetworks from non-Verizon IP
|
||||
saml_login_page_res = post_form(
|
||||
provider_redirect_page_res, 'Logging in', {
|
||||
mso_info['username_field']: username,
|
||||
@@ -1516,6 +1533,26 @@ class AdobePassIE(InfoExtractor):
|
||||
if 'Please try again.' in saml_login_page:
|
||||
raise ExtractorError(
|
||||
'We\'re sorry, but either the User ID or Password entered is not correct.')
|
||||
else:
|
||||
# ABC from non-Verizon IP
|
||||
saml_redirect_url = self._html_search_regex(
|
||||
r'var\surl\s*=\s*(["\'])(?P<url>.+?)\1',
|
||||
provider_redirect_page,
|
||||
'SAML Redirect URL', group='url')
|
||||
saml_redirect_url = saml_redirect_url.replace(r'\/', '/')
|
||||
saml_redirect_url = saml_redirect_url.replace(r'\-', '-')
|
||||
saml_redirect_url = saml_redirect_url.replace(r'\x26', '&')
|
||||
saml_login_page = self._download_webpage(
|
||||
saml_redirect_url, video_id,
|
||||
'Downloading SAML Login Page')
|
||||
saml_login_page, urlh = post_form(
|
||||
[saml_login_page, saml_redirect_url], 'Logging in', {
|
||||
mso_info['username_field']: username,
|
||||
mso_info['password_field']: password,
|
||||
})
|
||||
if 'Please try again.' in saml_login_page:
|
||||
raise ExtractorError(
|
||||
'Failed to login, incorrect User ID or Password.')
|
||||
saml_login_url = self._search_regex(
|
||||
r'xmlHttp\.open\("POST"\s*,\s*(["\'])(?P<url>.+?)\1',
|
||||
saml_login_page, 'SAML Login URL', group='url')
|
||||
@@ -1565,6 +1602,40 @@ class AdobePassIE(InfoExtractor):
|
||||
}), headers={
|
||||
'Content-Type': 'application/x-www-form-urlencoded'
|
||||
})
|
||||
elif mso_id == 'slingtv':
|
||||
# SlingTV has a meta-refresh based authentication, but also
|
||||
# looks at the tab history to count the number of times the
|
||||
# browser has been on a page
|
||||
|
||||
first_bookend_page, urlh = provider_redirect_page_res
|
||||
|
||||
hidden_data = self._hidden_inputs(first_bookend_page)
|
||||
hidden_data['history'] = 1
|
||||
|
||||
provider_login_page_res = self._download_webpage_handle(
|
||||
urlh.geturl(), video_id, 'Sending first bookend',
|
||||
query=hidden_data)
|
||||
|
||||
provider_association_redirect, urlh = post_form(
|
||||
provider_login_page_res, 'Logging in', {
|
||||
mso_info['username_field']: username,
|
||||
mso_info['password_field']: password
|
||||
})
|
||||
|
||||
provider_refresh_redirect_url = extract_redirect_url(
|
||||
provider_association_redirect, url=urlh.geturl())
|
||||
|
||||
last_bookend_page, urlh = self._download_webpage_handle(
|
||||
provider_refresh_redirect_url, video_id,
|
||||
'Downloading Auth Association Redirect Page')
|
||||
hidden_data = self._hidden_inputs(last_bookend_page)
|
||||
hidden_data['history'] = 3
|
||||
|
||||
mvpd_confirm_page_res = self._download_webpage_handle(
|
||||
urlh.geturl(), video_id, 'Sending final bookend',
|
||||
query=hidden_data)
|
||||
|
||||
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
||||
else:
|
||||
# Some providers (e.g. DIRECTV NOW) have another meta refresh
|
||||
# based redirect that should be followed.
|
||||
@@ -1577,10 +1648,13 @@ class AdobePassIE(InfoExtractor):
|
||||
'Downloading Provider Redirect Page (meta refresh)')
|
||||
provider_login_page_res = post_form(
|
||||
provider_redirect_page_res, self._DOWNLOADING_LOGIN_PAGE)
|
||||
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
|
||||
form_data = {
|
||||
mso_info.get('username_field', 'username'): username,
|
||||
mso_info.get('password_field', 'password'): password,
|
||||
})
|
||||
mso_info.get('password_field', 'password'): password
|
||||
}
|
||||
if mso_id == 'Cablevision':
|
||||
form_data['_eventId_proceed'] = ''
|
||||
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', form_data)
|
||||
if mso_id != 'Rogers':
|
||||
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
||||
|
||||
|
||||
@@ -132,7 +132,7 @@ class AdobeTVIE(AdobeTVBaseIE):
     }

     def _real_extract(self, url):
-        language, show_urlname, urlname = re.match(self._VALID_URL, url).groups()
+        language, show_urlname, urlname = self._match_valid_url(url).groups()
         if not language:
             language = 'en'

@@ -178,7 +178,7 @@ class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
     _process_data = AdobeTVBaseIE._parse_video_data

     def _real_extract(self, url):
-        language, show_urlname = re.match(self._VALID_URL, url).groups()
+        language, show_urlname = self._match_valid_url(url).groups()
         if not language:
             language = 'en'
         query = {
@@ -215,7 +215,7 @@ class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
             show_data['url'], 'AdobeTVShow', str_or_none(show_data.get('id')))

     def _real_extract(self, url):
-        language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups()
+        language, channel_urlname, category_urlname = self._match_valid_url(url).groups()
         if not language:
             language = 'en'
         query = {

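Note: the `re.match(self._VALID_URL, url)` → `self._match_valid_url(url)` swap above repeats across nearly every file in this diff. Conceptually the new helper is just the old expression hoisted onto the extractor class (the real method also caches the compiled pattern, so treat this as an approximation):

    import re

    class InfoExtractorSketch:
        _VALID_URL = r'https?://example\.com/(?P<id>\d+)'

        @classmethod
        def _match_valid_url(cls, url):
            # One place to match an URL against the extractor's _VALID_URL,
            # instead of every extractor calling re.match() itself.
            return re.match(cls._VALID_URL, url)

    mobj = InfoExtractorSketch._match_valid_url('https://example.com/42')
    print(mobj.group('id'))  # -> '42'
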
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import json
-import re

 from .turner import TurnerBaseIE
 from ..utils import (
@@ -89,7 +88,7 @@ class AdultSwimIE(TurnerBaseIE):
     }]

     def _real_extract(self, url):
-        show_path, episode_path = re.match(self._VALID_URL, url).groups()
+        show_path, episode_path = self._match_valid_url(url).groups()
         display_id = episode_path or show_path
         query = '''query {
   getShowBySlug(slug:"%s") {

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .theplatform import ThePlatformIE
 from ..utils import (
@@ -20,8 +19,8 @@ class AENetworksBaseIE(ThePlatformIE):
                         (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
                         fyi\.tv
                     )/'''
-    _THEPLATFORM_KEY = 'crazyjava'
-    _THEPLATFORM_SECRET = 's3cr3t'
+    _THEPLATFORM_KEY = '43jXaGRQud'
+    _THEPLATFORM_SECRET = 'S10BPXHMlb'
     _DOMAIN_MAP = {
         'history.com': ('HISTORY', 'history'),
         'aetv.com': ('AETV', 'aetv'),
@@ -170,7 +169,7 @@ class AENetworksIE(AENetworksBaseIE):
     }]

     def _real_extract(self, url):
-        domain, canonical = re.match(self._VALID_URL, url).groups()
+        domain, canonical = self._match_valid_url(url).groups()
         return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)


@@ -187,7 +186,7 @@ class AENetworksListBaseIE(AENetworksBaseIE):
         }))['data'][resource]

     def _real_extract(self, url):
-        domain, slug = re.match(self._VALID_URL, url).groups()
+        domain, slug = self._match_valid_url(url).groups()
         _, brand = self._DOMAIN_MAP[domain]
         playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
         base_url = 'http://watch.%s' % domain
@@ -309,7 +308,7 @@ class HistoryPlayerIE(AENetworksBaseIE):
     _TESTS = []

     def _real_extract(self, url):
-        domain, video_id = re.match(self._VALID_URL, url).groups()
+        domain, video_id = self._match_valid_url(url).groups()
         return self._extract_aetn_info(domain, 'id', video_id, url)

@@ -6,9 +6,11 @@ import re
 from .common import InfoExtractor
 from ..compat import compat_xpath
 from ..utils import (
+    date_from_str,
     determine_ext,
     ExtractorError,
     int_or_none,
+    unified_strdate,
     url_or_none,
     urlencode_postdata,
     xpath_text,
@@ -237,6 +239,7 @@ class AfreecaTVIE(InfoExtractor):
             r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id)

         partial_view = False
+        adult_view = False
         for _ in range(2):
             query = {
                 'nTitleNo': video_id,
@@ -245,6 +248,8 @@ class AfreecaTVIE(InfoExtractor):
             }
             if partial_view:
                 query['partialView'] = 'SKIP_ADULT'
+            if adult_view:
+                query['adultView'] = 'ADULT_VIEW'
             video_xml = self._download_xml(
                 'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
                 video_id, 'Downloading video info XML%s'
@@ -264,6 +269,9 @@ class AfreecaTVIE(InfoExtractor):
                     partial_view = True
                     continue
+                elif flag == 'ADULT':
+                    if not adult_view:
+                        adult_view = True
+                        continue
+                    error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
                 else:
                     error = flag
@@ -309,8 +317,15 @@ class AfreecaTVIE(InfoExtractor):
                 if not file_url:
                     continue
                 key = file_element.get('key', '')
-                upload_date = self._search_regex(
-                    r'^(\d{8})_', key, 'upload date', default=None)
+                upload_date = unified_strdate(self._search_regex(
+                    r'^(\d{8})_', key, 'upload date', default=None))
+                if upload_date is not None:
+                    # sometimes the upload date isn't included in the file name
+                    # instead, another random ID is, which may parse as a valid
+                    # date but be wildly out of a reasonable range
+                    parsed_date = date_from_str(upload_date)
+                    if parsed_date.year < 2000 or parsed_date.year >= 2100:
+                        upload_date = None
                 file_duration = int_or_none(file_element.get('duration'))
                 format_id = key if key else '%s_%s' % (video_id, file_num)
                 if determine_ext(file_url) == 'm3u8':

@@ -1,7 +1,6 @@
 from __future__ import unicode_literals

 import json
-import re

 from .common import InfoExtractor

@@ -32,7 +31,7 @@ class AlJazeeraIE(InfoExtractor):
     BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'

     def _real_extract(self, url):
-        post_type, name = re.match(self._VALID_URL, url).groups()
+        post_type, name = self._match_valid_url(url).groups()
         post_type = {
             'features': 'post',
             'program': 'episode',
@@ -40,7 +39,7 @@ class AlJazeeraIE(InfoExtractor):
         }[post_type.split('/')[0]]
         video = self._download_json(
             'https://www.aljazeera.com/graphql', name, query={
-                'operationName': 'SingleArticleQuery',
+                'operationName': 'ArchipelagoSingleArticleQuery',
                 'variables': json.dumps({
                     'name': name,
                     'postType': post_type,

@@ -42,8 +42,7 @@ class AluraIE(InfoExtractor):

     def _real_extract(self, url):

-        video_id = self._match_id(url)
-        course = self._search_regex(self._VALID_URL, url, 'post url', group='course_name')
+        course, video_id = self._match_valid_url(url).group('course_name', 'id')
         video_url = self._VIDEO_URL % (course, video_id)

         video_dict = self._download_json(video_url, video_id, 'Searching for videos')

@@ -63,7 +63,7 @@ class AMCNetworksIE(ThePlatformIE):
     }

     def _real_extract(self, url):
-        site, display_id = re.match(self._VALID_URL, url).groups()
+        site, display_id = self._match_valid_url(url).groups()
         requestor_id = self._REQUESTOR_ID_MAP[site]
         page_data = self._download_json(
             'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'

@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import json
-import re

 from .common import InfoExtractor
 from ..utils import (
@@ -69,7 +68,7 @@ class AmericasTestKitchenIE(InfoExtractor):
     }]

     def _real_extract(self, url):
-        resource_type, video_id = re.match(self._VALID_URL, url).groups()
+        resource_type, video_id = self._match_valid_url(url).groups()
         is_episode = resource_type == 'episode'
         if is_episode:
             resource_type = 'episodes'
@@ -114,7 +113,7 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
     }]

     def _real_extract(self, url):
-        show_name, season_number = re.match(self._VALID_URL, url).groups()
+        show_name, season_number = self._match_valid_url(url).groups()
         season_number = int(season_number)

         slug = 'atk' if show_name == 'americastestkitchen' else 'cco'

@@ -390,7 +390,7 @@ class AnvatoIE(InfoExtractor):
             'countries': smuggled_data.get('geo_countries'),
         })

-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         access_key, video_id = mobj.group('access_key_or_mcp', 'id')
         if access_key not in self._ANVACK_TABLE:
             access_key = self._MCP_TO_ACCESS_KEY_TABLE.get(

@@ -4,13 +4,10 @@ from __future__ import unicode_literals
 import re

 from .yahoo import YahooIE
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     url_or_none,
 )

@@ -119,7 +116,7 @@ class AolIE(YahooIE):
                     'height': int(mobj.group(2)),
                 })
             else:
-                qs = compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)
+                qs = parse_qs(video_url)
                 f.update({
                     'width': int_or_none(qs.get('w', [None])[0]),
                     'height': int_or_none(qs.get('h', [None])[0]),

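Note: the other recurring cleanup, shown in the AOL hunk above, replaces the two-step `compat_parse_qs(compat_urllib_parse_urlparse(url).query)` dance with a single `parse_qs(url)` call. The utility is essentially the composition of the two stdlib calls; a sketch of the equivalence:

    from urllib.parse import parse_qs as _parse_qs, urlparse

    def parse_qs(url):
        # Parse the query string of a full URL into a dict of lists,
        # e.g. 'https://e.com/p?w=640&h=360' -> {'w': ['640'], 'h': ['360']}
        return _parse_qs(urlparse(url).query)

    qs = parse_qs('https://example.com/play?mediaId=123&accountId=9')
    video_id = qs.get('mediaId', [None])[0]      # -> '123'
    account_id = qs.get('accountId', [None])[0]  # -> '9'
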
@@ -42,7 +42,7 @@ class APAIE(InfoExtractor):
             webpage)]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id, base_url = mobj.group('id', 'base_url')

         webpage = self._download_webpage(

@@ -94,7 +94,7 @@ class AppleTrailersIE(InfoExtractor):
     _JSON_RE = r'iTunes.playURL\((.*?)\);'

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         movie = mobj.group('movie')
         uploader_id = mobj.group('company')

@@ -9,8 +9,6 @@ from .youtube import YoutubeIE
 from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
-    compat_urlparse,
-    compat_parse_qs,
     compat_HTTPError
 )
 from ..utils import (
@@ -25,6 +23,7 @@ from ..utils import (
     merge_dicts,
     mimetype2ext,
     parse_duration,
+    parse_qs,
     RegexNotFoundError,
     str_to_int,
     str_or_none,
@@ -399,7 +398,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
                     expected=True)
                 raise
         video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
-        video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)
+        video_file_url_qs = parse_qs(video_file_url)

         # Attempt to recover any ext & format info from playback url
         format = {'url': video_file_url}

@@ -86,7 +86,7 @@ class ArcPublishingIE(InfoExtractor):
         return entries

     def _real_extract(self, url):
-        org, uuid = re.match(self._VALID_URL, url).groups()
+        org, uuid = self._match_valid_url(url).groups()
         for orgs, tmpl in self._POWA_DEFAULTS:
             if org in orgs:
                 base_api_tmpl = tmpl

@@ -199,7 +199,7 @@ class ARDMediathekIE(ARDMediathekBaseIE):

     def _real_extract(self, url):
         # determine video id from url
-        m = re.match(self._VALID_URL, url)
+        m = self._match_valid_url(url)

         document_id = None

@@ -325,7 +325,7 @@ class ARDIE(InfoExtractor):
     }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         display_id = mobj.group('id')

         player_url = mobj.group('mainurl') + '~playerXml.xml'
@@ -525,7 +525,7 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
         return self.playlist_result(entries, playlist_title=display_id)

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('video_id')
         display_id = mobj.group('display_id')
         if display_id:

@@ -4,12 +4,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..compat import compat_urlparse
 from ..utils import (
     ExtractorError,
     float_or_none,
     int_or_none,
     parse_iso8601,
+    parse_qs,
     try_get,
 )

@@ -63,13 +63,13 @@ class ArkenaIE(InfoExtractor):
         return mobj.group('url')

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         account_id = mobj.group('account_id')

         # Handle http://video.arkena.com/play2/embed/player URL
         if not video_id:
-            qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+            qs = parse_qs(url)
             video_id = qs.get('mediaId', [None])[0]
             account_id = qs.get('accountId', [None])[0]
             if not video_id or not account_id:

@@ -6,11 +6,11 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_qs,
     qualities,
     try_get,
     unified_strdate,
@@ -49,7 +49,7 @@ class ArteTVIE(ArteTVBaseIE):
     }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         lang = mobj.group('lang') or mobj.group('lang_2')

@@ -174,7 +174,7 @@ class ArteTVIE(ArteTVBaseIE):
         return {
             'id': player_info.get('VID') or video_id,
             'title': title,
-            'description': player_info.get('VDE'),
+            'description': player_info.get('VDE') or player_info.get('V7T'),
             'upload_date': unified_strdate(upload_date_str),
             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
             'formats': formats,
@@ -204,7 +204,7 @@ class ArteTVEmbedIE(InfoExtractor):
             webpage)]

     def _real_extract(self, url):
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         json_url = qs['json_url'][0]
         video_id = ArteTVIE._match_id(json_url)
         return self.url_result(
@@ -227,7 +227,7 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
     }]

     def _real_extract(self, url):
-        lang, playlist_id = re.match(self._VALID_URL, url).groups()
+        lang, playlist_id = self._match_valid_url(url).groups()
         collection = self._download_json(
             '%s/collectionData/%s/%s?source=videos'
             % (self._API_BASE, lang, playlist_id), playlist_id)

@@ -111,7 +111,7 @@ class AsianCrushIE(AsianCrushBaseIE):
     }]

     def _real_extract(self, url):
-        host, video_id = re.match(self._VALID_URL, url).groups()
+        host, video_id = self._match_valid_url(url).groups()

         if host == 'cocoro.tv':
             webpage = self._download_webpage(url, video_id)
@@ -161,7 +161,7 @@ class AsianCrushPlaylistIE(AsianCrushBaseIE):
             yield self._parse_video_data(video)

     def _real_extract(self, url):
-        host, playlist_id = re.match(self._VALID_URL, url).groups()
+        host, playlist_id = self._match_valid_url(url).groups()

         if host == 'cocoro.tv':
             webpage = self._download_webpage(url, playlist_id)

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor
 from ..compat import compat_HTTPError
@@ -75,7 +74,7 @@ class AtresPlayerIE(InfoExtractor):
         self._request_webpage(target_url, None, 'Following Target URL')

     def _real_extract(self, url):
-        display_id, video_id = re.match(self._VALID_URL, url).groups()
+        display_id, video_id = self._match_valid_url(url).groups()

         try:
             episode = self._download_json(

@@ -1,75 +1,106 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import datetime
+
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
-    int_or_none,
-    unescapeHTML,
+    float_or_none,
+    jwt_encode_hs256,
+    try_get,
 )


 class ATVAtIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?atv\.at/(?:[^/]+/){2}(?P<id>[dv]\d+)'
+    _VALID_URL = r'https?://(?:www\.)?atv\.at/tv/(?:[^/]+/){2,3}(?P<id>.*)'

     _TESTS = [{
-        'url': 'http://atv.at/aktuell/di-210317-2005-uhr/v1698449/',
-        'md5': 'c3b6b975fb3150fc628572939df205f2',
+        'url': 'https://www.atv.at/tv/bauer-sucht-frau/staffel-18/bauer-sucht-frau/bauer-sucht-frau-staffel-18-folge-3-die-hofwochen',
+        'md5': '3c3b4aaca9f63e32b35e04a9c2515903',
         'info_dict': {
-            'id': '1698447',
+            'id': 'v-ce9cgn1e70n5-1',
             'ext': 'mp4',
-            'title': 'DI, 21.03.17 | 20:05 Uhr 1/1',
+            'title': 'Bauer sucht Frau - Staffel 18 Folge 3 - Die Hofwochen',
         }
     }, {
-        'url': 'http://atv.at/aktuell/meinrad-knapp/d8416/',
+        'url': 'https://www.atv.at/tv/bauer-sucht-frau/staffel-18/episode-01/bauer-sucht-frau-staffel-18-vorstellungsfolge-1',
         'only_matching': True,
     }]

+    # extracted from bootstrap.js function (search for e.encryption_key and use your browser's debugger)
+    _ACCESS_ID = 'x_atv'
+    _ENCRYPTION_KEY = 'Hohnaekeishoogh2omaeghooquooshia'
+
+    def _extract_video_info(self, url, content, video):
+        clip_id = content.get('splitId', content['id'])
+        formats = []
+        clip_urls = video['urls']
+        for protocol, variant in clip_urls.items():
+            source_url = try_get(variant, lambda x: x['clear']['url'])
+            if not source_url:
+                continue
+            if protocol == 'dash':
+                formats.extend(self._extract_mpd_formats(
+                    source_url, clip_id, mpd_id=protocol, fatal=False))
+            elif protocol == 'hls':
+                formats.extend(self._extract_m3u8_formats(
+                    source_url, clip_id, 'mp4', 'm3u8_native',
+                    m3u8_id=protocol, fatal=False))
+            else:
+                formats.append({
+                    'url': source_url,
+                    'format_id': protocol,
+                })
+        self._sort_formats(formats)
+
+        return {
+            'id': clip_id,
+            'title': content.get('title'),
+            'duration': float_or_none(content.get('duration')),
+            'series': content.get('tvShowTitle'),
+            'formats': formats,
+        }

     def _real_extract(self, url):
-        display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-        video_data = self._parse_json(unescapeHTML(self._search_regex(
-            [r'flashPlayerOptions\s*=\s*(["\'])(?P<json>(?:(?!\1).)+)\1',
-             r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="(?P<json>[^"]+)"'],
-            webpage, 'player data', group='json')),
-            display_id)['config']['initial_video']
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        json_data = self._parse_json(
+            self._search_regex(r'<script id="state" type="text/plain">(.*)</script>', webpage, 'json_data'),
+            video_id=video_id)

-        video_id = video_data['id']
-        video_title = video_data['title']
+        video_title = json_data['views']['default']['page']['title']
+        contentResource = json_data['views']['default']['page']['contentResource']
+        content_id = contentResource[0]['id']
+        content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']}
+                       for id, content in enumerate(contentResource)]

-        parts = []
-        for part in video_data.get('parts', []):
-            part_id = part['id']
-            part_title = part['title']
-
-            formats = []
-            for source in part.get('sources', []):
-                source_url = source.get('src')
-                if not source_url:
-                    continue
-                ext = determine_ext(source_url)
-                if ext == 'm3u8':
-                    formats.extend(self._extract_m3u8_formats(
-                        source_url, part_id, 'mp4', 'm3u8_native',
-                        m3u8_id='hls', fatal=False))
-                else:
-                    formats.append({
-                        'format_id': source.get('delivery'),
-                        'url': source_url,
-                    })
-            self._sort_formats(formats)
-
-            parts.append({
-                'id': part_id,
-                'title': part_title,
-                'thumbnail': part.get('preview_image_url'),
-                'duration': int_or_none(part.get('duration')),
-                'is_live': part.get('is_livestream'),
-                'formats': formats,
-            })
+        time_of_request = datetime.datetime.now()
+        not_before = time_of_request - datetime.timedelta(minutes=5)
+        expire = time_of_request + datetime.timedelta(minutes=5)
+        payload = {
+            'content_ids': {
+                content_id: content_ids,
+            },
+            'secure_delivery': True,
+            'iat': int(time_of_request.timestamp()),
+            'nbf': int(not_before.timestamp()),
+            'exp': int(expire.timestamp()),
+        }
+        jwt_token = jwt_encode_hs256(payload, self._ENCRYPTION_KEY, headers={'kid': self._ACCESS_ID})
+        videos = self._download_json(
+            'https://vas-v4.p7s1video.net/4.0/getsources',
+            content_id, 'Downloading videos JSON', query={
+                'token': jwt_token.decode('utf-8')
+            })

+        video_id, videos_data = list(videos['data'].items())[0]
+        entries = [
+            self._extract_video_info(url, contentResource[video['id']], video)
+            for video in videos_data]

         return {
             '_type': 'multi_video',
             'id': video_id,
             'title': video_title,
-            'entries': parts,
+            'entries': entries,
         }

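Note: the `getsources` token built above is a standard HS256 JSON Web Token — a `kid` header naming `_ACCESS_ID`, a payload with `iat`/`nbf`/`exp` claims, signed with `_ENCRYPTION_KEY`. A self-contained sketch of what an HS256 encoder like `jwt_encode_hs256` produces (illustrative; not the exact `yt_dlp.utils` source):

    import base64
    import hashlib
    import hmac
    import json

    def b64url(data):
        # JWTs use unpadded, URL-safe base64
        return base64.urlsafe_b64encode(data).rstrip(b'=')

    def jwt_encode_hs256(payload, key, headers=None):
        header = {'alg': 'HS256', 'typ': 'JWT'}
        header.update(headers or {})  # e.g. {'kid': 'x_atv'}
        signing_input = (b64url(json.dumps(header).encode())
                         + b'.' + b64url(json.dumps(payload).encode()))
        signature = hmac.new(key.encode(), signing_input, hashlib.sha256).digest()
        return signing_input + b'.' + b64url(signature)

    token = jwt_encode_hs256({'iat': 0, 'nbf': 0, 'exp': 300}, 'secret', headers={'kid': 'x_atv'})
    print(token.decode('utf-8'))  # header.payload.signature, ready for ?token=...
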
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import random
-import re

 from .common import InfoExtractor
 from ..utils import ExtractorError, try_get, compat_str, str_or_none
@@ -124,7 +123,7 @@ class AudiusIE(AudiusBaseIE):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         track_id = try_get(mobj, lambda x: x.group('track_id'))
         if track_id is None:
             title = mobj.group('title')
@@ -217,7 +216,7 @@ class AudiusPlaylistIE(AudiusBaseIE):

     def _real_extract(self, url):
         self._select_api_base()
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         title = mobj.group('title')
         # uploader = mobj.group('uploader')
         url = self._prepare_url(url, title)

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re
 import base64

 from .common import InfoExtractor
@@ -22,7 +21,7 @@ class AWAANIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<id>\d+)/(?P<season_id>\d+))?'

     def _real_extract(self, url):
-        show_id, video_id, season_id = re.match(self._VALID_URL, url).groups()
+        show_id, video_id, season_id = self._match_valid_url(url).groups()
         if video_id and int(video_id) > 0:
             return self.url_result(
                 'http://awaan.ae/media/%s' % video_id, 'AWAANVideo')
@@ -154,7 +153,7 @@ class AWAANSeasonIE(InfoExtractor):

     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
-        show_id, season_id = re.match(self._VALID_URL, url).groups()
+        show_id, season_id = self._match_valid_url(url).groups()

         data = {}
         if season_id:

@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import json
-import re

 from .common import InfoExtractor
 from .kaltura import KalturaIE
@@ -51,7 +50,7 @@ class AZMedienIE(InfoExtractor):
     _PARTNER_ID = '1719221'

     def _real_extract(self, url):
-        host, display_id, article_id, entry_id = re.match(self._VALID_URL, url).groups()
+        host, display_id, article_id, entry_id = self._match_valid_url(url).groups()

         if not entry_id:
             entry_id = self._download_json(

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor
 from ..utils import unescapeHTML
@@ -33,7 +32,7 @@ class BaiduVideoIE(InfoExtractor):
             path, category, playlist_id), playlist_id, note)

     def _real_extract(self, url):
-        category, playlist_id = re.match(self._VALID_URL, url).groups()
+        category, playlist_id = self._match_valid_url(url).groups()
         if category == 'show':
             category = 'tvshow'
         if category == 'tv':

@@ -212,7 +212,7 @@ class BandcampIE(InfoExtractor):

 class BandcampAlbumIE(BandcampIE):
     IE_NAME = 'Bandcamp:album'
-    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<id>[^/?#&]+))?'
+    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?!/music)(?:/album/(?P<id>[^/?#&]+))?'

     _TESTS = [{
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -294,7 +294,7 @@ class BandcampAlbumIE(BandcampIE):
                 else super(BandcampAlbumIE, cls).suitable(url))

     def _real_extract(self, url):
-        uploader_id, album_id = re.match(self._VALID_URL, url).groups()
+        uploader_id, album_id = self._match_valid_url(url).groups()
         playlist_id = album_id or uploader_id
         webpage = self._download_webpage(url, playlist_id)
         tralbum = self._extract_data_attr(webpage, playlist_id)
@@ -389,3 +389,43 @@ class BandcampWeeklyIE(BandcampIE):
             'episode_id': show_id,
             'formats': formats
         }
+
+
+class BandcampMusicIE(InfoExtractor):
+    _VALID_URL = r'https?://(?P<id>[^/]+)\.bandcamp\.com/music'
+    _TESTS = [{
+        'url': 'https://steviasphere.bandcamp.com/music',
+        'playlist_mincount': 47,
+        'info_dict': {
+            'id': 'steviasphere',
+        },
+    }, {
+        'url': 'https://coldworldofficial.bandcamp.com/music',
+        'playlist_mincount': 10,
+        'info_dict': {
+            'id': 'coldworldofficial',
+        },
+    }, {
+        'url': 'https://nuclearwarnowproductions.bandcamp.com/music',
+        'playlist_mincount': 399,
+        'info_dict': {
+            'id': 'nuclearwarnowproductions',
+        },
+    }
+    ]
+
+    _TYPE_IE_DICT = {
+        'album': BandcampAlbumIE.ie_key(),
+        'track': BandcampIE.ie_key()
+    }
+
+    def _real_extract(self, url):
+        id = self._match_id(url)
+        webpage = self._download_webpage(url, id)
+        items = re.findall(r'href\=\"\/(?P<path>(?P<type>album|track)+/[^\"]+)', webpage)
+        entries = [
+            self.url_result(
+                f'https://{id}.bandcamp.com/{item[0]}',
+                ie=self._TYPE_IE_DICT[item[1]])
+            for item in items]
+        return self.playlist_result(entries, id)

yt_dlp/extractor/bannedvideo.py (new file, 165 lines)
@@ -0,0 +1,165 @@
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    try_get,
    int_or_none,
    url_or_none,
    float_or_none,
    unified_timestamp,
)


class BannedVideoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?banned\.video/watch\?id=(?P<id>[0-f]{24})'
    _TESTS = [{
        'url': 'https://banned.video/watch?id=5e7a859644e02200c6ef5f11',
        'md5': '14b6e81d41beaaee2215cd75c6ed56e4',
        'info_dict': {
            'id': '5e7a859644e02200c6ef5f11',
            'ext': 'mp4',
            'title': 'China Discovers Origin of Corona Virus: Issues Emergency Statement',
            'thumbnail': r're:^https?://(?:www\.)?assets\.infowarsmedia.com/images/',
            'description': 'md5:560d96f02abbebe6c6b78b47465f6b28',
            'upload_date': '20200324',
            'timestamp': 1585087895,
        }
    }]

    _GRAPHQL_GETMETADATA_QUERY = '''
query GetVideoAndComments($id: String!) {
    getVideo(id: $id) {
        streamUrl
        directUrl
        unlisted
        live
        tags {
            name
        }
        title
        summary
        playCount
        largeImage
        videoDuration
        channel {
            _id
            title
        }
        createdAt
    }
    getVideoComments(id: $id, limit: 999999, offset: 0) {
        _id
        content
        user {
            _id
            username
        }
        voteCount {
            positive
        }
        createdAt
        replyCount
    }
}'''

    _GRAPHQL_GETCOMMENTSREPLIES_QUERY = '''
query GetCommentReplies($id: String!) {
    getCommentReplies(id: $id, limit: 999999, offset: 0) {
        _id
        content
        user {
            _id
            username
        }
        voteCount {
            positive
        }
        createdAt
        replyCount
    }
}'''

    _GRAPHQL_QUERIES = {
        'GetVideoAndComments': _GRAPHQL_GETMETADATA_QUERY,
        'GetCommentReplies': _GRAPHQL_GETCOMMENTSREPLIES_QUERY,
    }

    def _call_api(self, video_id, id, operation, note):
        return self._download_json(
            'https://api.infowarsmedia.com/graphql', video_id, note=note,
            headers={
                'Content-Type': 'application/json; charset=utf-8'
            }, data=json.dumps({
                'variables': {'id': id},
                'operationName': operation,
                'query': self._GRAPHQL_QUERIES[operation]
            }).encode('utf8')).get('data')

    def _extract_comments(self, video_id, comments, comment_data):
        for comment in comment_data.copy():
            comment_id = comment.get('_id')
            if comment.get('replyCount') > 0:
                reply_json = self._call_api(
                    video_id, comment_id, 'GetCommentReplies',
                    f'Downloading replies for comment {comment_id}')
                comments.extend(
                    self._parse_comment(reply, comment_id)
                    for reply in reply_json.get('getCommentReplies'))

        return {
            'comments': comments,
            'comment_count': len(comments),
        }

    @staticmethod
    def _parse_comment(comment_data, parent):
        return {
            'id': comment_data.get('_id'),
            'text': comment_data.get('content'),
            'author': try_get(comment_data, lambda x: x['user']['username']),
            'author_id': try_get(comment_data, lambda x: x['user']['_id']),
            'timestamp': unified_timestamp(comment_data.get('createdAt')),
            'parent': parent,
            'like_count': try_get(comment_data, lambda x: x['voteCount']['positive']),
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_json = self._call_api(video_id, video_id, 'GetVideoAndComments', 'Downloading video metadata')
        video_info = video_json['getVideo']
        is_live = video_info.get('live')
        comments = [self._parse_comment(comment, 'root') for comment in video_json.get('getVideoComments')]

        formats = [{
            'format_id': 'direct',
            'quality': 1,
            'url': video_info.get('directUrl'),
            'ext': 'mp4',
        }] if url_or_none(video_info.get('directUrl')) else []
        if video_info.get('streamUrl'):
            formats.extend(self._extract_m3u8_formats(
                video_info.get('streamUrl'), video_id, 'mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', live=True))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_info.get('title')[:-1],
            'formats': formats,
            'is_live': is_live,
            'description': video_info.get('summary'),
            'channel': try_get(video_info, lambda x: x['channel']['title']),
            'channel_id': try_get(video_info, lambda x: x['channel']['_id']),
            'view_count': int_or_none(video_info.get('playCount')),
            'thumbnail': url_or_none(video_info.get('largeImage')),
            'duration': float_or_none(video_info.get('videoDuration')),
            'timestamp': unified_timestamp(video_info.get('createdAt')),
            'tags': [tag.get('name') for tag in video_info.get('tags')],
            'availability': self._availability(is_unlisted=video_info.get('unlisted')),
            'comments': comments,
            '__post_extractor': (
                (lambda: self._extract_comments(video_id, comments, video_json.get('getVideoComments')))
                if self.get_param('getcomments') else None)
        }

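Note: `_call_api` in the new BannedVideo extractor is a plain GraphQL-over-HTTP POST. Assuming the same endpoint and operation names as above, the request it issues is shaped roughly like this standalone stdlib sketch (the query is abbreviated for illustration):

    import json
    from urllib.request import Request, urlopen

    query = 'query GetVideoAndComments($id: String!) { getVideo(id: $id) { title } }'
    body = json.dumps({
        'variables': {'id': '5e7a859644e02200c6ef5f11'},
        'operationName': 'GetVideoAndComments',
        'query': query,
    }).encode('utf8')

    req = Request(
        'https://api.infowarsmedia.com/graphql', data=body,
        headers={'Content-Type': 'application/json; charset=utf-8'})
    # The response mirrors the query shape: {'data': {'getVideo': {...}}}
    data = json.loads(urlopen(req).read())['data']
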
@@ -10,9 +10,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_etree_Element,
     compat_HTTPError,
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (
@@ -26,6 +24,7 @@ from ..utils import (
     js_to_json,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     strip_or_none,
     try_get,
     unescapeHTML,
@@ -589,8 +588,8 @@ class BBCIE(BBCCoUkIE):
     _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'

     _MEDIA_SETS = [
-        'mobile-tablet-main',
         'pc',
+        'mobile-tablet-main',
     ]

     _TESTS = [{
@@ -1410,7 +1409,7 @@ class BBCCoUkIPlayerPlaylistBaseIE(InfoExtractor):

     def _real_extract(self, url):
         pid = self._match_id(url)
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         series_id = qs.get('seriesId', [None])[0]
         page = qs.get('page', [None])[0]
         per_page = 36 if page else self._PAGE_SIZE

@@ -40,7 +40,7 @@ class BeatportIE(InfoExtractor):
     }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         track_id = mobj.group('id')
         display_id = mobj.group('display_id')

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urlparse,
 )
 from ..utils import (
     int_or_none,
+    parse_qs,
     unified_timestamp,
 )

@@ -57,7 +57,7 @@ class BeegIE(InfoExtractor):
         query = {
             'v': 2,
         }
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         t = qs.get('t', [''])[0].split('-')
         if len(t) > 1:
             query.update({

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor
 from ..utils import url_basename
@@ -24,7 +23,7 @@ class BehindKinkIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         display_id = mobj.group('id')

         webpage = self._download_webpage(url, display_id)

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor

@@ -78,7 +77,7 @@ class BellMediaIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        domain, video_id = re.match(self._VALID_URL, url).groups()
+        domain, video_id = self._match_valid_url(url).groups()
         domain = domain.split('.')[0]
         return {
             '_type': 'url_transparent',

@@ -4,13 +4,16 @@ from __future__ import unicode_literals
 import hashlib
 import itertools
 import json
+import functools
 import re
+import math

 from .common import InfoExtractor, SearchInfoExtractor
 from ..compat import (
     compat_str,
     compat_parse_qs,
     compat_urlparse,
+    compat_urllib_parse_urlparse
 )
 from ..utils import (
     ExtractorError,
@@ -19,11 +22,14 @@ from ..utils import (
     parse_iso8601,
     try_get,
     smuggle_url,
+    srt_subtitles_timecode,
     str_or_none,
+    str_to_int,
     strip_jsonp,
     unified_timestamp,
     unsmuggle_url,
     urlencode_postdata,
+    OnDemandPagedList
 )


@@ -37,7 +43,7 @@ class BiliBiliIE(InfoExtractor):
                         video/[aA][vV]|
                         anime/(?P<anime_id>\d+)/play\#
                     )(?P<id>\d+)|
-                    video/[bB][vV](?P<id_bv>[^/?#&]+)
+                    (s/)?video/[bB][vV](?P<id_bv>[^/?#&]+)
                 )
                 (?:/?\?p=(?P<page>\d+))?
             '''
@@ -140,7 +146,7 @@ class BiliBiliIE(InfoExtractor):
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})

-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id_bv') or mobj.group('id')

         av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None)
@@ -535,6 +541,75 @@ class BilibiliChannelIE(InfoExtractor):
         return self.playlist_result(self._entries(list_id), list_id)


+class BilibiliCategoryIE(InfoExtractor):
+    IE_NAME = 'Bilibili category extractor'
+    _MAX_RESULTS = 1000000
+    _VALID_URL = r'https?://www\.bilibili\.com/v/[a-zA-Z]+\/[a-zA-Z]+'
+    _TESTS = [{
+        'url': 'https://www.bilibili.com/v/kichiku/mad',
+        'info_dict': {
+            'id': 'kichiku: mad',
+            'title': 'kichiku: mad'
+        },
+        'playlist_mincount': 45,
+        'params': {
+            'playlistend': 45
+        }
+    }]
+
+    def _fetch_page(self, api_url, num_pages, query, page_num):
+        parsed_json = self._download_json(
+            api_url, query, query={'Search_key': query, 'pn': page_num},
+            note='Extracting results from page %s of %s' % (page_num, num_pages))
+
+        video_list = try_get(parsed_json, lambda x: x['data']['archives'], list)
+        if not video_list:
+            raise ExtractorError('Failed to retrieve video list for page %d' % page_num)
+
+        for video in video_list:
+            yield self.url_result(
+                'https://www.bilibili.com/video/%s' % video['bvid'], 'BiliBili', video['bvid'])
+
+    def _entries(self, category, subcategory, query):
+        # map of categories : subcategories : RIDs
+        rid_map = {
+            'kichiku': {
+                'mad': 26,
+                'manual_vocaloid': 126,
+                'guide': 22,
+                'theatre': 216,
+                'course': 127
+            },
+        }
+
+        if category not in rid_map:
+            raise ExtractorError('The supplied category, %s, is not supported. List of supported categories: %s' % (category, list(rid_map.keys())))
+
+        if subcategory not in rid_map[category]:
+            raise ExtractorError('The subcategory, %s, isn\'t supported for this category. Supported subcategories: %s' % (subcategory, list(rid_map[category].keys())))
+
+        rid_value = rid_map[category][subcategory]
+
+        api_url = 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
+        page_json = self._download_json(api_url, query, query={'Search_key': query, 'pn': '1'})
+        page_data = try_get(page_json, lambda x: x['data']['page'], dict)
+        count, size = int_or_none(page_data.get('count')), int_or_none(page_data.get('size'))
+        if count is None or not size:
+            raise ExtractorError('Failed to calculate either page count or size')
+
+        num_pages = math.ceil(count / size)
+
+        return OnDemandPagedList(functools.partial(
+            self._fetch_page, api_url, num_pages, query), size)
+
+    def _real_extract(self, url):
+        u = compat_urllib_parse_urlparse(url)
+        category, subcategory = u.path.split('/')[2:4]
+        query = '%s: %s' % (category, subcategory)
+
+        return self.playlist_result(self._entries(category, subcategory, query), query, query)
+
+
 class BiliBiliSearchIE(SearchInfoExtractor):
     IE_DESC = 'Bilibili video search, "bilisearch" keyword'
     _MAX_RESULTS = 100000
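Note: `OnDemandPagedList`, used by the new BilibiliCategoryIE, wraps a page-fetching callback so playlist entries are materialized lazily — a `--playlist-end`-style limit only downloads the pages it actually touches. A rough sketch of the mechanism (simplified; the real class in `yt_dlp.utils` also caches fetched pages):

    import functools

    class OnDemandPagedListSketch:
        # Lazy paging: items are only fetched when a slice asks for them.
        def __init__(self, pagefunc, pagesize):
            self._pagefunc = pagefunc
            self._pagesize = pagesize

        def getslice(self, start=0, end=None):
            items = []
            first_page = start // self._pagesize
            page = first_page
            while end is None or page * self._pagesize < end:
                page_items = list(self._pagefunc(page))
                if not page_items:
                    break
                items.extend(page_items)
                page += 1
            offset = first_page * self._pagesize
            return items[start - offset:None if end is None else end - offset]

    def fetch_page(num_pages, page_num):
        # stand-in for BilibiliCategoryIE._fetch_page(api_url, num_pages, query, page_num)
        print('fetching page %d of %d' % (page_num + 1, num_pages))
        return range(page_num * 20, (page_num + 1) * 20)

    pages = OnDemandPagedListSketch(functools.partial(fetch_page, 3), 20)
    print(pages.getslice(0, 5))  # only page 0 is ever fetched
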
@@ -549,7 +624,7 @@ class BiliBiliSearchIE(SearchInfoExtractor):
         while True:
             pageNumber += 1
             # FIXME
-            api_url = "https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=pubdate&keyword=%s&duration=0&tids_2=&__refresh__=true&search_type=video&tids=0&highlight=1" % (pageNumber, query)
+            api_url = 'https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=pubdate&keyword=%s&duration=0&tids_2=&__refresh__=true&search_type=video&tids=0&highlight=1' % (pageNumber, query)
             json_str = self._download_webpage(
                 api_url, "None", query={"Search_key": query},
                 note='Extracting results from page %s' % pageNumber)
@@ -701,3 +776,152 @@ class BiliBiliPlayerIE(InfoExtractor):
         return self.url_result(
             'http://www.bilibili.tv/video/av%s/' % video_id,
             ie=BiliBiliIE.ie_key(), video_id=video_id)
+
+
+class BiliIntlBaseIE(InfoExtractor):
+    _API_URL = 'https://api.bili{}/intl/gateway{}'
+
+    def _call_api(self, type, endpoint, id):
+        return self._download_json(self._API_URL.format(type, endpoint), id)['data']
+
+    def json2srt(self, json):
+        data = '\n\n'.join(
+            f'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
+            for i, line in enumerate(json['body']))
+        return data
+
+    def _get_subtitles(self, type, ep_id):
+        sub_json = self._call_api(type, f'/m/subtitle?ep_id={ep_id}&platform=web', ep_id)
+        subtitles = {}
+        for sub in sub_json.get('subtitles', []):
+            sub_url = sub.get('url')
+            if not sub_url:
+                continue
+            sub_data = self._download_json(sub_url, ep_id, fatal=False)
+            if not sub_data:
+                continue
+            subtitles.setdefault(sub.get('key', 'en'), []).append({
+                'ext': 'srt',
+                'data': self.json2srt(sub_data)
+            })
+        return subtitles
+
+    def _get_formats(self, type, ep_id):
+        video_json = self._call_api(type, f'/web/playurl?ep_id={ep_id}&platform=web', ep_id)
+        if not video_json:
+            self.raise_login_required(method='cookies')
+        video_json = video_json['playurl']
+        formats = []
+        for vid in video_json.get('video', []):
+            video_res = vid.get('video_resource') or {}
+            video_info = vid.get('stream_info') or {}
+            if not video_res.get('url'):
+                continue
+            formats.append({
+                'url': video_res['url'],
+                'ext': 'mp4',
+                'format_note': video_info.get('desc_words'),
+                'width': video_res.get('width'),
+                'height': video_res.get('height'),
+                'vbr': video_res.get('bandwidth'),
+                'acodec': 'none',
+                'vcodec': video_res.get('codecs'),
+                'filesize': video_res.get('size'),
+            })
+        for aud in video_json.get('audio_resource', []):
+            if not aud.get('url'):
+                continue
+            formats.append({
+                'url': aud['url'],
+                'ext': 'mp4',
+                'abr': aud.get('bandwidth'),
+                'acodec': aud.get('codecs'),
+                'vcodec': 'none',
+                'filesize': aud.get('size'),
+            })
+
+        self._sort_formats(formats)
+        return formats
+
+    def _extract_ep_info(self, type, episode_data, ep_id):
+        return {
+            'id': ep_id,
+            'title': episode_data.get('long_title') or episode_data['title'],
+            'thumbnail': episode_data.get('cover'),
+            'episode_number': str_to_int(episode_data.get('title')),
+            'formats': self._get_formats(type, ep_id),
+            'subtitles': self._get_subtitles(type, ep_id),
+            'extractor_key': BiliIntlIE.ie_key(),
+        }
+
+
+class BiliIntlIE(BiliIntlBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?bili(?P<type>bili\.tv|intl.com)/(?:[a-z]{2}/)?play/(?P<season_id>\d+)/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://www.bilibili.tv/en/play/34613/341736',
+        'info_dict': {
+            'id': '341736',
+            'ext': 'mp4',
+            'title': 'The First Night',
+            'thumbnail': 'https://i0.hdslb.com/bfs/intl/management/91e30e5521235d9b163339a26a0b030ebda54310.png',
+            'episode_number': 2,
+        },
+        'params': {
+            'format': 'bv',
+        },
+    }, {
+        'url': 'https://www.biliintl.com/en/play/34613/341736',
+        'info_dict': {
+            'id': '341736',
+            'ext': 'mp4',
+            'title': 'The First Night',
+            'thumbnail': 'https://i0.hdslb.com/bfs/intl/management/91e30e5521235d9b163339a26a0b030ebda54310.png',
+            'episode_number': 2,
+        },
+        'params': {
+            'format': 'bv',
+        },
+    }]
+
+    def _real_extract(self, url):
+        type, season_id, id = self._match_valid_url(url).groups()
+        data_json = self._call_api(type, f'/web/view/ogv_collection?season_id={season_id}', id)
+        episode_data = next(
+            episode for episode in data_json.get('episodes', [])
+            if str(episode.get('ep_id')) == id)
+        return self._extract_ep_info(type, episode_data, id)
+
+
+class BiliIntlSeriesIE(BiliIntlBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?bili(?P<type>bili\.tv|intl.com)/(?:[a-z]{2}/)?play/(?P<id>\d+)$'
+    _TESTS = [{
+        'url': 'https://www.bilibili.tv/en/play/34613',
+        'playlist_mincount': 15,
+        'info_dict': {
+            'id': '34613',
+        },
+        'params': {
+            'skip_download': True,
+            'format': 'bv',
+        },
+    }, {
+        'url': 'https://www.biliintl.com/en/play/34613',
+        'playlist_mincount': 15,
+        'info_dict': {
+            'id': '34613',
+        },
+        'params': {
+            'skip_download': True,
+            'format': 'bv',
+        },
+    }]
+
+    def _entries(self, id, type):
+        data_json = self._call_api(type, f'/web/view/ogv_collection?season_id={id}', id)
+        for episode in data_json.get('episodes', []):
+            episode_id = str(episode.get('ep_id'))
+            yield self._extract_ep_info(type, episode, episode_id)
+
+    def _real_extract(self, url):
+        type, id = self._match_valid_url(url).groups()
+        return self.playlist_result(self._entries(id, type), playlist_id=id)

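Note: `json2srt` in BiliIntlBaseIE above renders Bilibili's JSON caption bodies as numbered SRT cues via `srt_subtitles_timecode`. A small sketch showing the expected output shape (the timecode helper here is an illustrative reimplementation, not the `yt_dlp.utils` source):

    def srt_subtitles_timecode(seconds):
        # 2.5 -> '00:00:02,500' (SRT uses a comma as the decimal separator)
        msec = int(round(seconds * 1000))
        h, msec = divmod(msec, 3600000)
        m, msec = divmod(msec, 60000)
        s, msec = divmod(msec, 1000)
        return '%02d:%02d:%02d,%03d' % (h, m, s, msec)

    body = [{'from': 0.0, 'to': 2.5, 'content': 'Hi'}]
    print('\n\n'.join(
        f'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
        for i, line in enumerate(body)))
    # 1
    # 00:00:00,000 --> 00:00:02,500
    # Hi
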
@@ -17,16 +17,16 @@ from ..utils import (
 class BitChuteIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?bitchute\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)'
     _TESTS = [{
-        'url': 'https://www.bitchute.com/video/szoMrox2JEI/',
-        'md5': '66c4a70e6bfc40dcb6be3eb1d74939eb',
+        'url': 'https://www.bitchute.com/video/UGlrF9o9b-Q/',
+        'md5': '7e427d7ed7af5a75b5855705ec750e2b',
         'info_dict': {
-            'id': 'szoMrox2JEI',
+            'id': 'UGlrF9o9b-Q',
             'ext': 'mp4',
-            'title': 'Fuck bitches get money',
-            'description': 'md5:3f21f6fb5b1d17c3dee9cf6b5fe60b3a',
+            'title': 'This is the first video on #BitChute !',
+            'description': 'md5:a0337e7b1fe39e32336974af8173a034',
             'thumbnail': r're:^https?://.*\.jpg$',
-            'uploader': 'Victoria X Rave',
-            'upload_date': '20170813',
+            'uploader': 'BitChute',
+            'upload_date': '20170103',
         },
     }, {
         'url': 'https://www.bitchute.com/embed/lbb5G1hjPhw/',

yt_dlp/extractor/blackboardcollaborate.py (new file, 67 lines)
@@ -0,0 +1,67 @@
# coding: utf-8
from __future__ import unicode_literals


from .common import InfoExtractor
from ..utils import parse_iso8601


class BlackboardCollaborateIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                    (?P<region>[a-z-]+)\.bbcollab\.com/
                    (?:
                        collab/ui/session/playback/load|
                        recording
                    )/
                    (?P<id>[^/]+)'''
    _TESTS = [
        {
            'url': 'https://us-lti.bbcollab.com/collab/ui/session/playback/load/0a633b6a88824deb8c918f470b22b256',
            'md5': 'bb7a055682ee4f25fdb5838cdf014541',
            'info_dict': {
                'id': '0a633b6a88824deb8c918f470b22b256',
                'title': 'HESI A2 Information Session - Thursday, May 6, 2021 - recording_1',
                'ext': 'mp4',
                'duration': 1896000,
                'timestamp': 1620331399,
                'upload_date': '20210506',
            },
        },
        {
            'url': 'https://us.bbcollab.com/collab/ui/session/playback/load/76761522adfe4345a0dee6794bbcabda',
            'only_matching': True,
        },
        {
            'url': 'https://ca.bbcollab.com/collab/ui/session/playback/load/b6399dcb44df4f21b29ebe581e22479d',
            'only_matching': True,
        },
        {
            'url': 'https://eu.bbcollab.com/recording/51ed7b50810c4444a106e48cefb3e6b5',
            'only_matching': True,
        },
        {
            'url': 'https://au.bbcollab.com/collab/ui/session/playback/load/2bccf7165d7c419ab87afc1ec3f3bb15',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        region = mobj.group('region')
        video_id = mobj.group('id')
        info = self._download_json(
            'https://{}.bbcollab.com/collab/api/csa/recordings/{}/data'.format(region, video_id), video_id)
        duration = info.get('duration')
        title = info['name']
        upload_date = info.get('created')
        streams = info['streams']
        formats = [{'format_id': k, 'url': url} for k, url in streams.items()]

        return {
            'duration': duration,
            'formats': formats,
            'id': video_id,
            'timestamp': parse_iso8601(upload_date),
            'title': title,
        }

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re

 from .common import InfoExtractor
 from ..compat import compat_parse_qs
@@ -45,7 +44,7 @@ class BokeCCIE(BokeCCBaseIE):
     }]

     def _real_extract(self, url):
-        qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query'))
+        qs = compat_parse_qs(self._match_valid_url(url).group('query'))
         if not qs.get('vid') or not qs.get('uid'):
             raise ExtractorError('Invalid URL', expected=True)

Some files were not shown because too many files have changed in this diff.