mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2025-12-08 15:12:47 +01:00
Compare commits
520 Commits
2021.08.02
...
2021.11.10
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9ebf3c6ab9 | ||
|
|
7144b697fc | ||
|
|
2e9a445bc3 | ||
|
|
86c1a8aae4 | ||
|
|
ebfab36fca | ||
|
|
c15de6ffe6 | ||
|
|
56bb56f3cf | ||
|
|
c0599d4fe4 | ||
|
|
3f771f75d7 | ||
|
|
ed76230b3f | ||
|
|
89fcdff5d8 | ||
|
|
f98709af31 | ||
|
|
c586f9e8de | ||
|
|
59a7a13ef9 | ||
|
|
4476d2c764 | ||
|
|
aa9369a2d8 | ||
|
|
d54c6003ab | ||
|
|
1ee316a34a | ||
|
|
358247ed2a | ||
|
|
9b12e9a573 | ||
|
|
a109acbf82 | ||
|
|
a49891c761 | ||
|
|
582fad70f5 | ||
|
|
aeec0e44e2 | ||
|
|
d9190e4467 | ||
|
|
e1b7c54d78 | ||
|
|
244644c02c | ||
|
|
34921b4345 | ||
|
|
a331949df3 | ||
|
|
2c5e8a961e | ||
|
|
b515b37cc4 | ||
|
|
3c4eebf772 | ||
|
|
fb2d1ee6cc | ||
|
|
9cb070f9c0 | ||
|
|
2a6f8475ac | ||
|
|
73673ccff3 | ||
|
|
aeb2a9ad27 | ||
|
|
df6c409d1f | ||
|
|
a9d4da606d | ||
|
|
c18d4482b1 | ||
|
|
0f6518938d | ||
|
|
22cd06c452 | ||
|
|
a4211baff5 | ||
|
|
8913ef74d7 | ||
|
|
832e9000c7 | ||
|
|
673c0057e8 | ||
|
|
9af98e17bd | ||
|
|
31c49255bf | ||
|
|
bd93fd5d45 | ||
|
|
d89257f398 | ||
|
|
9bd979ca40 | ||
|
|
a1fc7ca074 | ||
|
|
c588b602d3 | ||
|
|
f0ffaa1621 | ||
|
|
0930b11fda | ||
|
|
a0bb6ce58d | ||
|
|
da48320075 | ||
|
|
5b6cb56207 | ||
|
|
b2f25dc242 | ||
|
|
2f9e021299 | ||
|
|
8dcf65c92e | ||
|
|
92592bd305 | ||
|
|
404f611f1c | ||
|
|
cd9ea4104b | ||
|
|
652fb0d446 | ||
|
|
6b301aaa34 | ||
|
|
fa0b816e37 | ||
|
|
5e7bbac305 | ||
|
|
10beccc980 | ||
|
|
e6ff66efc0 | ||
|
|
aeaf3b2b92 | ||
|
|
7b5f3f7c3d | ||
|
|
3783b5f1d1 | ||
|
|
ab630a57b9 | ||
|
|
16b0d7e621 | ||
|
|
5be76d1ab7 | ||
|
|
b7b186e7de | ||
|
|
bd1c792327 | ||
|
|
dc88e9be03 | ||
|
|
673944b001 | ||
|
|
0c873df3a8 | ||
|
|
c35ada3360 | ||
|
|
0db3bae879 | ||
|
|
48f796874d | ||
|
|
abad800058 | ||
|
|
08438d2ca5 | ||
|
|
7de837a5e3 | ||
|
|
7e59ca440a | ||
|
|
8e7ab2cf08 | ||
|
|
ad64a2323f | ||
|
|
f2fe69c7b0 | ||
|
|
fccf502118 | ||
|
|
9f1a1c36e6 | ||
|
|
96565c7e55 | ||
|
|
ec11a9f4a2 | ||
|
|
93c7f3398d | ||
|
|
1117579b94 | ||
|
|
0676afb126 | ||
|
|
49a57e70a9 | ||
|
|
457f6d6866 | ||
|
|
ad0090d0d2 | ||
|
|
d183af3cc1 | ||
|
|
3c239332b0 | ||
|
|
ab2ffab22d | ||
|
|
f656a23cb1 | ||
|
|
58ab5cbc58 | ||
|
|
17ec8bcfa9 | ||
|
|
0f6e60bb57 | ||
|
|
ef58c47637 | ||
|
|
19b824f693 | ||
|
|
f0ded3dad3 | ||
|
|
733d8e8f99 | ||
|
|
386cdfdb5b | ||
|
|
6e21fdd279 | ||
|
|
0e5927eebf | ||
|
|
27f817a84b | ||
|
|
d3c93ec2b7 | ||
|
|
b4b855ebc7 | ||
|
|
2cda6b401d | ||
|
|
aa7785f860 | ||
|
|
9fab498fbf | ||
|
|
e619d8a752 | ||
|
|
1e520b5535 | ||
|
|
176f1866cb | ||
|
|
17bddf3e95 | ||
|
|
2d9ec70423 | ||
|
|
e820fbaa6f | ||
|
|
b11d210156 | ||
|
|
24b0a72b30 | ||
|
|
aae16f6ed9 | ||
|
|
373475f035 | ||
|
|
920134b2e5 | ||
|
|
72ab768719 | ||
|
|
01b052b2b1 | ||
|
|
019a94f7d6 | ||
|
|
e69585f8c6 | ||
|
|
693ec74401 | ||
|
|
239df02103 | ||
|
|
18f96d129b | ||
|
|
ec3f6640c1 | ||
|
|
dd078970ba | ||
|
|
71ce444a3f | ||
|
|
580d3274e5 | ||
|
|
03b4de722a | ||
|
|
48ee10ee8a | ||
|
|
6ff34542d2 | ||
|
|
e3950399e4 | ||
|
|
974208e151 | ||
|
|
883d4b1eec | ||
|
|
a0c716bb61 | ||
|
|
d5a39f0bad | ||
|
|
a64907d0ac | ||
|
|
6993f78d1b | ||
|
|
993191c0d5 | ||
|
|
fc5c8b6492 | ||
|
|
b836dc94f2 | ||
|
|
c111cefa5d | ||
|
|
975a0d0df9 | ||
|
|
a387b69a7c | ||
|
|
ecdc9049c0 | ||
|
|
7b38649845 | ||
|
|
e88d44c6ee | ||
|
|
a2160aa45f | ||
|
|
cc16383ff3 | ||
|
|
a903d8285c | ||
|
|
9dda99f2fc | ||
|
|
ba10757412 | ||
|
|
e6faf2be36 | ||
|
|
ed39cac53d | ||
|
|
a169858f24 | ||
|
|
0481e266f5 | ||
|
|
2c4bba96ac | ||
|
|
e8f726a57f | ||
|
|
8063de5109 | ||
|
|
dec0d56fa9 | ||
|
|
21186af70a | ||
|
|
84999521c8 | ||
|
|
d1d5c08f29 | ||
|
|
2e01ba6218 | ||
|
|
c9652aa418 | ||
|
|
91b6c884c9 | ||
|
|
28fe35b4e3 | ||
|
|
aa9a92fdbb | ||
|
|
a170527e1f | ||
|
|
90d55df330 | ||
|
|
81bcd43a03 | ||
|
|
b5ae35ee6d | ||
|
|
4e3b637d5b | ||
|
|
8cd69fc407 | ||
|
|
2614f64600 | ||
|
|
b922db9fe5 | ||
|
|
f2cad2e496 | ||
|
|
d6124e191e | ||
|
|
8c6f4daa4c | ||
|
|
ac56cf38a4 | ||
|
|
c08b8873ea | ||
|
|
819e05319b | ||
|
|
fee3f44f5f | ||
|
|
705e7c2005 | ||
|
|
49e7e9c3ce | ||
|
|
8472674399 | ||
|
|
1276a43a77 | ||
|
|
519804a92f | ||
|
|
1b6bb4a85a | ||
|
|
644149afec | ||
|
|
4e3d1898a8 | ||
|
|
f85e6be42e | ||
|
|
762e509d91 | ||
|
|
d92125aeba | ||
|
|
0f0ac87be3 | ||
|
|
755203fc3f | ||
|
|
943d5ab133 | ||
|
|
3001a84dca | ||
|
|
ebf2fb4d61 | ||
|
|
efc947fb3e | ||
|
|
b11c04a8ae | ||
|
|
5d535b4a55 | ||
|
|
a1c3967307 | ||
|
|
e919569e67 | ||
|
|
ff1dec819a | ||
|
|
9359f3d4f0 | ||
|
|
0eaec13ba6 | ||
|
|
ad095c4283 | ||
|
|
e6f21b3d92 | ||
|
|
d710cc6d36 | ||
|
|
3ae5e79774 | ||
|
|
8e3fd7e034 | ||
|
|
80c03fa98f | ||
|
|
1f2a268bd3 | ||
|
|
804ca01cc7 | ||
|
|
851876095b | ||
|
|
2d997542ca | ||
|
|
7756277882 | ||
|
|
7687c8ac6e | ||
|
|
80c360d7aa | ||
|
|
250a938de8 | ||
|
|
f1d42a83ab | ||
|
|
3cf4b91dc5 | ||
|
|
fecb20a503 | ||
|
|
360167b9fc | ||
|
|
28234287f1 | ||
|
|
91dd88b90f | ||
|
|
d31dab7084 | ||
|
|
c470901ccf | ||
|
|
2333ea1029 | ||
|
|
9a13345439 | ||
|
|
524e2e4fda | ||
|
|
f440b14f87 | ||
|
|
8dc831f715 | ||
|
|
e99b2d2771 | ||
|
|
1fed277349 | ||
|
|
0ef787d773 | ||
|
|
a5de4099cb | ||
|
|
ff1c7fc9d3 | ||
|
|
600e900300 | ||
|
|
20b91b9b63 | ||
|
|
4c88ff87fc | ||
|
|
e27cc5d864 | ||
|
|
eb6d4ad1ca | ||
|
|
99e9e001de | ||
|
|
51ff9ca0b0 | ||
|
|
b19404591a | ||
|
|
1f8471e22c | ||
|
|
77c4a9ef68 | ||
|
|
8f70b0b82f | ||
|
|
be867b03f5 | ||
|
|
1813a6ccd4 | ||
|
|
8100c77223 | ||
|
|
9ada988bfc | ||
|
|
d1a7768432 | ||
|
|
49fa4d9af7 | ||
|
|
ee2b3563f3 | ||
|
|
bdc196a444 | ||
|
|
388bc4a640 | ||
|
|
50eff38c1c | ||
|
|
4be9dbdc24 | ||
|
|
a21e0ab1a1 | ||
|
|
a76e2e0f88 | ||
|
|
bd50a52b0d | ||
|
|
c12977bdc4 | ||
|
|
f6d8776d34 | ||
|
|
d806c9fd97 | ||
|
|
5e3f2f8fc4 | ||
|
|
1009f67c2a | ||
|
|
bd6f722de8 | ||
|
|
d9d8b85747 | ||
|
|
daf7ac2b92 | ||
|
|
96933fc1b6 | ||
|
|
0d32e124c6 | ||
|
|
cb2ec90e91 | ||
|
|
3cd786dbd7 | ||
|
|
1b629e1b4c | ||
|
|
8f8e8eba24 | ||
|
|
09906f554d | ||
|
|
a63d9bd0b0 | ||
|
|
f137e4c27c | ||
|
|
4762621925 | ||
|
|
57aa7b8511 | ||
|
|
9c1c3ec016 | ||
|
|
f9cc0161e6 | ||
|
|
c6af2dd8e5 | ||
|
|
7738bd3272 | ||
|
|
7c37ff97d3 | ||
|
|
d47f46e17e | ||
|
|
298bf1d275 | ||
|
|
d1b39ad844 | ||
|
|
edf65256aa | ||
|
|
7303f84abe | ||
|
|
f5aa5cfbff | ||
|
|
f1f6ca78b4 | ||
|
|
2fac2e9136 | ||
|
|
23dd2d9a32 | ||
|
|
b89378a69a | ||
|
|
0001fcb586 | ||
|
|
c589c1d395 | ||
|
|
f7590d4764 | ||
|
|
dbf7eca917 | ||
|
|
d21bba7853 | ||
|
|
a8cb7eca61 | ||
|
|
92790da2bb | ||
|
|
b5a39ed43b | ||
|
|
cc33cc4395 | ||
|
|
1722099ded | ||
|
|
40b18348e7 | ||
|
|
e9a30b181e | ||
|
|
9c95ac677e | ||
|
|
ea706726d6 | ||
|
|
f60990ddfc | ||
|
|
ad226b1dc9 | ||
|
|
ca46b94134 | ||
|
|
67ad7759af | ||
|
|
d5fe04f5c7 | ||
|
|
03c862794f | ||
|
|
0fd6661edb | ||
|
|
02c7ae8104 | ||
|
|
16f7e6be3a | ||
|
|
ffecd3034b | ||
|
|
1c5ce74c04 | ||
|
|
81a136b80f | ||
|
|
eab3f867e2 | ||
|
|
a7e999beec | ||
|
|
71407b3eca | ||
|
|
dc9de9cbd2 | ||
|
|
92ddaa415e | ||
|
|
b6de707d13 | ||
|
|
bccdbd22d5 | ||
|
|
bd9ff55bcd | ||
|
|
526d74ec5a | ||
|
|
e04a1ff92e | ||
|
|
aa6c25309a | ||
|
|
d98b006b85 | ||
|
|
265a7a8ee5 | ||
|
|
826446bd82 | ||
|
|
bc79491368 | ||
|
|
421ddcb8b4 | ||
|
|
c0ac49bcca | ||
|
|
02def2714c | ||
|
|
f9be9cb9fd | ||
|
|
4614bc22c1 | ||
|
|
8e5fecc88c | ||
|
|
165efb823b | ||
|
|
dd594deb2a | ||
|
|
409e18286e | ||
|
|
8113999995 | ||
|
|
8026e50152 | ||
|
|
9ee4f0bb5b | ||
|
|
be4d9f4cd9 | ||
|
|
347182a0cd | ||
|
|
a7429aa9fa | ||
|
|
7a340e0df3 | ||
|
|
f0e5366335 | ||
|
|
49ca8db06b | ||
|
|
ee57a19d84 | ||
|
|
908b56eaf7 | ||
|
|
1461d7bef2 | ||
|
|
8a2d992389 | ||
|
|
8e25d624df | ||
|
|
e88dabb35e | ||
|
|
8eb7ba82ca | ||
|
|
b2eeee0ce0 | ||
|
|
875cfb8cbc | ||
|
|
b8773e63f0 | ||
|
|
05664a2f7b | ||
|
|
2ee6389bef | ||
|
|
62cdaaf0e2 | ||
|
|
419508eabb | ||
|
|
54153fb71b | ||
|
|
1dd6d9ca9d | ||
|
|
356ac009d3 | ||
|
|
9a292a620c | ||
|
|
7e55872286 | ||
|
|
2fc14b9925 | ||
|
|
58f68fe703 | ||
|
|
abafce59a1 | ||
|
|
2e7781a93c | ||
|
|
bc36bc36a1 | ||
|
|
d75201a873 | ||
|
|
691d5823d6 | ||
|
|
c311988d19 | ||
|
|
26e8e04454 | ||
|
|
198e3a04c9 | ||
|
|
61bfacb233 | ||
|
|
85a0021fb3 | ||
|
|
7a45a1590b | ||
|
|
1c36c1f320 | ||
|
|
e0493e90fc | ||
|
|
1931a55ee8 | ||
|
|
63b1ad0f05 | ||
|
|
0bb1bc1b10 | ||
|
|
45842107b9 | ||
|
|
6251555f1c | ||
|
|
330690a214 | ||
|
|
91d4b32bb6 | ||
|
|
a181cd0c60 | ||
|
|
ea81966e64 | ||
|
|
2acf2ce5cb | ||
|
|
f7f18f905c | ||
|
|
4f8b70b593 | ||
|
|
e43e9f3c2c | ||
|
|
71dd5d4a00 | ||
|
|
52a2f994c9 | ||
|
|
8b7491c8d1 | ||
|
|
251ae04e6a | ||
|
|
5bc4a65eea | ||
|
|
1151c4079a | ||
|
|
88acdbc269 | ||
|
|
9b5fa9ee7c | ||
|
|
aca5774e68 | ||
|
|
3fb4e21b38 | ||
|
|
4dfbf8696b | ||
|
|
8fc54b1230 | ||
|
|
da33e35b05 | ||
|
|
5ad28e7ffd | ||
|
|
f79ec47d71 | ||
|
|
45b0596290 | ||
|
|
96c23f3be8 | ||
|
|
6e7dfe4959 | ||
|
|
c34f505b04 | ||
|
|
14183d1f80 | ||
|
|
58adec4677 | ||
|
|
9e598870dd | ||
|
|
8f18aca871 | ||
|
|
3ad56b4236 | ||
|
|
5d62709bc7 | ||
|
|
7581d2467a | ||
|
|
5fa206fb54 | ||
|
|
df2a5633da | ||
|
|
7a6742b5f9 | ||
|
|
e040bb0a41 | ||
|
|
f8fabc9930 | ||
|
|
d967c68e4c | ||
|
|
3dd39c5f9a | ||
|
|
be44eefd5e | ||
|
|
f775c83110 | ||
|
|
b714b41f81 | ||
|
|
31654882e9 | ||
|
|
86c66b2d3e | ||
|
|
37242e56f2 | ||
|
|
6c7274ecd2 | ||
|
|
5c333d7496 | ||
|
|
641ad5d813 | ||
|
|
0715f7e19b | ||
|
|
a8731fcc1d | ||
|
|
5a64127f94 | ||
|
|
ade6dc5e9e | ||
|
|
418964fa91 | ||
|
|
c196640ff1 | ||
|
|
60c8fc73c6 | ||
|
|
bc8745480e | ||
|
|
ff5e16f2f6 | ||
|
|
be2fc5b212 | ||
|
|
7be9ccff0b | ||
|
|
245d43cacf | ||
|
|
246fb276e0 | ||
|
|
6e6e0d95b3 | ||
|
|
25a3f4f5d6 | ||
|
|
ad3dc496bb | ||
|
|
2831b4686c | ||
|
|
8c0ae192a4 | ||
|
|
e9f4ccd19e | ||
|
|
a38bd1defa | ||
|
|
476febeb3a | ||
|
|
b6a35ad83b | ||
|
|
bfd56b74b9 | ||
|
|
858a65ecc1 | ||
|
|
3b34e38813 | ||
|
|
3448870205 | ||
|
|
b868936cd6 | ||
|
|
c681cb5d93 | ||
|
|
379e44ed3c | ||
|
|
243c57cfe8 | ||
|
|
28f436bad0 | ||
|
|
2b8a2973bd | ||
|
|
b7b04c782e | ||
|
|
6e84b21559 | ||
|
|
575e17a1b9 | ||
|
|
57015a4a3f | ||
|
|
9cc1a3130a | ||
|
|
b51d2ae3ca | ||
|
|
fee5f0c909 | ||
|
|
7bb6434767 | ||
|
|
124bc071ee | ||
|
|
a047eeb6d2 | ||
|
|
77b87f0519 | ||
|
|
678da2f21b | ||
|
|
cc3fa8d39d | ||
|
|
89efdc15dd | ||
|
|
8012d892bd | ||
|
|
9d65e7bd6d | ||
|
|
36576d7c4c | ||
|
|
bb36a55c41 | ||
|
|
3dbb2a9dcb | ||
|
|
9997eee4af | ||
|
|
3e376d183e | ||
|
|
888299e6ca | ||
|
|
c31be5b009 | ||
|
|
e5611e8eda | ||
|
|
8e6cc12c80 | ||
|
|
e980017ac8 | ||
|
|
e9d9efc0f2 |
2
.github/FUNDING.yml
vendored
2
.github/FUNDING.yml
vendored
@@ -10,4 +10,4 @@ liberapay: # Replace with a single Liberapay username
|
|||||||
issuehunt: # Replace with a single IssueHunt username
|
issuehunt: # Replace with a single IssueHunt username
|
||||||
otechie: # Replace with a single Otechie username
|
otechie: # Replace with a single Otechie username
|
||||||
|
|
||||||
custom: ['https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md']
|
custom: ['https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators']
|
||||||
|
|||||||
70
.github/ISSUE_TEMPLATE/1_broken_site.md
vendored
70
.github/ISSUE_TEMPLATE/1_broken_site.md
vendored
@@ -1,70 +0,0 @@
|
|||||||
---
|
|
||||||
name: Broken site support
|
|
||||||
about: Report broken or misfunctioning site
|
|
||||||
title: "[Broken]"
|
|
||||||
labels: Broken
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.24. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
|
||||||
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
|
|
||||||
- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a broken site support
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **2021.07.24**
|
|
||||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
|
||||||
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
|
|
||||||
- [ ] I've searched the bugtracker for similar issues including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Verbose log
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide the complete verbose output of yt-dlp that clearly demonstrates the problem.
|
|
||||||
Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
|
|
||||||
[debug] System config: []
|
|
||||||
[debug] User config: []
|
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
|
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
|
||||||
[debug] yt-dlp version 2021.07.24
|
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
|
||||||
[debug] Proxy map: {}
|
|
||||||
<more lines>
|
|
||||||
-->
|
|
||||||
|
|
||||||
```
|
|
||||||
PASTE VERBOSE LOG HERE
|
|
||||||
|
|
||||||
```
|
|
||||||
<!--
|
|
||||||
Do not remove the above ```
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your issue in an arbitrary form. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
If work on your issue requires account credentials please provide them or explain how one can obtain them.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
63
.github/ISSUE_TEMPLATE/1_broken_site.yml
vendored
Normal file
63
.github/ISSUE_TEMPLATE/1_broken_site.yml
vendored
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
name: Broken site support
|
||||||
|
description: Report broken or misfunctioning site
|
||||||
|
labels: [triage, extractor-bug]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a broken site
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
|
- type: input
|
||||||
|
id: region
|
||||||
|
attributes:
|
||||||
|
label: Region
|
||||||
|
description: "Enter the region the site is accessible from"
|
||||||
|
placeholder: "India"
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your issue in an arbitrary form.
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: log
|
||||||
|
attributes:
|
||||||
|
label: Verbose log
|
||||||
|
description: |
|
||||||
|
Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
|
||||||
|
Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
|
||||||
|
It should look similar to this:
|
||||||
|
placeholder: |
|
||||||
|
[debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
|
||||||
|
[debug] Portable config file: yt-dlp.conf
|
||||||
|
[debug] Portable config: ['-i']
|
||||||
|
[debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
|
||||||
|
[debug] yt-dlp version 2021.11.10.1 (exe)
|
||||||
|
[debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
|
||||||
|
[debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
|
||||||
|
[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
yt-dlp is up to date (2021.11.10.1)
|
||||||
|
<more lines>
|
||||||
|
render: shell
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
56
.github/ISSUE_TEMPLATE/2_site_support_request.md
vendored
56
.github/ISSUE_TEMPLATE/2_site_support_request.md
vendored
@@ -1,56 +0,0 @@
|
|||||||
---
|
|
||||||
name: Site support request
|
|
||||||
about: Request support for a new site
|
|
||||||
title: "[Site Request]"
|
|
||||||
labels: Request
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.24. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
|
||||||
- Make sure that site you are requesting is not dedicated to copyright infringement, see https://github.com/yt-dlp/yt-dlp. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
|
|
||||||
- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a new site support request
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **2021.07.24**
|
|
||||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
|
||||||
- [ ] I've checked that none of provided URLs violate any copyrights
|
|
||||||
- [ ] I've searched the bugtracker for similar site support requests including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Example URLs
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide all kinds of example URLs support for which should be included. Replace following example URLs by yours.
|
|
||||||
-->
|
|
||||||
|
|
||||||
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
|
|
||||||
- Single video: https://youtu.be/BaW_jenozKc
|
|
||||||
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide any additional information.
|
|
||||||
If work on your issue requires account credentials please provide them or explain how one can obtain them.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
74
.github/ISSUE_TEMPLATE/2_site_support_request.yml
vendored
Normal file
74
.github/ISSUE_TEMPLATE/2_site_support_request.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
name: Site support request
|
||||||
|
description: Request support for a new site
|
||||||
|
labels: [triage, site-request]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a new site support request
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've checked that none of provided URLs [violate any copyrights](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) or contain any [DRM](https://en.wikipedia.org/wiki/Digital_rights_management) to the best of my knowledge
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and am willing to share it if required
|
||||||
|
- type: input
|
||||||
|
id: region
|
||||||
|
attributes:
|
||||||
|
label: Region
|
||||||
|
description: "Enter the region the site is accessible from"
|
||||||
|
placeholder: "India"
|
||||||
|
- type: textarea
|
||||||
|
id: example-urls
|
||||||
|
attributes:
|
||||||
|
label: Example URLs
|
||||||
|
description: |
|
||||||
|
Provide all kinds of example URLs for which support should be added
|
||||||
|
value: |
|
||||||
|
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
|
||||||
|
- Single video: https://youtu.be/BaW_jenozKc
|
||||||
|
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide any additional information
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: log
|
||||||
|
attributes:
|
||||||
|
label: Verbose log
|
||||||
|
description: |
|
||||||
|
Provide the complete verbose output **using one of the example URLs provided above**.
|
||||||
|
Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
|
||||||
|
It should look similar to this:
|
||||||
|
placeholder: |
|
||||||
|
[debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
|
||||||
|
[debug] Portable config file: yt-dlp.conf
|
||||||
|
[debug] Portable config: ['-i']
|
||||||
|
[debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
|
||||||
|
[debug] yt-dlp version 2021.11.10.1 (exe)
|
||||||
|
[debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
|
||||||
|
[debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
|
||||||
|
[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
yt-dlp is up to date (2021.11.10.1)
|
||||||
|
<more lines>
|
||||||
|
render: shell
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
40
.github/ISSUE_TEMPLATE/3_site_feature_request.md
vendored
40
.github/ISSUE_TEMPLATE/3_site_feature_request.md
vendored
@@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
name: Site feature request
|
|
||||||
about: Request a new functionality for a site
|
|
||||||
title: "[Site Request]"
|
|
||||||
labels: Request
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.24. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a site feature request
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **2021.07.24**
|
|
||||||
- [ ] I've searched the bugtracker for similar site feature requests including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your site feature request in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
49
.github/ISSUE_TEMPLATE/3_site_feature_request.yml
vendored
Normal file
49
.github/ISSUE_TEMPLATE/3_site_feature_request.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
name: Site feature request
|
||||||
|
description: Request a new functionality for a site
|
||||||
|
labels: [triage, site-enhancement]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a site feature request
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
|
- type: input
|
||||||
|
id: region
|
||||||
|
attributes:
|
||||||
|
label: Region
|
||||||
|
description: "Enter the region the site is accessible from"
|
||||||
|
placeholder: "India"
|
||||||
|
- type: textarea
|
||||||
|
id: example-urls
|
||||||
|
attributes:
|
||||||
|
label: Example URLs
|
||||||
|
description: |
|
||||||
|
Example URLs that can be used to demonstrate the requested feature
|
||||||
|
value: |
|
||||||
|
https://www.youtube.com/watch?v=BaW_jenozKc
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your site feature request in an arbitrary form.
|
||||||
|
Please make sure the description is worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
72
.github/ISSUE_TEMPLATE/4_bug_report.md
vendored
72
.github/ISSUE_TEMPLATE/4_bug_report.md
vendored
@@ -1,72 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Report a bug unrelated to any particular site or extractor
|
|
||||||
title: ''
|
|
||||||
labels: ''
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.24. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
|
||||||
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
|
|
||||||
- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Read bugs section in FAQ: https://github.com/yt-dlp/yt-dlp
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a broken site support issue
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **2021.07.24**
|
|
||||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
|
||||||
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
|
|
||||||
- [ ] I've searched the bugtracker for similar bug reports including closed ones
|
|
||||||
- [ ] I've read bugs section in FAQ
|
|
||||||
|
|
||||||
|
|
||||||
## Verbose log
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide the complete verbose output of yt-dlp that clearly demonstrates the problem.
|
|
||||||
Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
|
|
||||||
[debug] System config: []
|
|
||||||
[debug] User config: []
|
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
|
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
|
||||||
[debug] yt-dlp version 2021.07.24
|
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
|
||||||
[debug] Proxy map: {}
|
|
||||||
<more lines>
|
|
||||||
-->
|
|
||||||
|
|
||||||
```
|
|
||||||
PASTE VERBOSE LOG HERE
|
|
||||||
|
|
||||||
```
|
|
||||||
<!--
|
|
||||||
Do not remove the above ```
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
If work on your issue requires account credentials please provide them or explain how one can obtain them.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
57
.github/ISSUE_TEMPLATE/4_bug_report.yml
vendored
Normal file
57
.github/ISSUE_TEMPLATE/4_bug_report.yml
vendored
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
name: Bug report
|
||||||
|
description: Report a bug unrelated to any particular site or extractor
|
||||||
|
labels: [triage,bug]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a bug unrelated to a specific site
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your issue in an arbitrary form.
|
||||||
|
Please make sure the description is worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: log
|
||||||
|
attributes:
|
||||||
|
label: Verbose log
|
||||||
|
description: |
|
||||||
|
Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
|
||||||
|
Add the `-Uv` flag to **your** command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
|
||||||
|
It should look similar to this:
|
||||||
|
placeholder: |
|
||||||
|
[debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
|
||||||
|
[debug] Portable config file: yt-dlp.conf
|
||||||
|
[debug] Portable config: ['-i']
|
||||||
|
[debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
|
||||||
|
[debug] yt-dlp version 2021.11.10.1 (exe)
|
||||||
|
[debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
|
||||||
|
[debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
|
||||||
|
[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
yt-dlp is up to date (2021.11.10.1)
|
||||||
|
<more lines>
|
||||||
|
render: shell
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
40
.github/ISSUE_TEMPLATE/5_feature_request.md
vendored
40
.github/ISSUE_TEMPLATE/5_feature_request.md
vendored
@@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
name: Feature request
|
|
||||||
about: Request a new functionality unrelated to any particular site or extractor
|
|
||||||
title: "[Feature Request]"
|
|
||||||
labels: Request
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is 2021.07.24. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a feature request
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **2021.07.24**
|
|
||||||
- [ ] I've searched the bugtracker for similar feature requests including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
30
.github/ISSUE_TEMPLATE/5_feature_request.yml
vendored
Normal file
30
.github/ISSUE_TEMPLATE/5_feature_request.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Feature request request
|
||||||
|
description: Request a new functionality unrelated to any particular site or extractor
|
||||||
|
labels: [triage, enhancement]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a feature request
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your site feature request in an arbitrary form.
|
||||||
|
Please make sure the description is worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
40
.github/ISSUE_TEMPLATE/6_question.md
vendored
40
.github/ISSUE_TEMPLATE/6_question.md
vendored
@@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
name: Ask question
|
|
||||||
about: Ask youtube-dl related question
|
|
||||||
title: "[Question]"
|
|
||||||
labels: question
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- Look through the README (https://github.com/yt-dlp/yt-dlp) and FAQ (https://github.com/yt-dlp/yt-dlp) for similar questions
|
|
||||||
- Search the bugtracker for similar questions: https://github.com/yt-dlp/yt-dlp
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm asking a question
|
|
||||||
- [ ] I've looked through the README and FAQ for similar questions
|
|
||||||
- [ ] I've searched the bugtracker for similar questions including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Question
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Ask your question in an arbitrary form. Please make sure it's worded well enough to be understood, see https://github.com/yt-dlp/yt-dlp.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE QUESTION HERE
|
|
||||||
30
.github/ISSUE_TEMPLATE/6_question.yml
vendored
Normal file
30
.github/ISSUE_TEMPLATE/6_question.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Ask question
|
||||||
|
description: Ask yt-dlp related question
|
||||||
|
labels: [question]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm asking a question and not reporting a bug/feature request
|
||||||
|
required: true
|
||||||
|
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions including closed ones
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: question
|
||||||
|
attributes:
|
||||||
|
label: Question
|
||||||
|
description: |
|
||||||
|
Ask your question in an arbitrary form.
|
||||||
|
Please make sure it's worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information and as much context and examples as possible
|
||||||
|
placeholder: WRITE QUESTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
blank_issues_enabled: false
|
||||||
|
contact_links:
|
||||||
|
- name: Get help from the community on Discord
|
||||||
|
url: https://discord.gg/H5MNcFW63r
|
||||||
|
about: Join the yt-dlp Discord for community-powered support!
|
||||||
70
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.md
vendored
70
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.md
vendored
@@ -1,70 +0,0 @@
|
|||||||
---
|
|
||||||
name: Broken site support
|
|
||||||
about: Report broken or misfunctioning site
|
|
||||||
title: "[Broken]"
|
|
||||||
labels: Broken
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
|
||||||
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
|
|
||||||
- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a broken site support
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **%(version)s**
|
|
||||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
|
||||||
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
|
|
||||||
- [ ] I've searched the bugtracker for similar issues including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Verbose log
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide the complete verbose output of yt-dlp that clearly demonstrates the problem.
|
|
||||||
Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
|
|
||||||
[debug] System config: []
|
|
||||||
[debug] User config: []
|
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
|
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
|
||||||
[debug] yt-dlp version %(version)s
|
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
|
||||||
[debug] Proxy map: {}
|
|
||||||
<more lines>
|
|
||||||
-->
|
|
||||||
|
|
||||||
```
|
|
||||||
PASTE VERBOSE LOG HERE
|
|
||||||
|
|
||||||
```
|
|
||||||
<!--
|
|
||||||
Do not remove the above ```
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your issue in an arbitrary form. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
If work on your issue requires account credentials please provide them or explain how one can obtain them.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
63
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml
vendored
Normal file
63
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml
vendored
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
name: Broken site support
|
||||||
|
description: Report broken or misfunctioning site
|
||||||
|
labels: [triage, extractor-bug]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a broken site
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
|
- type: input
|
||||||
|
id: region
|
||||||
|
attributes:
|
||||||
|
label: Region
|
||||||
|
description: "Enter the region the site is accessible from"
|
||||||
|
placeholder: "India"
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your issue in an arbitrary form.
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: log
|
||||||
|
attributes:
|
||||||
|
label: Verbose log
|
||||||
|
description: |
|
||||||
|
Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
|
||||||
|
Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
|
||||||
|
It should look similar to this:
|
||||||
|
placeholder: |
|
||||||
|
[debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
|
||||||
|
[debug] Portable config file: yt-dlp.conf
|
||||||
|
[debug] Portable config: ['-i']
|
||||||
|
[debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
|
||||||
|
[debug] yt-dlp version %(version)s (exe)
|
||||||
|
[debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
|
||||||
|
[debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
|
||||||
|
[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
yt-dlp is up to date (%(version)s)
|
||||||
|
<more lines>
|
||||||
|
render: shell
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
@@ -1,56 +0,0 @@
|
|||||||
---
|
|
||||||
name: Site support request
|
|
||||||
about: Request support for a new site
|
|
||||||
title: "[Site Request]"
|
|
||||||
labels: Request
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
|
||||||
- Make sure that site you are requesting is not dedicated to copyright infringement, see https://github.com/yt-dlp/yt-dlp. yt-dlp does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
|
|
||||||
- Search the bugtracker for similar site support requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a new site support request
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **%(version)s**
|
|
||||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
|
||||||
- [ ] I've checked that none of provided URLs violate any copyrights
|
|
||||||
- [ ] I've searched the bugtracker for similar site support requests including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Example URLs
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide all kinds of example URLs support for which should be included. Replace following example URLs by yours.
|
|
||||||
-->
|
|
||||||
|
|
||||||
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
|
|
||||||
- Single video: https://youtu.be/BaW_jenozKc
|
|
||||||
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide any additional information.
|
|
||||||
If work on your issue requires account credentials please provide them or explain how one can obtain them.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
74
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml
vendored
Normal file
74
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
name: Site support request
|
||||||
|
description: Request support for a new site
|
||||||
|
labels: [triage, site-request]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a new site support request
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've checked that none of provided URLs [violate any copyrights](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) or contain any [DRM](https://en.wikipedia.org/wiki/Digital_rights_management) to the best of my knowledge
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and am willing to share it if required
|
||||||
|
- type: input
|
||||||
|
id: region
|
||||||
|
attributes:
|
||||||
|
label: Region
|
||||||
|
description: "Enter the region the site is accessible from"
|
||||||
|
placeholder: "India"
|
||||||
|
- type: textarea
|
||||||
|
id: example-urls
|
||||||
|
attributes:
|
||||||
|
label: Example URLs
|
||||||
|
description: |
|
||||||
|
Provide all kinds of example URLs for which support should be added
|
||||||
|
value: |
|
||||||
|
- Single video: https://www.youtube.com/watch?v=BaW_jenozKc
|
||||||
|
- Single video: https://youtu.be/BaW_jenozKc
|
||||||
|
- Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide any additional information
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: log
|
||||||
|
attributes:
|
||||||
|
label: Verbose log
|
||||||
|
description: |
|
||||||
|
Provide the complete verbose output **using one of the example URLs provided above**.
|
||||||
|
Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
|
||||||
|
It should look similar to this:
|
||||||
|
placeholder: |
|
||||||
|
[debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
|
||||||
|
[debug] Portable config file: yt-dlp.conf
|
||||||
|
[debug] Portable config: ['-i']
|
||||||
|
[debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
|
||||||
|
[debug] yt-dlp version %(version)s (exe)
|
||||||
|
[debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
|
||||||
|
[debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
|
||||||
|
[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
yt-dlp is up to date (%(version)s)
|
||||||
|
<more lines>
|
||||||
|
render: shell
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
name: Site feature request
|
|
||||||
about: Request a new functionality for a site
|
|
||||||
title: "[Site Request]"
|
|
||||||
labels: Request
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Search the bugtracker for similar site feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a site feature request
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **%(version)s**
|
|
||||||
- [ ] I've searched the bugtracker for similar site feature requests including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your site feature request in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
49
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml
vendored
Normal file
49
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
name: Site feature request
|
||||||
|
description: Request a new functionality for a site
|
||||||
|
labels: [triage, site-enhancement]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a site feature request
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
|
||||||
|
- type: input
|
||||||
|
id: region
|
||||||
|
attributes:
|
||||||
|
label: Region
|
||||||
|
description: "Enter the region the site is accessible from"
|
||||||
|
placeholder: "India"
|
||||||
|
- type: textarea
|
||||||
|
id: example-urls
|
||||||
|
attributes:
|
||||||
|
label: Example URLs
|
||||||
|
description: |
|
||||||
|
Example URLs that can be used to demonstrate the requested feature
|
||||||
|
value: |
|
||||||
|
https://www.youtube.com/watch?v=BaW_jenozKc
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your site feature request in an arbitrary form.
|
||||||
|
Please make sure the description is worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
72
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.md
vendored
72
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.md
vendored
@@ -1,72 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Report a bug unrelated to any particular site or extractor
|
|
||||||
title: ''
|
|
||||||
labels: ''
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
|
|
||||||
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in https://github.com/yt-dlp/yt-dlp.
|
|
||||||
- Search the bugtracker for similar issues: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Read bugs section in FAQ: https://github.com/yt-dlp/yt-dlp
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a broken site support issue
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **%(version)s**
|
|
||||||
- [ ] I've checked that all provided URLs are alive and playable in a browser
|
|
||||||
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
|
|
||||||
- [ ] I've searched the bugtracker for similar bug reports including closed ones
|
|
||||||
- [ ] I've read bugs section in FAQ
|
|
||||||
|
|
||||||
|
|
||||||
## Verbose log
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide the complete verbose output of yt-dlp that clearly demonstrates the problem.
|
|
||||||
Add the `-v` flag to your command line you run yt-dlp with (`yt-dlp -v <your command line>`), copy the WHOLE output and insert it below. It should look similar to this:
|
|
||||||
[debug] System config: []
|
|
||||||
[debug] User config: []
|
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKc']
|
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
|
||||||
[debug] yt-dlp version %(version)s
|
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
|
||||||
[debug] Proxy map: {}
|
|
||||||
<more lines>
|
|
||||||
-->
|
|
||||||
|
|
||||||
```
|
|
||||||
PASTE VERBOSE LOG HERE
|
|
||||||
|
|
||||||
```
|
|
||||||
<!--
|
|
||||||
Do not remove the above ```
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
If work on your issue requires account credentials please provide them or explain how one can obtain them.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
57
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml
vendored
Normal file
57
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml
vendored
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
name: Bug report
|
||||||
|
description: Report a bug unrelated to any particular site or extractor
|
||||||
|
labels: [triage,bug]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a bug unrelated to a specific site
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all provided URLs are alive and playable in a browser
|
||||||
|
required: true
|
||||||
|
- label: I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your issue in an arbitrary form.
|
||||||
|
Please make sure the description is worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: log
|
||||||
|
attributes:
|
||||||
|
label: Verbose log
|
||||||
|
description: |
|
||||||
|
Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
|
||||||
|
Add the `-Uv` flag to **your** command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
|
||||||
|
It should look similar to this:
|
||||||
|
placeholder: |
|
||||||
|
[debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
|
||||||
|
[debug] Portable config file: yt-dlp.conf
|
||||||
|
[debug] Portable config: ['-i']
|
||||||
|
[debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
|
||||||
|
[debug] yt-dlp version %(version)s (exe)
|
||||||
|
[debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
|
||||||
|
[debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
|
||||||
|
[debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
|
||||||
|
[debug] Proxy map: {}
|
||||||
|
yt-dlp is up to date (%(version)s)
|
||||||
|
<more lines>
|
||||||
|
render: shell
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
40
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.md
vendored
40
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.md
vendored
@@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
name: Feature request
|
|
||||||
about: Request a new functionality unrelated to any particular site or extractor
|
|
||||||
title: "[Feature Request]"
|
|
||||||
labels: Request
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
WARNING!
|
|
||||||
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
|
|
||||||
## Checklist
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
|
||||||
- First of, make sure you are using the latest version of yt-dlp. Run `yt-dlp --version` and ensure your version is %(version)s. If it's not, see https://github.com/yt-dlp/yt-dlp on how to update. Issues with outdated version will be REJECTED.
|
|
||||||
- Search the bugtracker for similar feature requests: https://github.com/yt-dlp/yt-dlp. DO NOT post duplicates.
|
|
||||||
- Finally, put x into all relevant boxes like this [x] (Dont forget to delete the empty space)
|
|
||||||
-->
|
|
||||||
|
|
||||||
- [ ] I'm reporting a feature request
|
|
||||||
- [ ] I've verified that I'm running yt-dlp version **%(version)s**
|
|
||||||
- [ ] I've searched the bugtracker for similar feature requests including closed ones
|
|
||||||
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Provide an explanation of your issue in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
|
|
||||||
-->
|
|
||||||
|
|
||||||
WRITE DESCRIPTION HERE
|
|
||||||
30
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml
vendored
Normal file
30
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Feature request request
|
||||||
|
description: Request a new functionality unrelated to any particular site or extractor
|
||||||
|
labels: [triage, enhancement]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm reporting a feature request
|
||||||
|
required: true
|
||||||
|
- label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: description
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: |
|
||||||
|
Provide an explanation of your site feature request in an arbitrary form.
|
||||||
|
Please make sure the description is worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information, any suggested solutions, and as much context and examples as possible
|
||||||
|
placeholder: WRITE DESCRIPTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
30
.github/ISSUE_TEMPLATE_tmpl/6_question.yml
vendored
Normal file
30
.github/ISSUE_TEMPLATE_tmpl/6_question.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Ask question
|
||||||
|
description: Ask yt-dlp related question
|
||||||
|
labels: [question]
|
||||||
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
id: checklist
|
||||||
|
attributes:
|
||||||
|
label: Checklist
|
||||||
|
description: |
|
||||||
|
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
|
||||||
|
options:
|
||||||
|
- label: I'm asking a question and not reporting a bug/feature request
|
||||||
|
required: true
|
||||||
|
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
|
||||||
|
required: true
|
||||||
|
- label: I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
|
||||||
|
required: true
|
||||||
|
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions including closed ones
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: question
|
||||||
|
attributes:
|
||||||
|
label: Question
|
||||||
|
description: |
|
||||||
|
Ask your question in an arbitrary form.
|
||||||
|
Please make sure it's worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
|
||||||
|
Provide any additional information and as much context and examples as possible
|
||||||
|
placeholder: WRITE QUESTION HERE
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
4
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -7,11 +7,11 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
### Before submitting a *pull request* make sure you have:
|
### Before submitting a *pull request* make sure you have:
|
||||||
- [ ] At least skimmed through [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) sections
|
- [ ] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
|
||||||
- [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
|
- [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
|
||||||
- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
|
- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
|
||||||
|
|
||||||
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
|
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
|
||||||
- [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
|
- [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
|
||||||
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
|
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
|
||||||
|
|
||||||
|
|||||||
321
.github/workflows/build.yml
vendored
321
.github/workflows/build.yml
vendored
@@ -8,15 +8,18 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
build_unix:
|
build_unix:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
|
ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
|
||||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||||
sha256_unix: ${{ steps.sha256_file.outputs.sha256_unix }}
|
sha256_bin: ${{ steps.sha256_bin.outputs.sha256_bin }}
|
||||||
sha512_unix: ${{ steps.sha512_file.outputs.sha512_unix }}
|
sha512_bin: ${{ steps.sha512_bin.outputs.sha512_bin }}
|
||||||
|
sha256_tar: ${{ steps.sha256_tar.outputs.sha256_tar }}
|
||||||
|
sha512_tar: ${{ steps.sha512_tar.outputs.sha512_tar }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v2
|
||||||
with:
|
with:
|
||||||
@@ -25,11 +28,83 @@ jobs:
|
|||||||
run: sudo apt-get -y install zip pandoc man
|
run: sudo apt-get -y install zip pandoc man
|
||||||
- name: Bump version
|
- name: Bump version
|
||||||
id: bump_version
|
id: bump_version
|
||||||
run: python devscripts/update-version.py
|
run: |
|
||||||
|
python devscripts/update-version.py
|
||||||
|
make issuetemplates
|
||||||
- name: Print version
|
- name: Print version
|
||||||
run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
|
run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
|
||||||
|
- name: Update master
|
||||||
|
id: push_update
|
||||||
|
run: |
|
||||||
|
git config --global user.email "${{ github.event.pusher.email }}"
|
||||||
|
git config --global user.name "${{ github.event.pusher.name }}"
|
||||||
|
git add -u
|
||||||
|
git commit -m "[version] update" -m ":ci skip all"
|
||||||
|
git pull --rebase origin ${{ github.event.repository.master_branch }}
|
||||||
|
git push origin ${{ github.event.ref }}:${{ github.event.repository.master_branch }}
|
||||||
|
echo ::set-output name=head_sha::$(git rev-parse HEAD)
|
||||||
|
- name: Get Changelog
|
||||||
|
id: get_changelog
|
||||||
|
run: |
|
||||||
|
changelog=$(cat Changelog.md | grep -oPz '(?s)(?<=### ${{ steps.bump_version.outputs.ytdlp_version }}\n{2}).+?(?=\n{2,3}###)') || true
|
||||||
|
echo "changelog<<EOF" >> $GITHUB_ENV
|
||||||
|
echo "$changelog" >> $GITHUB_ENV
|
||||||
|
echo "EOF" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: Build lazy extractors
|
||||||
|
id: lazy_extractors
|
||||||
|
run: python devscripts/make_lazy_extractors.py
|
||||||
- name: Run Make
|
- name: Run Make
|
||||||
run: make all tar
|
run: make all tar
|
||||||
|
- name: Get SHA2-256SUMS for yt-dlp
|
||||||
|
id: sha256_bin
|
||||||
|
run: echo "::set-output name=sha256_bin::$(sha256sum yt-dlp | awk '{print $1}')"
|
||||||
|
- name: Get SHA2-256SUMS for yt-dlp.tar.gz
|
||||||
|
id: sha256_tar
|
||||||
|
run: echo "::set-output name=sha256_tar::$(sha256sum yt-dlp.tar.gz | awk '{print $1}')"
|
||||||
|
- name: Get SHA2-512SUMS for yt-dlp
|
||||||
|
id: sha512_bin
|
||||||
|
run: echo "::set-output name=sha512_bin::$(sha512sum yt-dlp | awk '{print $1}')"
|
||||||
|
- name: Get SHA2-512SUMS for yt-dlp.tar.gz
|
||||||
|
id: sha512_tar
|
||||||
|
run: echo "::set-output name=sha512_tar::$(sha512sum yt-dlp.tar.gz | awk '{print $1}')"
|
||||||
|
|
||||||
|
- name: Install dependencies for pypi
|
||||||
|
env:
|
||||||
|
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
|
||||||
|
if: "env.PYPI_TOKEN != ''"
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install setuptools wheel twine
|
||||||
|
- name: Build and publish on pypi
|
||||||
|
env:
|
||||||
|
TWINE_USERNAME: __token__
|
||||||
|
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
|
||||||
|
if: "env.TWINE_PASSWORD != ''"
|
||||||
|
run: |
|
||||||
|
rm -rf dist/*
|
||||||
|
python setup.py sdist bdist_wheel
|
||||||
|
twine upload dist/*
|
||||||
|
|
||||||
|
- name: Install SSH private key
|
||||||
|
env:
|
||||||
|
BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
|
||||||
|
if: "env.BREW_TOKEN != ''"
|
||||||
|
uses: webfactory/ssh-agent@v0.5.3
|
||||||
|
with:
|
||||||
|
ssh-private-key: ${{ env.BREW_TOKEN }}
|
||||||
|
- name: Update Homebrew Formulae
|
||||||
|
env:
|
||||||
|
BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
|
||||||
|
if: "env.BREW_TOKEN != ''"
|
||||||
|
run: |
|
||||||
|
git clone git@github.com:yt-dlp/homebrew-taps taps/
|
||||||
|
python3 devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ steps.bump_version.outputs.ytdlp_version }}"
|
||||||
|
git -C taps/ config user.name github-actions
|
||||||
|
git -C taps/ config user.email github-actions@example.com
|
||||||
|
git -C taps/ commit -am 'yt-dlp: ${{ steps.bump_version.outputs.ytdlp_version }}'
|
||||||
|
git -C taps/ push
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
id: create_release
|
id: create_release
|
||||||
uses: actions/create-release@v1
|
uses: actions/create-release@v1
|
||||||
@@ -38,9 +113,14 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
|
tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
|
||||||
release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
|
release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
|
||||||
|
commitish: ${{ steps.push_update.outputs.head_sha }}
|
||||||
body: |
|
body: |
|
||||||
Changelog:
|
#### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
|
||||||
PLACEHOLDER
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Changelog:
|
||||||
|
${{ env.changelog }}
|
||||||
draft: false
|
draft: false
|
||||||
prerelease: false
|
prerelease: false
|
||||||
- name: Upload yt-dlp Unix binary
|
- name: Upload yt-dlp Unix binary
|
||||||
@@ -62,36 +142,82 @@ jobs:
|
|||||||
asset_path: ./yt-dlp.tar.gz
|
asset_path: ./yt-dlp.tar.gz
|
||||||
asset_name: yt-dlp.tar.gz
|
asset_name: yt-dlp.tar.gz
|
||||||
asset_content_type: application/gzip
|
asset_content_type: application/gzip
|
||||||
- name: Get SHA2-256SUMS for yt-dlp
|
|
||||||
id: sha256_file
|
build_macos:
|
||||||
run: echo "::set-output name=sha256_unix::$(sha256sum yt-dlp | awk '{print $1}')"
|
runs-on: macos-11
|
||||||
- name: Get SHA2-512SUMS for yt-dlp
|
needs: build_unix
|
||||||
id: sha512_file
|
if: False
|
||||||
run: echo "::set-output name=sha512_unix::$(sha512sum yt-dlp | awk '{print $1}')"
|
outputs:
|
||||||
- name: Install dependencies for pypi
|
sha256_macos: ${{ steps.sha256_macos.outputs.sha256_macos }}
|
||||||
env:
|
sha512_macos: ${{ steps.sha512_macos.outputs.sha512_macos }}
|
||||||
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
|
sha256_macos_zip: ${{ steps.sha256_macos_zip.outputs.sha256_macos_zip }}
|
||||||
if: "env.PYPI_TOKEN != ''"
|
sha512_macos_zip: ${{ steps.sha512_macos_zip.outputs.sha512_macos_zip }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
# In order to create a universal2 application, the version of python3 in /usr/bin has to be used
|
||||||
|
- name: Install Requirements
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
brew install coreutils
|
||||||
pip install setuptools wheel twine
|
/usr/bin/python3 -m pip install -U --user pip Pyinstaller mutagen pycryptodomex websockets
|
||||||
- name: Build and publish on pypi
|
- name: Bump version
|
||||||
|
id: bump_version
|
||||||
|
run: /usr/bin/python3 devscripts/update-version.py
|
||||||
|
- name: Build lazy extractors
|
||||||
|
id: lazy_extractors
|
||||||
|
run: /usr/bin/python3 devscripts/make_lazy_extractors.py
|
||||||
|
- name: Run PyInstaller Script
|
||||||
|
run: /usr/bin/python3 pyinst.py --target-architecture universal2 --onefile
|
||||||
|
- name: Upload yt-dlp MacOS binary
|
||||||
|
id: upload-release-macos
|
||||||
|
uses: actions/upload-release-asset@v1
|
||||||
env:
|
env:
|
||||||
TWINE_USERNAME: __token__
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
|
with:
|
||||||
if: "env.TWINE_PASSWORD != ''"
|
upload_url: ${{ needs.build_unix.outputs.upload_url }}
|
||||||
run: |
|
asset_path: ./dist/yt-dlp_macos
|
||||||
rm -rf dist/*
|
asset_name: yt-dlp_macos
|
||||||
python setup.py sdist bdist_wheel
|
asset_content_type: application/octet-stream
|
||||||
twine upload dist/*
|
- name: Get SHA2-256SUMS for yt-dlp_macos
|
||||||
|
id: sha256_macos
|
||||||
|
run: echo "::set-output name=sha256_macos::$(sha256sum dist/yt-dlp_macos | awk '{print $1}')"
|
||||||
|
- name: Get SHA2-512SUMS for yt-dlp_macos
|
||||||
|
id: sha512_macos
|
||||||
|
run: echo "::set-output name=sha512_macos::$(sha512sum dist/yt-dlp_macos | awk '{print $1}')"
|
||||||
|
|
||||||
|
- name: Run PyInstaller Script with --onedir
|
||||||
|
run: /usr/bin/python3 pyinst.py --target-architecture universal2 --onedir
|
||||||
|
- uses: papeloto/action-zip@v1
|
||||||
|
with:
|
||||||
|
files: ./dist/yt-dlp_macos
|
||||||
|
dest: ./dist/yt-dlp_macos.zip
|
||||||
|
- name: Upload yt-dlp MacOS onedir
|
||||||
|
id: upload-release-macos-zip
|
||||||
|
uses: actions/upload-release-asset@v1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ needs.build_unix.outputs.upload_url }}
|
||||||
|
asset_path: ./dist/yt-dlp_macos.zip
|
||||||
|
asset_name: yt-dlp_macos.zip
|
||||||
|
asset_content_type: application/zip
|
||||||
|
- name: Get SHA2-256SUMS for yt-dlp_macos.zip
|
||||||
|
id: sha256_macos_zip
|
||||||
|
run: echo "::set-output name=sha256_macos_zip::$(sha256sum dist/yt-dlp_macos.zip | awk '{print $1}')"
|
||||||
|
- name: Get SHA2-512SUMS for yt-dlp_macos
|
||||||
|
id: sha512_macos_zip
|
||||||
|
run: echo "::set-output name=sha512_macos_zip::$(sha512sum dist/yt-dlp_macos.zip | awk '{print $1}')"
|
||||||
|
|
||||||
build_windows:
|
build_windows:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
needs: build_unix
|
needs: build_unix
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
sha256_windows: ${{ steps.sha256_file_win.outputs.sha256_windows }}
|
sha256_win: ${{ steps.sha256_win.outputs.sha256_win }}
|
||||||
sha512_windows: ${{ steps.sha512_file_win.outputs.sha512_windows }}
|
sha512_win: ${{ steps.sha512_win.outputs.sha512_win }}
|
||||||
|
sha256_py2exe: ${{ steps.sha256_py2exe.outputs.sha256_py2exe }}
|
||||||
|
sha512_py2exe: ${{ steps.sha512_py2exe.outputs.sha512_py2exe }}
|
||||||
|
sha256_win_zip: ${{ steps.sha256_win_zip.outputs.sha256_win_zip }}
|
||||||
|
sha512_win_zip: ${{ steps.sha512_win_zip.outputs.sha512_win_zip }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
@@ -100,17 +226,19 @@ jobs:
|
|||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v2
|
||||||
with:
|
with:
|
||||||
python-version: '3.8'
|
python-version: '3.8'
|
||||||
- name: Upgrade pip and enable wheel support
|
|
||||||
run: python -m pip install --upgrade pip setuptools wheel
|
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: pip install pyinstaller mutagen pycryptodome websockets
|
# Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip setuptools wheel py2exe
|
||||||
|
pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
|
||||||
- name: Bump version
|
- name: Bump version
|
||||||
id: bump_version
|
id: bump_version
|
||||||
run: python devscripts/update-version.py
|
run: python devscripts/update-version.py
|
||||||
- name: Print version
|
- name: Build lazy extractors
|
||||||
run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
|
id: lazy_extractors
|
||||||
|
run: python devscripts/make_lazy_extractors.py
|
||||||
- name: Run PyInstaller Script
|
- name: Run PyInstaller Script
|
||||||
run: python pyinst.py 64
|
run: python pyinst.py
|
||||||
- name: Upload yt-dlp.exe Windows binary
|
- name: Upload yt-dlp.exe Windows binary
|
||||||
id: upload-release-windows
|
id: upload-release-windows
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-release-asset@v1
|
||||||
@@ -122,19 +250,61 @@ jobs:
|
|||||||
asset_name: yt-dlp.exe
|
asset_name: yt-dlp.exe
|
||||||
asset_content_type: application/vnd.microsoft.portable-executable
|
asset_content_type: application/vnd.microsoft.portable-executable
|
||||||
- name: Get SHA2-256SUMS for yt-dlp.exe
|
- name: Get SHA2-256SUMS for yt-dlp.exe
|
||||||
id: sha256_file_win
|
id: sha256_win
|
||||||
run: echo "::set-output name=sha256_windows::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
|
run: echo "::set-output name=sha256_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
|
||||||
- name: Get SHA2-512SUMS for yt-dlp.exe
|
- name: Get SHA2-512SUMS for yt-dlp.exe
|
||||||
id: sha512_file_win
|
id: sha512_win
|
||||||
run: echo "::set-output name=sha512_windows::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
|
run: echo "::set-output name=sha512_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
|
||||||
|
|
||||||
|
- name: Run PyInstaller Script with --onedir
|
||||||
|
run: python pyinst.py --onedir
|
||||||
|
- uses: papeloto/action-zip@v1
|
||||||
|
with:
|
||||||
|
files: ./dist/yt-dlp
|
||||||
|
dest: ./dist/yt-dlp_win.zip
|
||||||
|
- name: Upload yt-dlp Windows onedir
|
||||||
|
id: upload-release-windows-zip
|
||||||
|
uses: actions/upload-release-asset@v1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ needs.build_unix.outputs.upload_url }}
|
||||||
|
asset_path: ./dist/yt-dlp_win.zip
|
||||||
|
asset_name: yt-dlp_win.zip
|
||||||
|
asset_content_type: application/zip
|
||||||
|
- name: Get SHA2-256SUMS for yt-dlp_win.zip
|
||||||
|
id: sha256_win_zip
|
||||||
|
run: echo "::set-output name=sha256_win_zip::$((Get-FileHash dist\yt-dlp_win.zip -Algorithm SHA256).Hash.ToLower())"
|
||||||
|
- name: Get SHA2-512SUMS for yt-dlp_win.zip
|
||||||
|
id: sha512_win_zip
|
||||||
|
run: echo "::set-output name=sha512_win_zip::$((Get-FileHash dist\yt-dlp_win.zip -Algorithm SHA512).Hash.ToLower())"
|
||||||
|
|
||||||
|
- name: Run py2exe Script
|
||||||
|
run: python setup.py py2exe
|
||||||
|
- name: Upload yt-dlp_min.exe Windows binary
|
||||||
|
id: upload-release-windows-py2exe
|
||||||
|
uses: actions/upload-release-asset@v1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
upload_url: ${{ needs.build_unix.outputs.upload_url }}
|
||||||
|
asset_path: ./dist/yt-dlp.exe
|
||||||
|
asset_name: yt-dlp_min.exe
|
||||||
|
asset_content_type: application/vnd.microsoft.portable-executable
|
||||||
|
- name: Get SHA2-256SUMS for yt-dlp_min.exe
|
||||||
|
id: sha256_py2exe
|
||||||
|
run: echo "::set-output name=sha256_py2exe::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
|
||||||
|
- name: Get SHA2-512SUMS for yt-dlp_min.exe
|
||||||
|
id: sha512_py2exe
|
||||||
|
run: echo "::set-output name=sha512_py2exe::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
|
||||||
|
|
||||||
build_windows32:
|
build_windows32:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
needs: [build_unix, build_windows]
|
needs: build_unix
|
||||||
|
|
||||||
outputs:
|
outputs:
|
||||||
sha256_windows32: ${{ steps.sha256_file_win32.outputs.sha256_windows32 }}
|
sha256_win32: ${{ steps.sha256_win32.outputs.sha256_win32 }}
|
||||||
sha512_windows32: ${{ steps.sha512_file_win32.outputs.sha512_windows32 }}
|
sha512_win32: ${{ steps.sha512_win32.outputs.sha512_win32 }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
@@ -144,17 +314,18 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
python-version: '3.7'
|
python-version: '3.7'
|
||||||
architecture: 'x86'
|
architecture: 'x86'
|
||||||
- name: Upgrade pip and enable wheel support
|
|
||||||
run: python -m pip install --upgrade pip setuptools wheel
|
|
||||||
- name: Install Requirements
|
- name: Install Requirements
|
||||||
run: pip install pyinstaller mutagen pycryptodome websockets
|
run: |
|
||||||
|
python -m pip install --upgrade pip setuptools wheel
|
||||||
|
pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
|
||||||
- name: Bump version
|
- name: Bump version
|
||||||
id: bump_version
|
id: bump_version
|
||||||
run: python devscripts/update-version.py
|
run: python devscripts/update-version.py
|
||||||
- name: Print version
|
- name: Build lazy extractors
|
||||||
run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
|
id: lazy_extractors
|
||||||
|
run: python devscripts/make_lazy_extractors.py
|
||||||
- name: Run PyInstaller Script for 32 Bit
|
- name: Run PyInstaller Script for 32 Bit
|
||||||
run: python pyinst.py 32
|
run: python pyinst.py
|
||||||
- name: Upload Executable yt-dlp_x86.exe
|
- name: Upload Executable yt-dlp_x86.exe
|
||||||
id: upload-release-windows32
|
id: upload-release-windows32
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-release-asset@v1
|
||||||
@@ -166,11 +337,11 @@ jobs:
|
|||||||
asset_name: yt-dlp_x86.exe
|
asset_name: yt-dlp_x86.exe
|
||||||
asset_content_type: application/vnd.microsoft.portable-executable
|
asset_content_type: application/vnd.microsoft.portable-executable
|
||||||
- name: Get SHA2-256SUMS for yt-dlp_x86.exe
|
- name: Get SHA2-256SUMS for yt-dlp_x86.exe
|
||||||
id: sha256_file_win32
|
id: sha256_win32
|
||||||
run: echo "::set-output name=sha256_windows32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA256).Hash.ToLower())"
|
run: echo "::set-output name=sha256_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA256).Hash.ToLower())"
|
||||||
- name: Get SHA2-512SUMS for yt-dlp_x86.exe
|
- name: Get SHA2-512SUMS for yt-dlp_x86.exe
|
||||||
id: sha512_file_win32
|
id: sha512_win32
|
||||||
run: echo "::set-output name=sha512_windows32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA512).Hash.ToLower())"
|
run: echo "::set-output name=sha512_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA512).Hash.ToLower())"
|
||||||
|
|
||||||
finish:
|
finish:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -179,15 +350,23 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Make SHA2-256SUMS file
|
- name: Make SHA2-256SUMS file
|
||||||
env:
|
env:
|
||||||
SHA256_WINDOWS: ${{ needs.build_windows.outputs.sha256_windows }}
|
SHA256_BIN: ${{ needs.build_unix.outputs.sha256_bin }}
|
||||||
SHA256_WINDOWS32: ${{ needs.build_windows32.outputs.sha256_windows32 }}
|
SHA256_TAR: ${{ needs.build_unix.outputs.sha256_tar }}
|
||||||
SHA256_UNIX: ${{ needs.build_unix.outputs.sha256_unix }}
|
SHA256_WIN: ${{ needs.build_windows.outputs.sha256_win }}
|
||||||
YTDLP_VERSION: ${{ needs.build_unix.outputs.ytdlp_version }}
|
SHA256_PY2EXE: ${{ needs.build_windows.outputs.sha256_py2exe }}
|
||||||
|
SHA256_WIN_ZIP: ${{ needs.build_windows.outputs.sha256_win_zip }}
|
||||||
|
SHA256_WIN32: ${{ needs.build_windows32.outputs.sha256_win32 }}
|
||||||
|
SHA256_MACOS: ${{ needs.build_macos.outputs.sha256_macos }}
|
||||||
|
SHA256_MACOS_ZIP: ${{ needs.build_macos.outputs.sha256_macos_zip }}
|
||||||
run: |
|
run: |
|
||||||
echo "version:${{ env.YTDLP_VERSION }}" >> SHA2-256SUMS
|
echo "${{ env.SHA256_BIN }} yt-dlp" >> SHA2-256SUMS
|
||||||
echo "yt-dlp.exe:${{ env.SHA256_WINDOWS }}" >> SHA2-256SUMS
|
echo "${{ env.SHA256_TAR }} yt-dlp.tar.gz" >> SHA2-256SUMS
|
||||||
echo "yt-dlp_x86.exe:${{ env.SHA256_WINDOWS32 }}" >> SHA2-256SUMS
|
echo "${{ env.SHA256_WIN }} yt-dlp.exe" >> SHA2-256SUMS
|
||||||
echo "yt-dlp:${{ env.SHA256_UNIX }}" >> SHA2-256SUMS
|
echo "${{ env.SHA256_PY2EXE }} yt-dlp_min.exe" >> SHA2-256SUMS
|
||||||
|
echo "${{ env.SHA256_WIN32 }} yt-dlp_x86.exe" >> SHA2-256SUMS
|
||||||
|
echo "${{ env.SHA256_WIN_ZIP }} yt-dlp_win.zip" >> SHA2-256SUMS
|
||||||
|
# echo "${{ env.SHA256_MACOS }} yt-dlp_macos" >> SHA2-256SUMS
|
||||||
|
# echo "${{ env.SHA256_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-256SUMS
|
||||||
- name: Upload 256SUMS file
|
- name: Upload 256SUMS file
|
||||||
id: upload-sums
|
id: upload-sums
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-release-asset@v1
|
||||||
@@ -200,13 +379,23 @@ jobs:
|
|||||||
asset_content_type: text/plain
|
asset_content_type: text/plain
|
||||||
- name: Make SHA2-512SUMS file
|
- name: Make SHA2-512SUMS file
|
||||||
env:
|
env:
|
||||||
SHA512_WINDOWS: ${{ needs.build_windows.outputs.sha512_windows }}
|
SHA512_BIN: ${{ needs.build_unix.outputs.sha512_bin }}
|
||||||
SHA512_WINDOWS32: ${{ needs.build_windows32.outputs.sha512_windows32 }}
|
SHA512_TAR: ${{ needs.build_unix.outputs.sha512_tar }}
|
||||||
SHA512_UNIX: ${{ needs.build_unix.outputs.sha512_unix }}
|
SHA512_WIN: ${{ needs.build_windows.outputs.sha512_win }}
|
||||||
|
SHA512_PY2EXE: ${{ needs.build_windows.outputs.sha512_py2exe }}
|
||||||
|
SHA512_WIN_ZIP: ${{ needs.build_windows.outputs.sha512_win_zip }}
|
||||||
|
SHA512_WIN32: ${{ needs.build_windows32.outputs.sha512_win32 }}
|
||||||
|
SHA512_MACOS: ${{ needs.build_macos.outputs.sha512_macos }}
|
||||||
|
SHA512_MACOS_ZIP: ${{ needs.build_macos.outputs.sha512_macos_zip }}
|
||||||
run: |
|
run: |
|
||||||
echo "${{ env.SHA512_WINDOWS }} yt-dlp.exe" >> SHA2-512SUMS
|
echo "${{ env.SHA512_BIN }} yt-dlp" >> SHA2-512SUMS
|
||||||
echo "${{ env.SHA512_WINDOWS32 }} yt-dlp_x86.exe" >> SHA2-512SUMS
|
echo "${{ env.SHA512_TAR }} yt-dlp.tar.gz" >> SHA2-512SUMS
|
||||||
echo "${{ env.SHA512_UNIX }} yt-dlp" >> SHA2-512SUMS
|
echo "${{ env.SHA512_WIN }} yt-dlp.exe" >> SHA2-512SUMS
|
||||||
|
echo "${{ env.SHA512_WIN_ZIP }} yt-dlp_win.zip" >> SHA2-512SUMS
|
||||||
|
echo "${{ env.SHA512_PY2EXE }} yt-dlp_min.exe" >> SHA2-512SUMS
|
||||||
|
echo "${{ env.SHA512_WIN32 }} yt-dlp_x86.exe" >> SHA2-512SUMS
|
||||||
|
# echo "${{ env.SHA512_MACOS }} yt-dlp_macos" >> SHA2-512SUMS
|
||||||
|
# echo "${{ env.SHA512_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-512SUMS
|
||||||
- name: Upload 512SUMS file
|
- name: Upload 512SUMS file
|
||||||
id: upload-512sums
|
id: upload-512sums
|
||||||
uses: actions/upload-release-asset@v1
|
uses: actions/upload-release-asset@v1
|
||||||
|
|||||||
4
.github/workflows/quick-test.yml
vendored
4
.github/workflows/quick-test.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
python-version: 3.9
|
python-version: 3.9
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
run: pip install pytest pycryptodome
|
run: pip install pytest pycryptodomex
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: ./devscripts/run_tests.sh core
|
run: ./devscripts/run_tests.sh core
|
||||||
flake8:
|
flake8:
|
||||||
@@ -27,5 +27,7 @@ jobs:
|
|||||||
python-version: 3.9
|
python-version: 3.9
|
||||||
- name: Install flake8
|
- name: Install flake8
|
||||||
run: pip install flake8
|
run: pip install flake8
|
||||||
|
- name: Make lazy extractors
|
||||||
|
run: python devscripts/make_lazy_extractors.py
|
||||||
- name: Run flake8
|
- name: Run flake8
|
||||||
run: flake8 .
|
run: flake8 .
|
||||||
|
|||||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -2,7 +2,8 @@
|
|||||||
*.conf
|
*.conf
|
||||||
*.spec
|
*.spec
|
||||||
cookies
|
cookies
|
||||||
cookies.txt
|
*cookies.txt
|
||||||
|
.netrc
|
||||||
|
|
||||||
# Downloaded
|
# Downloaded
|
||||||
*.srt
|
*.srt
|
||||||
@@ -19,6 +20,8 @@ cookies.txt
|
|||||||
*.wav
|
*.wav
|
||||||
*.ape
|
*.ape
|
||||||
*.mkv
|
*.mkv
|
||||||
|
*.flac
|
||||||
|
*.avi
|
||||||
*.swf
|
*.swf
|
||||||
*.part
|
*.part
|
||||||
*.part-*
|
*.part-*
|
||||||
@@ -38,9 +41,10 @@ cookies.txt
|
|||||||
*.webp
|
*.webp
|
||||||
*.annotations.xml
|
*.annotations.xml
|
||||||
*.description
|
*.description
|
||||||
|
.cache/
|
||||||
|
|
||||||
# Allow config/media files in testdata
|
# Allow config/media files in testdata
|
||||||
!test/testdata/**
|
!test/**
|
||||||
|
|
||||||
# Python
|
# Python
|
||||||
*.pyc
|
*.pyc
|
||||||
|
|||||||
271
CONTRIBUTING.md
271
CONTRIBUTING.md
@@ -1,26 +1,59 @@
|
|||||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
# CONTRIBUTING TO YT-DLP
|
||||||
|
|
||||||
|
- [OPENING AN ISSUE](#opening-an-issue)
|
||||||
|
- [Is the description of the issue itself sufficient?](#is-the-description-of-the-issue-itself-sufficient)
|
||||||
|
- [Are you using the latest version?](#are-you-using-the-latest-version)
|
||||||
|
- [Is the issue already documented?](#is-the-issue-already-documented)
|
||||||
|
- [Why are existing options not enough?](#why-are-existing-options-not-enough)
|
||||||
|
- [Have you read and understood the changes, between youtube-dl and yt-dlp](#have-you-read-and-understood-the-changes-between-youtube-dl-and-yt-dlp)
|
||||||
|
- [Is there enough context in your bug report?](#is-there-enough-context-in-your-bug-report)
|
||||||
|
- [Does the issue involve one problem, and one problem only?](#does-the-issue-involve-one-problem-and-one-problem-only)
|
||||||
|
- [Is anyone going to need the feature?](#is-anyone-going-to-need-the-feature)
|
||||||
|
- [Is your question about yt-dlp?](#is-your-question-about-yt-dlp)
|
||||||
|
- [DEVELOPER INSTRUCTIONS](#developer-instructions)
|
||||||
|
- [Adding new feature or making overarching changes](#adding-new-feature-or-making-overarching-changes)
|
||||||
|
- [Adding support for a new site](#adding-support-for-a-new-site)
|
||||||
|
- [yt-dlp coding conventions](#yt-dlp-coding-conventions)
|
||||||
|
- [Mandatory and optional metafields](#mandatory-and-optional-metafields)
|
||||||
|
- [Provide fallbacks](#provide-fallbacks)
|
||||||
|
- [Regular expressions](#regular-expressions)
|
||||||
|
- [Long lines policy](#long-lines-policy)
|
||||||
|
- [Inline values](#inline-values)
|
||||||
|
- [Collapse fallbacks](#collapse-fallbacks)
|
||||||
|
- [Trailing parentheses](#trailing-parentheses)
|
||||||
|
- [Use convenience conversion and parsing functions](#use-convenience-conversion-and-parsing-functions)
|
||||||
|
- [EMBEDDING YT-DLP](README.md#embedding-yt-dlp)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# OPENING AN ISSUE
|
||||||
|
|
||||||
|
Bugs and suggestions should be reported at: [yt-dlp/yt-dlp/issues](https://github.com/yt-dlp/yt-dlp/issues). Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in our [discord server](https://discord.gg/H5MNcFW63r).
|
||||||
|
|
||||||
|
**Please include the full output of yt-dlp when run with `-Uv`**, i.e. **add** `-Uv` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||||
```
|
```
|
||||||
$ youtube-dl -v <your command line>
|
$ yt-dlp -Uv <your command line>
|
||||||
[debug] System config: []
|
[debug] Command-line config: ['-v', 'demo.com']
|
||||||
[debug] User config: []
|
[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8
|
||||||
[debug] Command-line args: [u'-v', u'https://www.youtube.com/watch?v=BaW_jenozKc']
|
[debug] yt-dlp version 2021.09.25 (zip)
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
[debug] Python version 3.8.10 (CPython 64bit) - Linux-5.4.0-74-generic-x86_64-with-glibc2.29
|
||||||
[debug] youtube-dl version 2015.12.06
|
[debug] exe versions: ffmpeg 4.2.4, ffprobe 4.2.4
|
||||||
[debug] Git HEAD: 135392e
|
|
||||||
[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
|
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
|
||||||
[debug] Proxy map: {}
|
[debug] Proxy map: {}
|
||||||
|
Current Build Hash 25cc412d1d3c0725a1f2f5b7e4682f6fb40e6d15f7024e96f7afd572e9919535
|
||||||
|
yt-dlp is up to date (2021.09.25)
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
**Do not post screenshots of verbose logs; only plain text is acceptable.**
|
**Do not post screenshots of verbose logs; only plain text is acceptable.**
|
||||||
|
|
||||||
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore will be closed as `incomplete`.
|
||||||
|
|
||||||
|
The templates provided for the Issues, should be completed and **not removed**, this helps aide the resolution of the issue.
|
||||||
|
|
||||||
Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
|
Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
|
||||||
|
|
||||||
### Is the description of the issue itself sufficient?
|
### Is the description of the issue itself sufficient?
|
||||||
|
|
||||||
We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources. Many contributors, including myself, are also not native speakers, so we may misread some parts.
|
We often get issue reports that we cannot really decipher. While in most cases we eventually get the required information after asking back multiple times, this poses an unnecessary drain on our resources.
|
||||||
|
|
||||||
So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious
|
So please elaborate on what feature you are requesting, or what bug you want to be fixed. Make sure that it's obvious
|
||||||
|
|
||||||
@@ -28,25 +61,31 @@ So please elaborate on what feature you are requesting, or what bug you want to
|
|||||||
- How it could be fixed
|
- How it could be fixed
|
||||||
- How your proposed solution would look like
|
- How your proposed solution would look like
|
||||||
|
|
||||||
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
|
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. We often get frustrated by these issues, since the only possible way for us to move forward on them is to ask for clarification over and over.
|
||||||
|
|
||||||
For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
|
For bug reports, this means that your report should contain the **complete** output of yt-dlp when called with the `-Uv` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
|
||||||
|
|
||||||
If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
|
If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--write-pages` and upload the `.dump` files you get [somewhere](https://gist.github.com).
|
||||||
|
|
||||||
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `https://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `https://www.youtube.com/`) is *not* an example URL.
|
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `https://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `https://www.youtube.com/`) is *not* an example URL.
|
||||||
|
|
||||||
### Are you using the latest version?
|
### Are you using the latest version?
|
||||||
|
|
||||||
Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
|
Before reporting any issue, type `yt-dlp -U`. This should report that you're up-to-date. This goes for feature requests as well.
|
||||||
|
|
||||||
### Is the issue already documented?
|
### Is the issue already documented?
|
||||||
|
|
||||||
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/ytdl-org/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
|
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2021.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
|
||||||
|
|
||||||
|
Additionally, it is also helpful to see if the issue has already been documented in the [youtube-dl issue tracker](https://github.com/ytdl-org/youtube-dl/issues). If similar issues have already been reported in youtube-dl (but not in our issue tracker), links to them can be included in your issue report here.
|
||||||
|
|
||||||
### Why are existing options not enough?
|
### Why are existing options not enough?
|
||||||
|
|
||||||
Before requesting a new feature, please have a quick peek at [the list of supported options](https://github.com/ytdl-org/youtube-dl/blob/master/README.md#options). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.
|
Before requesting a new feature, please have a quick peek at [the list of supported options](README.md#usage-and-options). Many feature requests are for features that actually exist already! Please, absolutely do show off your work in the issue report and detail how the existing similar options do *not* solve your problem.
|
||||||
|
|
||||||
|
### Have you read and understood the changes, between youtube-dl and yt-dlp
|
||||||
|
|
||||||
|
There are many changes between youtube-dl and yt-dlp [(changes to default behavior)](README.md#differences-in-default-behavior), and some of the options available have a different behaviour in yt-dlp, or have been removed all together [(list of changes to options)](README.md#deprecated-options). Make sure you have read and understand the differences in the options and how this may impact your downloads before opening an issue.
|
||||||
|
|
||||||
### Is there enough context in your bug report?
|
### Is there enough context in your bug report?
|
||||||
|
|
||||||
@@ -58,23 +97,40 @@ We are then presented with a very complicated request when the original problem
|
|||||||
|
|
||||||
Some of our users seem to think there is a limit of issues they can or should open. There is no limit of issues they can or should open. While it may seem appealing to be able to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.
|
Some of our users seem to think there is a limit of issues they can or should open. There is no limit of issues they can or should open. While it may seem appealing to be able to dump all your issues into one ticket, that means that someone who solves one of your issues cannot mark the issue as closed. Typically, reporting a bunch of issues leads to the ticket lingering since nobody wants to attack that behemoth, until someone mercifully splits the issue into multiple ones.
|
||||||
|
|
||||||
In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, White house podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of youtube-dl that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.
|
In particular, every site support request issue should only pertain to services at one site (generally under a common domain, but always using the same backend technology). Do not request support for vimeo user videos, White house podcasts, and Google Plus pages in the same issue. Also, make sure that you don't post bug reports alongside feature requests. As a rule of thumb, a feature request does not include outputs of yt-dlp that are not immediately related to the feature at hand. Do not post reports of a network error alongside the request for a new video service.
|
||||||
|
|
||||||
### Is anyone going to need the feature?
|
### Is anyone going to need the feature?
|
||||||
|
|
||||||
Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
|
Only post features that you (or an incapacitated friend you can personally talk to) require. Do not post features because they seem like a good idea. If they are really useful, they will be requested by someone who requires them.
|
||||||
|
|
||||||
### Is your question about youtube-dl?
|
### Is your question about yt-dlp?
|
||||||
|
|
||||||
|
Some bug reports are completely unrelated to yt-dlp and relate to a different, or even the reporter's own, application. Please make sure that you are actually using yt-dlp. If you are using a UI for yt-dlp, report the bug to the maintainer of the actual application providing the UI. In general, if you are unable to provide the verbose log, you should not be opening the issue here.
|
||||||
|
|
||||||
|
If the issue is with `youtube-dl` (the upstream fork of yt-dlp) and not with yt-dlp, the issue should be raised in the youtube-dl project.
|
||||||
|
|
||||||
|
### Are you willing to share account details if needed?
|
||||||
|
|
||||||
|
The maintainers and potential contributors of the project often do not have an account for the website you are asking support for. So any developer interested in solving your issue may ask you for account details. It is your personal discression whether you are willing to share the account in order for the developer to try and solve your issue. However, if you are unwilling or unable to provide details, they obviously cannot work on the issue and it cannot be solved unless some developer who both has an account and is willing/able to contribute decides to solve it.
|
||||||
|
|
||||||
|
By sharing an account with anyone, you agree to bear all risks associated with it. The maintainers and yt-dlp can't be held responsible for any misuse of the credentials.
|
||||||
|
|
||||||
|
While these steps won't necessarily ensure that no misuse of the account takes place, these are still some good practices to follow.
|
||||||
|
|
||||||
|
- Look for people with `Member` (maintainers of the project) or `Contributor` (people who have previously contributed code) tag on their messages.
|
||||||
|
- Change the password before sharing the account to something random (use [this](https://passwordsgenerator.net/) if you don't have a random password generator).
|
||||||
|
- Change the password after receiving the account back.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different, or even the reporter's own, application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
|
|
||||||
|
|
||||||
# DEVELOPER INSTRUCTIONS
|
# DEVELOPER INSTRUCTIONS
|
||||||
|
|
||||||
Most users do not need to build youtube-dl and can [download the builds](https://ytdl-org.github.io/youtube-dl/download.html) or get them from their distribution.
|
Most users do not need to build yt-dlp and can [download the builds](https://github.com/yt-dlp/yt-dlp/releases) or get them via [the other installation methods](README.md#installation).
|
||||||
|
|
||||||
To run youtube-dl as a developer, you don't need to build anything either. Simply execute
|
To run yt-dlp as a developer, you don't need to build anything either. Simply execute
|
||||||
|
|
||||||
python -m youtube_dl
|
python -m yt_dlp
|
||||||
|
|
||||||
To run the test, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
|
To run the test, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
|
||||||
|
|
||||||
@@ -85,42 +141,42 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
|||||||
|
|
||||||
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
||||||
|
|
||||||
If you want to create a build of youtube-dl yourself, you'll need
|
If you want to create a build of yt-dlp yourself, you can follow the instructions [here](README.md#compile).
|
||||||
|
|
||||||
* python3
|
|
||||||
* make (only GNU make is supported)
|
|
||||||
* pandoc
|
|
||||||
* zip
|
|
||||||
* pytest
|
|
||||||
|
|
||||||
### Adding support for a new site
|
## Adding new feature or making overarching changes
|
||||||
|
|
||||||
If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](README.md#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites thus pull requests adding support for them **will be rejected**.
|
Before you start writing code for implementing a new feature, open an issue explaining your feature request and at least one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.
|
||||||
|
|
||||||
|
The same applies for changes to the documentation, code style, or overarching changes to the architecture
|
||||||
|
|
||||||
|
|
||||||
|
## Adding support for a new site
|
||||||
|
|
||||||
|
If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](https://www.github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. yt-dlp does **not support** such sites thus pull requests adding support for them **will be rejected**.
|
||||||
|
|
||||||
After you have ensured this site is distributing its content legally, you can follow this quick list (assuming your service is called `yourextractor`):
|
After you have ensured this site is distributing its content legally, you can follow this quick list (assuming your service is called `yourextractor`):
|
||||||
|
|
||||||
1. [Fork this repository](https://github.com/ytdl-org/youtube-dl/fork)
|
1. [Fork this repository](https://github.com/yt-dlp/yt-dlp/fork)
|
||||||
2. Check out the source code with:
|
1. Check out the source code with:
|
||||||
|
|
||||||
git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git
|
git clone git@github.com:YOUR_GITHUB_USERNAME/yt-dlp.git
|
||||||
|
|
||||||
3. Start a new git branch with
|
1. Start a new git branch with
|
||||||
|
|
||||||
cd youtube-dl
|
cd yt-dlp
|
||||||
git checkout -b yourextractor
|
git checkout -b yourextractor
|
||||||
|
|
||||||
4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
|
1. Start with this simple template and save it to `yt_dlp/extractor/yourextractor.py`:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
class YourExtractorIE(InfoExtractor):
|
class YourExtractorIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'https://yourextractor.com/watch/42',
|
'url': 'https://yourextractor.com/watch/42',
|
||||||
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -134,7 +190,7 @@ After you have ensured this site is distributing its content legally, you can fo
|
|||||||
# * A regular expression; start the string with re:
|
# * A regular expression; start the string with re:
|
||||||
# * Any Python type (for example int or float)
|
# * Any Python type (for example int or float)
|
||||||
}
|
}
|
||||||
}
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
@@ -148,45 +204,48 @@ After you have ensured this site is distributing its content legally, you can fo
|
|||||||
'title': title,
|
'title': title,
|
||||||
'description': self._og_search_description(webpage),
|
'description': self._og_search_description(webpage),
|
||||||
'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
|
'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
|
||||||
# TODO more properties (see youtube_dl/extractor/common.py)
|
# TODO more properties (see yt_dlp/extractor/common.py)
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
1. Add an import in [`yt_dlp/extractor/extractors.py`](yt_dlp/extractor/extractors.py).
|
||||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
|
1. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, the tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in. You can also run all the tests in one go with `TestDownload.test_YourExtractor_all`
|
||||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
|
1. Make sure you have at least one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
|
||||||
8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
|
1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L91-L426). Add tests and code for as many as you want.
|
||||||
|
1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
|
||||||
|
|
||||||
$ flake8 youtube_dl/extractor/yourextractor.py
|
$ flake8 yt_dlp/extractor/yourextractor.py
|
||||||
|
|
||||||
9. Make sure your code works under all [Python](https://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatibility is not required for even older versions of Python.
|
||||||
10. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files and [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
|
1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
|
||||||
|
|
||||||
$ git add youtube_dl/extractor/extractors.py
|
$ git add yt_dlp/extractor/extractors.py
|
||||||
$ git add youtube_dl/extractor/yourextractor.py
|
$ git add yt_dlp/extractor/yourextractor.py
|
||||||
$ git commit -m '[yourextractor] Add new extractor'
|
$ git commit -m '[yourextractor] Add extractor'
|
||||||
$ git push origin yourextractor
|
$ git push origin yourextractor
|
||||||
|
|
||||||
11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
1. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
||||||
|
|
||||||
In any case, thank you very much for your contributions!
|
In any case, thank you very much for your contributions!
|
||||||
|
|
||||||
## youtube-dl coding conventions
|
|
||||||
|
## yt-dlp coding conventions
|
||||||
|
|
||||||
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
|
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
|
||||||
|
|
||||||
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.
|
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old yt-dlp versions working. Even though this breakage issue may be easily fixed by a new version of yt-dlp, this could take some time, during which the extractor will remain broken.
|
||||||
|
|
||||||
|
|
||||||
### Mandatory and optional metafields
|
### Mandatory and optional metafields
|
||||||
|
|
||||||
For extraction to work youtube-dl relies on metadata your extractor extracts and provides to youtube-dl expressed by an [information dictionary](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303) or simply *info dict*. Only the following meta fields in the *info dict* are considered mandatory for a successful extraction process by youtube-dl:
|
For extraction to work yt-dlp relies on metadata your extractor extracts and provides to yt-dlp expressed by an [information dictionary](yt_dlp/extractor/common.py#L91-L426) or simply *info dict*. Only the following meta fields in the *info dict* are considered mandatory for a successful extraction process by yt-dlp:
|
||||||
|
|
||||||
- `id` (media identifier)
|
- `id` (media identifier)
|
||||||
- `title` (media title)
|
- `title` (media title)
|
||||||
- `url` (media download URL) or `formats`
|
- `url` (media download URL) or `formats`
|
||||||
|
|
||||||
In fact only the last option is technically mandatory (i.e. if you can't figure out the download location of the media the extraction does not make any sense). But by convention youtube-dl also treats `id` and `title` as mandatory. Thus the aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken.
|
The aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken. While, in fact, only `id` is technically mandatory, due to compatibility reasons, yt-dlp also treats `title` as mandatory. The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract useful information with `--ignore-no-formats-error` - Eg: when the video is a live stream that has not started yet.
|
||||||
|
|
||||||
[Any field](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L188-L303) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
[Any field](yt_dlp/extractor/common.py#L219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
@@ -200,8 +259,10 @@ Assume at this point `meta`'s layout is:
|
|||||||
|
|
||||||
```python
|
```python
|
||||||
{
|
{
|
||||||
...
|
|
||||||
"summary": "some fancy summary text",
|
"summary": "some fancy summary text",
|
||||||
|
"user": {
|
||||||
|
"name": "uploader name"
|
||||||
|
},
|
||||||
...
|
...
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@@ -220,6 +281,30 @@ description = meta['summary'] # incorrect
|
|||||||
|
|
||||||
The latter will break extraction process with `KeyError` if `summary` disappears from `meta` at some later time but with the former approach extraction will just go ahead with `description` set to `None` which is perfectly fine (remember `None` is equivalent to the absence of data).
|
The latter will break extraction process with `KeyError` if `summary` disappears from `meta` at some later time but with the former approach extraction will just go ahead with `description` set to `None` which is perfectly fine (remember `None` is equivalent to the absence of data).
|
||||||
|
|
||||||
|
|
||||||
|
If the data is nested, do not use `.get` chains, but instead make use of the utility functions `try_get` or `traverse_obj`
|
||||||
|
|
||||||
|
Considering the above `meta` again, assume you want to extract `["user"]["name"]` and put it in the resulting info dict as `uploader`
|
||||||
|
|
||||||
|
```python
|
||||||
|
uploader = try_get(meta, lambda x: x['user']['name']) # correct
|
||||||
|
```
|
||||||
|
or
|
||||||
|
```python
|
||||||
|
uploader = traverse_obj(meta, ('user', 'name')) # correct
|
||||||
|
```
|
||||||
|
|
||||||
|
and not like:
|
||||||
|
|
||||||
|
```python
|
||||||
|
uploader = meta['user']['name'] # incorrect
|
||||||
|
```
|
||||||
|
or
|
||||||
|
```python
|
||||||
|
uploader = meta.get('user', {}).get('name') # incorrect
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:
|
Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
@@ -240,10 +325,35 @@ description = self._search_regex(
|
|||||||
|
|
||||||
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.
|
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.
|
||||||
|
|
||||||
|
|
||||||
|
Another thing to remember is not to try to iterate over `None`
|
||||||
|
|
||||||
|
Say you extracted a list of thumbnails into `thumbnail_data` using `try_get` and now want to iterate over them
|
||||||
|
|
||||||
|
```python
|
||||||
|
thumbnail_data = try_get(...)
|
||||||
|
thumbnails = [{
|
||||||
|
'url': item['url']
|
||||||
|
} for item in thumbnail_data or []] # correct
|
||||||
|
```
|
||||||
|
|
||||||
|
and not like:
|
||||||
|
|
||||||
|
```python
|
||||||
|
thumbnail_data = try_get(...)
|
||||||
|
thumbnails = [{
|
||||||
|
'url': item['url']
|
||||||
|
} for item in thumbnail_data] # incorrect
|
||||||
|
```
|
||||||
|
|
||||||
|
In the latter case, `thumbnail_data` will be `None` if the field was not found and this will cause the loop `for item in thumbnail_data` to raise a fatal error. Using `for item in thumbnail_data or []` avoids this error and results in setting an empty list in `thumbnails` instead.
|
||||||
|
|
||||||
|
|
||||||
### Provide fallbacks
|
### Provide fallbacks
|
||||||
|
|
||||||
When extracting metadata try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.
|
When extracting metadata try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.
|
||||||
|
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
Say `meta` from the previous example has a `title` and you are about to extract it. Since `title` is a mandatory meta field you should end up with something like:
|
Say `meta` from the previous example has a `title` and you are about to extract it. Since `title` is a mandatory meta field you should end up with something like:
|
||||||
@@ -262,6 +372,7 @@ title = meta.get('title') or self._og_search_title(webpage)
|
|||||||
|
|
||||||
This code will try to extract from `meta` first and if it fails it will try extracting `og:title` from a `webpage`.
|
This code will try to extract from `meta` first and if it fails it will try extracting `og:title` from a `webpage`.
|
||||||
|
|
||||||
|
|
||||||
### Regular expressions
|
### Regular expressions
|
||||||
|
|
||||||
#### Don't capture groups you don't use
|
#### Don't capture groups you don't use
|
||||||
@@ -283,7 +394,6 @@ Incorrect:
|
|||||||
r'(id|ID)=(?P<id>\d+)'
|
r'(id|ID)=(?P<id>\d+)'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
#### Make regular expressions relaxed and flexible
|
#### Make regular expressions relaxed and flexible
|
||||||
|
|
||||||
When using regular expressions try to write them fuzzy, relaxed and flexible, skipping insignificant parts that are more likely to change, allowing both single and double quotes for quoted values and so on.
|
When using regular expressions try to write them fuzzy, relaxed and flexible, skipping insignificant parts that are more likely to change, allowing both single and double quotes for quoted values and so on.
|
||||||
@@ -299,14 +409,14 @@ Say you need to extract `title` from the following HTML code:
|
|||||||
The code for that task should look similar to:
|
The code for that task should look similar to:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
title = self._search_regex(
|
title = self._search_regex( # correct
|
||||||
r'<span[^>]+class="title"[^>]*>([^<]+)', webpage, 'title')
|
r'<span[^>]+class="title"[^>]*>([^<]+)', webpage, 'title')
|
||||||
```
|
```
|
||||||
|
|
||||||
Or even better:
|
Or even better:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
title = self._search_regex(
|
title = self._search_regex( # correct
|
||||||
r'<span[^>]+class=(["\'])title\1[^>]*>(?P<title>[^<]+)',
|
r'<span[^>]+class=(["\'])title\1[^>]*>(?P<title>[^<]+)',
|
||||||
webpage, 'title', group='title')
|
webpage, 'title', group='title')
|
||||||
```
|
```
|
||||||
@@ -316,14 +426,25 @@ Note how you tolerate potential changes in the `style` attribute's value or swit
|
|||||||
The code definitely should not look like:
|
The code definitely should not look like:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
title = self._search_regex(
|
title = self._search_regex( # incorrect
|
||||||
r'<span style="position: absolute; left: 910px; width: 90px; float: right; z-index: 9999;" class="title">(.*?)</span>',
|
r'<span style="position: absolute; left: 910px; width: 90px; float: right; z-index: 9999;" class="title">(.*?)</span>',
|
||||||
webpage, 'title', group='title')
|
webpage, 'title', group='title')
|
||||||
```
|
```
|
||||||
|
|
||||||
|
or even
|
||||||
|
|
||||||
|
```python
|
||||||
|
title = self._search_regex( # incorrect
|
||||||
|
r'<span style=".*?" class="title">(.*?)</span>',
|
||||||
|
webpage, 'title', group='title')
|
||||||
|
```
|
||||||
|
|
||||||
|
Here the presence or absence of other attributes including `style` is irrelevant for the data we need, and so the regex must not depend on it
|
||||||
|
|
||||||
|
|
||||||
### Long lines policy
|
### Long lines policy
|
||||||
|
|
||||||
There is a soft limit to keep lines of code under 80 characters long. This means it should be respected if possible and if it does not make readability and code maintenance worse.
|
There is a soft limit to keep lines of code under 100 characters long. This means it should be respected if possible and if it does not make readability and code maintenance worse. Sometimes, it may be reasonable to go up to 120 characters and sometimes even 80 can be unreadable. Keep in mind that this is not a hard limit and is just one of many tools to make the code more readable
|
||||||
|
|
||||||
For example, you should **never** split long string literals like URLs or some other often copied entities over multiple lines to fit this limit:
|
For example, you should **never** split long string literals like URLs or some other often copied entities over multiple lines to fit this limit:
|
||||||
|
|
||||||
@@ -360,6 +481,7 @@ TITLE_RE = r'<title>([^<]+)</title>'
|
|||||||
title = self._html_search_regex(TITLE_RE, webpage, 'title')
|
title = self._html_search_regex(TITLE_RE, webpage, 'title')
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Collapse fallbacks
|
### Collapse fallbacks
|
||||||
|
|
||||||
Multiple fallback values can quickly become unwieldy. Collapse multiple fallback values into a single expression via a list of patterns.
|
Multiple fallback values can quickly become unwieldy. Collapse multiple fallback values into a single expression via a list of patterns.
|
||||||
@@ -385,10 +507,13 @@ description = (
|
|||||||
|
|
||||||
Methods supporting list of patterns are: `_search_regex`, `_html_search_regex`, `_og_search_property`, `_html_search_meta`.
|
Methods supporting list of patterns are: `_search_regex`, `_html_search_regex`, `_og_search_property`, `_html_search_meta`.
|
||||||
|
|
||||||
|
|
||||||
### Trailing parentheses
|
### Trailing parentheses
|
||||||
|
|
||||||
Always move trailing parentheses after the last argument.
|
Always move trailing parentheses after the last argument.
|
||||||
|
|
||||||
|
Note that this *does not* apply to braces `}` or square brackets `]`, both of which should be closed in a new line
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
Correct:
|
Correct:
|
||||||
@@ -406,30 +531,36 @@ Incorrect:
|
|||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Use convenience conversion and parsing functions
|
### Use convenience conversion and parsing functions
|
||||||
|
|
||||||
Wrap all extracted numeric data into safe functions from [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/utils.py): `int_or_none`, `float_or_none`. Use them for string to number conversions as well.
|
Wrap all extracted numeric data into safe functions from [`yt_dlp/utils.py`](yt_dlp/utils.py): `int_or_none`, `float_or_none`. Use them for string to number conversions as well.
|
||||||
|
|
||||||
Use `url_or_none` for safe URL processing.
|
Use `url_or_none` for safe URL processing.
|
||||||
|
|
||||||
Use `try_get` for safe metadata extraction from parsed JSON.
|
Use `try_get`, `dict_get` and `traverse_obj` for safe metadata extraction from parsed JSON.
|
||||||
|
|
||||||
Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution`, `parse_duration` for `duration` extraction, `parse_age_limit` for `age_limit` extraction.
|
Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution`, `parse_duration` for `duration` extraction, `parse_age_limit` for `age_limit` extraction.
|
||||||
|
|
||||||
Explore [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/utils.py) for more useful convenience functions.
|
Explore [`yt_dlp/utils.py`](yt_dlp/utils.py) for more useful convenience functions.
|
||||||
|
|
||||||
#### More examples
|
#### More examples
|
||||||
|
|
||||||
##### Safely extract optional description from parsed JSON
|
##### Safely extract optional description from parsed JSON
|
||||||
```python
|
```python
|
||||||
description = try_get(response, lambda x: x['result']['video'][0]['summary'], compat_str)
|
description = traverse_obj(response, ('result', 'video', 'summary'), expected_type=str)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Safely extract more optional metadata
|
##### Safely extract more optional metadata
|
||||||
```python
|
```python
|
||||||
video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
|
video = traverse_obj(response, ('result', 'video', 0), default={}, expected_type=dict)
|
||||||
description = video.get('summary')
|
description = video.get('summary')
|
||||||
duration = float_or_none(video.get('durationMs'), scale=1000)
|
duration = float_or_none(video.get('durationMs'), scale=1000)
|
||||||
view_count = int_or_none(video.get('views'))
|
view_count = int_or_none(video.get('views'))
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# EMBEDDING YT-DLP
|
||||||
|
See [README.md#embedding-yt-dlp](README.md#embedding-yt-dlp) for instructions on how to embed yt-dlp in another Python program
|
||||||
|
|||||||
74
CONTRIBUTORS
74
CONTRIBUTORS
@@ -22,7 +22,7 @@ Zocker1999NET
|
|||||||
nao20010128nao
|
nao20010128nao
|
||||||
kurumigi
|
kurumigi
|
||||||
bbepis
|
bbepis
|
||||||
animelover1984
|
animelover1984/horahoradev
|
||||||
Pccode66
|
Pccode66
|
||||||
RobinD42
|
RobinD42
|
||||||
hseg
|
hseg
|
||||||
@@ -67,3 +67,75 @@ zerodytrash
|
|||||||
wesnm
|
wesnm
|
||||||
pento
|
pento
|
||||||
rigstot
|
rigstot
|
||||||
|
dirkf
|
||||||
|
funniray
|
||||||
|
Jessecar96
|
||||||
|
jhwgh1968
|
||||||
|
kikuyan
|
||||||
|
max-te
|
||||||
|
nchilada
|
||||||
|
pgaig
|
||||||
|
PSlava
|
||||||
|
stdedos
|
||||||
|
u-spec-png
|
||||||
|
Sipherdrakon
|
||||||
|
kidonng
|
||||||
|
smege1001
|
||||||
|
tandy1000
|
||||||
|
IONECarter
|
||||||
|
capntrips
|
||||||
|
mrfade
|
||||||
|
ParadoxGBB
|
||||||
|
wlritchi
|
||||||
|
NeroBurner
|
||||||
|
mahanstreamer
|
||||||
|
alerikaisattera
|
||||||
|
Derkades
|
||||||
|
BunnyHelp
|
||||||
|
i6t
|
||||||
|
std-move
|
||||||
|
Chocobozzz
|
||||||
|
ouwou
|
||||||
|
korli
|
||||||
|
octotherp
|
||||||
|
CeruleanSky
|
||||||
|
zootedb0t
|
||||||
|
chao813
|
||||||
|
ChillingPepper
|
||||||
|
ConquerorDopy
|
||||||
|
dalanmiller
|
||||||
|
DigitalDJ
|
||||||
|
f4pp3rk1ng
|
||||||
|
gesa
|
||||||
|
Jules-A
|
||||||
|
makeworld-the-better-one
|
||||||
|
MKSherbini
|
||||||
|
mrx23dot
|
||||||
|
poschi3
|
||||||
|
raphaeldore
|
||||||
|
renalid
|
||||||
|
sleaux-meaux
|
||||||
|
sulyi
|
||||||
|
tmarki
|
||||||
|
Vangelis66
|
||||||
|
AjaxGb
|
||||||
|
ajj8
|
||||||
|
jakubadamw
|
||||||
|
jfogelman
|
||||||
|
timethrow
|
||||||
|
sarnoud
|
||||||
|
Bojidarist
|
||||||
|
18928172992817182/gustaf
|
||||||
|
nixklai
|
||||||
|
smplayer-dev
|
||||||
|
Zirro
|
||||||
|
CrypticSignal
|
||||||
|
flashdagger
|
||||||
|
fractalf
|
||||||
|
frafra
|
||||||
|
kaz-us
|
||||||
|
ozburo
|
||||||
|
rhendric
|
||||||
|
sdomi
|
||||||
|
selfisekai
|
||||||
|
stanoarn
|
||||||
|
|||||||
529
Changelog.md
529
Changelog.md
@@ -7,23 +7,532 @@
|
|||||||
* Update Changelog.md and CONTRIBUTORS
|
* Update Changelog.md and CONTRIBUTORS
|
||||||
* Change "Merged with ytdl" version in Readme.md if needed
|
* Change "Merged with ytdl" version in Readme.md if needed
|
||||||
* Add new/fixed extractors in "new features" section of Readme.md
|
* Add new/fixed extractors in "new features" section of Readme.md
|
||||||
* Commit to master as `Release <version>`
|
* Commit as `Release <version>`
|
||||||
* Push to origin/release using `git push origin master:release`
|
* Push to origin/release using `git push origin master:release`
|
||||||
build task will now run
|
build task will now run
|
||||||
* Update version.py using `devscripts\update-version.py`
|
|
||||||
* Run `make issuetemplates`
|
|
||||||
* Commit to master as `[version] update :ci skip all`
|
|
||||||
* Push to origin/master
|
|
||||||
* Update changelog in /releases
|
|
||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
|
||||||
|
### 2021.11.10.1
|
||||||
|
|
||||||
|
* Temporarily disable MacOS Build
|
||||||
|
|
||||||
|
### 2021.11.10
|
||||||
|
|
||||||
|
* [youtube] **Fix throttling by decrypting n-sig**
|
||||||
|
* Merging extractors from [haruhi-dl](https://git.sakamoto.pl/laudom/haruhi-dl) by [selfisekai](https://github.com/selfisekai)
|
||||||
|
* [extractor] Add `_search_nextjs_data`
|
||||||
|
* [tvp] Fix extractors
|
||||||
|
* [tvp] Add TVPStreamIE
|
||||||
|
* [wppilot] Add extractors
|
||||||
|
* [polskieradio] Add extractors
|
||||||
|
* [radiokapital] Add extractors
|
||||||
|
* [polsatgo] Add extractor by [selfisekai](https://github.com/selfisekai), [sdomi](https://github.com/sdomi)
|
||||||
|
* Separate `--check-all-formats` from `--check-formats`
|
||||||
|
* Approximate filesize from bitrate
|
||||||
|
* Don't create console in `windows_enable_vt_mode`
|
||||||
|
* Fix bug in `--load-infojson` of playlists
|
||||||
|
* [minicurses] Add colors to `-F` and standardize color-printing code
|
||||||
|
* [outtmpl] Add type `link` for internet shortcut files
|
||||||
|
* [outtmpl] Add alternate forms for `q` and `j`
|
||||||
|
* [outtmpl] Do not traverse `None`
|
||||||
|
* [fragment] Fix progress display in fragmented downloads
|
||||||
|
* [downloader/ffmpeg] Fix vtt download with ffmpeg
|
||||||
|
* [ffmpeg] Detect presence of setts and libavformat version
|
||||||
|
* [ExtractAudio] Rescale --audio-quality correctly by [CrypticSignal](https://github.com/CrypticSignal), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [ExtractAudio] Use `libfdk_aac` if available by [CrypticSignal](https://github.com/CrypticSignal)
|
||||||
|
* [FormatSort] `eac3` is better than `ac3`
|
||||||
|
* [FormatSort] Fix some fields' defaults
|
||||||
|
* [generic] Detect more json_ld
|
||||||
|
* [generic] parse jwplayer with only the json URL
|
||||||
|
* [extractor] Add keyword automatically to SearchIE descriptions
|
||||||
|
* [extractor] Fix some errors being converted to `ExtractorError`
|
||||||
|
* [utils] Add `join_nonempty`
|
||||||
|
* [utils] Add `jwt_decode_hs256` by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [utils] Create `DownloadCancelled` exception
|
||||||
|
* [utils] Parse `vp09` as vp9
|
||||||
|
* [utils] Sanitize URL when determining protocol
|
||||||
|
* [test/download] Fallback test to `bv`
|
||||||
|
* [docs] Minor documentation improvements
|
||||||
|
* [cleanup] Improvements to error and debug messages
|
||||||
|
* [cleanup] Minor fixes and cleanup
|
||||||
|
* [3speak] Add extractors by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [AmazonStore] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Gab] Add extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [mediaset] Add playlist support by [nixxo](https://github.com/nixxo)
|
||||||
|
* [MLSScoccer] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [N1] Add support for nova.rs by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [PlanetMarathi] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [RaiplayRadio] Add extractors by [frafra](https://github.com/frafra)
|
||||||
|
* [roosterteeth] Add series extractor
|
||||||
|
* [sky] Add `SkyNewsStoryIE` by [ajj8](https://github.com/ajj8)
|
||||||
|
* [youtube] Fix sorting for some videos
|
||||||
|
* [youtube] Populate `thumbnail` with the best "known" thumbnail
|
||||||
|
* [youtube] Refactor itag processing
|
||||||
|
* [youtube] Remove unnecessary no-playlist warning
|
||||||
|
* [youtube:tab] Add Invidious list for playlists/channels by [rhendric](https://github.com/rhendric)
|
||||||
|
* [Bilibili:comments] Fix infinite loop by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [ceskatelevize] Fix extractor by [flashdagger](https://github.com/flashdagger)
|
||||||
|
* [Coub] Fix media format identification by [wlritchi](https://github.com/wlritchi)
|
||||||
|
* [crunchyroll] Add extractor-args `language` and `hardsub`
|
||||||
|
* [DiscoveryPlus] Allow language codes in URL
|
||||||
|
* [imdb] Fix thumbnail by [ozburo](https://github.com/ozburo)
|
||||||
|
* [instagram] Add IOS URL support by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [instagram] Improve login code by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [Instagram] Improve metadata extraction by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [iPrima] Fix extractor by [stanoarn](https://github.com/stanoarn)
|
||||||
|
* [itv] Add support for ITV News by [ajj8](https://github.com/ajj8)
|
||||||
|
* [la7] Fix extractor by [nixxo](https://github.com/nixxo)
|
||||||
|
* [linkedin] Don't login multiple times
|
||||||
|
* [mtv] Fix some videos by [Sipherdrakon](https://github.com/Sipherdrakon)
|
||||||
|
* [Newgrounds] Fix description by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [Nrk] Minor fixes by [fractalf](https://github.com/fractalf)
|
||||||
|
* [Olympics] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [piksel] Fix sorting
|
||||||
|
* [twitter] Do not sort by codec
|
||||||
|
* [viewlift] Add cookie-based login and series support by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [vimeo] Detect source extension and misc cleanup by [flashdagger](https://github.com/flashdagger)
|
||||||
|
* [vimeo] Fix ondemand videos and direct URLs with hash
|
||||||
|
* [vk] Fix login and add subtitles by [kaz-us](https://github.com/kaz-us)
|
||||||
|
* [VLive] Add upload_date and thumbnail by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [VRT] Fix login by [pgaig](https://github.com/pgaig)
|
||||||
|
* [Vupload] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [wakanim] Add support for MPD manifests by [nyuszika7h](https://github.com/nyuszika7h)
|
||||||
|
* [wakanim] Detect geo-restriction by [nyuszika7h](https://github.com/nyuszika7h)
|
||||||
|
* [ZenYandex] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
|
||||||
|
|
||||||
|
### 2021.10.22
|
||||||
|
|
||||||
|
* [build] Improvements
|
||||||
|
* Build standalone MacOS packages by [smplayer-dev](https://github.com/smplayer-dev)
|
||||||
|
* Release windows exe built with `py2exe`
|
||||||
|
* Enable lazy-extractors in releases.
|
||||||
|
* Set env var `YTDLP_NO_LAZY_EXTRACTORS` to forcefully disable this (experimental)
|
||||||
|
* Clean up error reporting in update
|
||||||
|
* Refactor `pyinst.py`, misc cleanup and improve docs
|
||||||
|
* [docs] Migrate issues to use forms by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [downloader] **Fix slow progress hooks**
|
||||||
|
* This was causing HLS/DASH downloads to be extremely slow in some situations
|
||||||
|
* [downloader/ffmpeg] Improve simultaneous download and merge
|
||||||
|
* [EmbedMetadata] Allow overwriting all default metadata with `meta_default` key
|
||||||
|
* [ModifyChapters] Add ability for `--remove-chapters` to remove sections by timestamp
|
||||||
|
* [utils] Allow duration strings in `--match-filter`
|
||||||
|
* Add HDR information to formats
|
||||||
|
* Add negative option `--no-batch-file` by [Zirro](https://github.com/Zirro)
|
||||||
|
* Calculate more fields for merged formats
|
||||||
|
* Do not verify thumbnail URLs unless `--check-formats` is specified
|
||||||
|
* Don't create console for subprocesses on Windows
|
||||||
|
* Fix `--restrict-filename` when used with default template
|
||||||
|
* Fix `check_formats` output being written to stdout when `-qv`
|
||||||
|
* Fix bug in storyboards
|
||||||
|
* Fix conflict b/w id and ext in format selection
|
||||||
|
* Fix verbose head not showing custom configs
|
||||||
|
* Load archive only after printing verbose head
|
||||||
|
* Make `duration_string` and `resolution` available in --match-filter
|
||||||
|
* Re-implement deprecated option `--id`
|
||||||
|
* Reduce default `--socket-timeout`
|
||||||
|
* Write verbose header to logger
|
||||||
|
* [outtmpl] Fix bug in expanding environment variables
|
||||||
|
* [cookies] Local State should be opened as utf-8
|
||||||
|
* [extractor,utils] Detect more codecs/mimetypes
|
||||||
|
* [extractor] Detect `EXT-X-KEY` Apple FairPlay
|
||||||
|
* [utils] Use `importlib` to load plugins by [sulyi](https://github.com/sulyi)
|
||||||
|
* [http] Retry on socket timeout and show the last encountered error
|
||||||
|
* [fragment] Print error message when skipping fragment
|
||||||
|
* [aria2c] Fix `--skip-unavailable-fragment`
|
||||||
|
* [SponsorBlock] Obey `extractor-retries` and `sleep-requests`
|
||||||
|
* [Merger] Do not add `aac_adtstoasc` to non-hls audio
|
||||||
|
* [ModifyChapters] Do not mutate original chapters by [nihil-admirari](https://github.com/nihil-admirari)
|
||||||
|
* [devscripts/run_tests] Use markers to filter tests by [sulyi](https://github.com/sulyi)
|
||||||
|
* [7plus] Add cookie based authentication by [nyuszika7h](https://github.com/nyuszika7h)
|
||||||
|
* [AdobePass] Fix RCN MSO by [jfogelman](https://github.com/jfogelman)
|
||||||
|
* [CBC] Fix Gem livestream by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
|
||||||
|
* [CBC] Support CBC Gem member content by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
|
||||||
|
* [crunchyroll] Add season to flat-playlist
|
||||||
|
* [crunchyroll] Add support for `beta.crunchyroll` URLs and fix series URLs with language code
|
||||||
|
* [EUScreen] Add Extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Gronkh] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [hidive] Fix typo
|
||||||
|
* [Hotstar] Mention Dynamic Range in `format_id` by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Hotstar] Raise appropriate error for DRM
|
||||||
|
* [instagram] Add login by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [instagram] Show appropriate error when login is needed
|
||||||
|
* [microsoftstream] Add extractor by [damianoamatruda](https://github.com/damianoamatruda), [nixklai](https://github.com/nixklai)
|
||||||
|
* [on24] Add extractor by [damianoamatruda](https://github.com/damianoamatruda)
|
||||||
|
* [patreon] Fix vimeo player regex by [zenerdi0de](https://github.com/zenerdi0de)
|
||||||
|
* [SkyNewsAU] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [tagesschau] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [tbs] Add tbs live streams by [llacb47](https://github.com/llacb47)
|
||||||
|
* [tiktok] Fix typo and update tests
|
||||||
|
* [trovo] Support channel clips and VODs by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Viafree] Add support for Finland by [18928172992817182](https://github.com/18928172992817182)
|
||||||
|
* [vimeo] Fix embedded `player.vimeo`
|
||||||
|
* [vlive:channel] Fix extraction by [kikuyan](https://github.com/kikuyan), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [youtube] Add auto-translated subtitles
|
||||||
|
* [youtube] Expose different formats with same itag
|
||||||
|
* [youtube:comments] Fix for new layout by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [cleanup] Cleanup bilibili code by [pukkandan](https://github.com/pukkandan), [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [cleanup] Remove broken youtube login code
|
||||||
|
* [cleanup] Standardize timestamp formatting code
|
||||||
|
* [cleanup] Generalize `getcomments` implementation for extractors
|
||||||
|
* [cleanup] Simplify search extractors code
|
||||||
|
* [cleanup] misc
|
||||||
|
|
||||||
|
|
||||||
|
### 2021.10.10
|
||||||
|
|
||||||
|
* [downloader/ffmpeg] Fix bug in initializing `FFmpegPostProcessor`
|
||||||
|
* [minicurses] Fix when printing to file
|
||||||
|
* [downloader] Fix throttledratelimit
|
||||||
|
* [francetv] Fix extractor by [fstirlitz](https://github.com/fstirlitz), [sarnoud](https://github.com/sarnoud)
|
||||||
|
* [NovaPlay] Add extractor by [Bojidarist](https://github.com/Bojidarist)
|
||||||
|
* [ffmpeg] Revert "Set max probesize" - No longer needed
|
||||||
|
* [docs] Remove incorrect dependency on VC++10
|
||||||
|
* [build] Allow to release without changelog
|
||||||
|
|
||||||
|
### 2021.10.09
|
||||||
|
|
||||||
|
* Improved progress reporting
|
||||||
|
* Separate `--console-title` and `--no-progress`
|
||||||
|
* Add option `--progress` to show progress-bar even in quiet mode
|
||||||
|
* Fix and refactor `minicurses` and use it for all progress reporting
|
||||||
|
* Standardize use of terminal sequences and enable color support for windows 10
|
||||||
|
* Add option `--progress-template` to customize progress-bar and console-title
|
||||||
|
* Add postprocessor hooks and progress reporting
|
||||||
|
* [postprocessor] Add plugin support with option `--use-postprocessor`
|
||||||
|
* [extractor] Extract storyboards from SMIL manifests by [fstirlitz](https://github.com/fstirlitz)
|
||||||
|
* [outtmpl] Alternate form of format type `l` for `\n` delimited list
|
||||||
|
* [outtmpl] Format type `U` for unicode normalization
|
||||||
|
* [outtmpl] Allow empty output template to skip a type of file
|
||||||
|
* Merge webm formats into mkv if thumbnails are to be embedded
|
||||||
|
* [adobepass] Add RCN as MSO by [jfogelman](https://github.com/jfogelman)
|
||||||
|
* [ciscowebex] Add extractor by [damianoamatruda](https://github.com/damianoamatruda)
|
||||||
|
* [Gettr] Add extractor by [i6t](https://github.com/i6t)
|
||||||
|
* [GoPro] Add extractor by [i6t](https://github.com/i6t)
|
||||||
|
* [N1] Add extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [Theta] Add video extractor by [alerikaisattera](https://github.com/alerikaisattera)
|
||||||
|
* [Veo] Add extractor by [i6t](https://github.com/i6t)
|
||||||
|
* [Vupload] Add extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [bbc] Extract better quality videos by [ajj8](https://github.com/ajj8)
|
||||||
|
* [Bilibili] Add subtitle converter by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [CBC] Cleanup tests by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
|
||||||
|
* [Douyin] Rewrite extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
|
||||||
|
* [Funimation] Fix for /v/ urls by [pukkandan](https://github.com/pukkandan), [Jules-A](https://github.com/Jules-A)
|
||||||
|
* [Funimation] Sort formats according to the relevant extractor-args
|
||||||
|
* [Hidive] Fix duplicate and incorrect formats
|
||||||
|
* [HotStarSeries] Fix cookies by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [LinkedInLearning] Add subtitles by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Mediaite] Relax valid url by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [Newgrounds] Add age_limit and fix duration by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [Newgrounds] Fix view count on songs by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [parliamentlive.tv] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [PolskieRadio] Fix extractors by [jakubadamw](https://github.com/jakubadamw), [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [reddit] Add embedded url by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [reddit] Fix 429 by generating a random `reddit_session` by [AjaxGb](https://github.com/AjaxGb)
|
||||||
|
* [Rumble] Add RumbleChannelIE by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [soundcloud:playlist] Detect last page correctly
|
||||||
|
* [SovietsCloset] Add duration from m3u8 by [ChillingPepper](https://github.com/ChillingPepper)
|
||||||
|
* [Streamable] Add codecs by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [vidme] Remove extractor by [alerikaisattera](https://github.com/alerikaisattera)
|
||||||
|
* [youtube:tab] Fallback to API when webpage fails to download by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [youtube] Fix non-fatal errors in fetching player
|
||||||
|
* Fix `--flat-playlist` when neither IE nor id is known
|
||||||
|
* Fix `-f mp4` behaving differently from youtube-dl
|
||||||
|
* Workaround for bug in `ssl.SSLContext.load_default_certs`
|
||||||
|
* [aes] Improve performance slightly by [sulyi](https://github.com/sulyi)
|
||||||
|
* [cookies] Fix keyring fallback by [mbway](https://github.com/mbway)
|
||||||
|
* [embedsubtitle] Fix error when duration is unknown
|
||||||
|
* [ffmpeg] Fix error when subtitle file is missing
|
||||||
|
* [ffmpeg] Set max probesize to workaround AAC HLS stream issues by [shirt](https://github.com/shirt-dev)
|
||||||
|
* [FixupM3u8] Remove redundant run if merged is needed
|
||||||
|
* [hls] Fix decryption issues by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [http] Respect user-provided chunk size over extractor's
|
||||||
|
* [utils] Let traverse_obj accept functions as keys
|
||||||
|
* [docs] Add note about our custom ffmpeg builds
|
||||||
|
* [docs] Write embedding and contributing documentation by [pukkandan](https://github.com/pukkandan), [timethrow](https://github.com/timethrow)
|
||||||
|
* [update] Check for new version even if not updateable
|
||||||
|
* [build] Add more files to the tarball
|
||||||
|
* [build] Allow building with py2exe (and misc fixes)
|
||||||
|
* [build] Use pycryptodomex by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [cleanup] Some minor refactoring, improve docs and misc cleanup
|
||||||
|
|
||||||
|
|
||||||
|
### 2021.09.25
|
||||||
|
|
||||||
|
* Add new option `--netrc-location`
|
||||||
|
* [outtmpl] Allow alternate fields using `,`
|
||||||
|
* [outtmpl] Add format type `B` to treat the value as bytes (eg: to limit the filename to a certain number of bytes)
|
||||||
|
* Separate the options `--ignore-errors` and `--no-abort-on-error`
|
||||||
|
* Basic framework for simultaneous download of multiple formats by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [17live] Add 17.live extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [bilibili] Add BiliIntlIE and BiliIntlSeriesIE by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [CAM4] Add extractor by [alerikaisattera](https://github.com/alerikaisattera)
|
||||||
|
* [Chingari] Add extractors by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [CGTN] Add extractor by [chao813](https://github.com/chao813)
|
||||||
|
* [damtomo] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [gotostage] Add extractor by [poschi3](https://github.com/poschi3)
|
||||||
|
* [Koo] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Mediaite] Add Extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Mediaklikk] Add Extractor by [tmarki](https://github.com/tmarki), [mrx23dot](https://github.com/mrx23dot), [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [MuseScore] Add Extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Newgrounds] Add NewgroundsUserIE and improve extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [nzherald] Add NZHeraldIE by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [Olympics] Add replay extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Peertube] Add channel and playlist extractors by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [radlive] Add extractor by [nyuszika7h](https://github.com/nyuszika7h)
|
||||||
|
* [SovietsCloset] Add extractor by [ChillingPepper](https://github.com/ChillingPepper)
|
||||||
|
* [Streamanity] Add Extractor by [alerikaisattera](https://github.com/alerikaisattera)
|
||||||
|
* [Theta] Add extractor by [alerikaisattera](https://github.com/alerikaisattera)
|
||||||
|
* [Yandex] Add ZenYandexIE and ZenYandexChannelIE by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [9Now] handle episodes of series by [dalanmiller](https://github.com/dalanmiller)
|
||||||
|
* [AnimalPlanet] Fix extractor by [Sipherdrakon](https://github.com/Sipherdrakon)
|
||||||
|
* [Arte] Improve description extraction by [renalid](https://github.com/renalid)
|
||||||
|
* [atv.at] Use jwt for API by [NeroBurner](https://github.com/NeroBurner)
|
||||||
|
* [brightcove] Extract subtitles from manifests
|
||||||
|
* [CBC] Fix CBC Gem extractors by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
|
||||||
|
* [cbs] Report appropriate error for DRM
|
||||||
|
* [comedycentral] Support `collection-playlist` by [nixxo](https://github.com/nixxo)
|
||||||
|
* [DIYNetwork] Support new format by [Sipherdrakon](https://github.com/Sipherdrakon)
|
||||||
|
* [downloader/niconico] Pass custom headers by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [dw] Fix extractor
|
||||||
|
* [Fancode] Fix live streams by [zenerdi0de](https://github.com/zenerdi0de)
|
||||||
|
* [funimation] Fix for locations outside US by [Jules-A](https://github.com/Jules-A), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [globo] Fix GloboIE by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [HiDive] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Hotstar] Add referer for subs by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [itv] Fix extractor, add subtitles and thumbnails by [coletdjnz](https://github.com/coletdjnz), [sleaux-meaux](https://github.com/sleaux-meaux), [Vangelis66](https://github.com/Vangelis66)
|
||||||
|
* [lbry] Show error message from API response
|
||||||
|
* [Mxplayer] Use mobile API by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [NDR] Rewrite NDRIE by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Nuvid] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [Oreilly] Handle new web url by [MKSherbini](https://github.com/MKSherbini)
|
||||||
|
* [pbs] Fix subtitle extraction by [coletdjnz](https://github.com/coletdjnz), [gesa](https://github.com/gesa), [raphaeldore](https://github.com/raphaeldore)
|
||||||
|
* [peertube] Update instances by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [plutotv] Fix extractor for URLs with `/en`
|
||||||
|
* [reddit] Workaround for 429 by redirecting to old.reddit.com
|
||||||
|
* [redtube] Fix exts
|
||||||
|
* [soundcloud] Make playlist extraction lazy
|
||||||
|
* [soundcloud] Retry playlist pages on `502` error and update `_CLIENT_ID`
|
||||||
|
* [southpark] Fix SouthParkDE by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [SovietsCloset] Fix playlists for games with only named categories by [ConquerorDopy](https://github.com/ConquerorDopy)
|
||||||
|
* [SpankBang] Fix uploader by [f4pp3rk1ng](https://github.com/f4pp3rk1ng), [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [tiktok] Use API to fetch higher quality video by [MinePlayersPE](https://github.com/MinePlayersPE), [llacb47](https://github.com/llacb47)
|
||||||
|
* [TikTokUser] Fix extractor using mobile API by [MinePlayersPE](https://github.com/MinePlayersPE), [llacb47](https://github.com/llacb47)
|
||||||
|
* [videa] Fix some extraction errors by [nyuszika7h](https://github.com/nyuszika7h)
|
||||||
|
* [VrtNU] Handle login errors by [llacb47](https://github.com/llacb47)
|
||||||
|
* [vrv] Don't raise error when thumbnails are missing
|
||||||
|
* [youtube] Cleanup authentication code by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [youtube] Fix `--mark-watched` with `--cookies-from-browser`
|
||||||
|
* [youtube] Improvements to JS player extraction and add extractor-args to skip it by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [youtube] Retry on 'Unknown Error' by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [youtube] Return full URL instead of just ID
|
||||||
|
* [youtube] Warn when trying to download clips
|
||||||
|
* [zdf] Improve format sorting
|
||||||
|
* [zype] Extract subtitles from the m3u8 manifest by [fstirlitz](https://github.com/fstirlitz)
|
||||||
|
* Allow `--force-write-archive` to work with `--flat-playlist`
|
||||||
|
* Download subtitles in order of `--sub-langs`
|
||||||
|
* Allow `0` in `--playlist-items`
|
||||||
|
* Handle more playlist errors with `-i`
|
||||||
|
* Fix `--no-get-comments`
|
||||||
|
* Fix `extra_info` being reused across runs
|
||||||
|
* Fix compat options `no-direct-merge` and `playlist-index`
|
||||||
|
* Dump files should obey `--trim-filename` by [sulyi](https://github.com/sulyi)
|
||||||
|
* [aes] Add `aes_gcm_decrypt_and_verify` by [sulyi](https://github.com/sulyi), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [aria2c] Fix IV for some AES-128 streams by [shirt](https://github.com/shirt-dev)
|
||||||
|
* [compat] Don't ignore `HOME` (if set) on windows
|
||||||
|
* [cookies] Make browser names case insensitive
|
||||||
|
* [cookies] Print warning for cookie decoding error only once
|
||||||
|
* [extractor] Fix root-relative URLs in MPD by [DigitalDJ](https://github.com/DigitalDJ)
|
||||||
|
* [ffmpeg] Add `aac_adtstoasc` when merging if needed
|
||||||
|
* [fragment,aria2c] Generalize and refactor some code
|
||||||
|
* [fragment] Avoid repeated request for AES key
|
||||||
|
* [fragment] Fix range header when using `-N` and media sequence by [shirt](https://github.com/shirt-dev)
|
||||||
|
* [hls,aes] Fallback to native implementation for AES-CBC and detect `Cryptodome` in addition to `Crypto`
|
||||||
|
* [hls] Byterange + AES128 is supported by native downloader
|
||||||
|
* [ModifyChapters] Improve sponsor chapter merge algorithm by [nihil-admirari](https://github.com/nihil-admirari)
|
||||||
|
* [ModifyChapters] Minor fixes
|
||||||
|
* [WebVTT] Adjust parser to accommodate PBS subtitles
|
||||||
|
* [utils] Improve `extract_timezone` by [dirkf](https://github.com/dirkf)
|
||||||
|
* [options] Fix `--no-config` and refactor reading of config files
|
||||||
|
* [options] Strip spaces and ignore empty entries in list-like switches
|
||||||
|
* [test/cookies] Improve logging
|
||||||
|
* [build] Automate more of the release process by [animelover1984](https://github.com/animelover1984), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [build] Fix sha256 by [nihil-admirari](https://github.com/nihil-admirari)
|
||||||
|
* [build] Bring back brew taps by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [build] Provide `--onedir` zip for windows by [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [cleanup,docs] Add deprecation warning in docs for some counter intuitive behaviour
|
||||||
|
* [cleanup] Fix line endings for `nebula.py` by [glenn-slayden](https://github.com/glenn-slayden)
|
||||||
|
* [cleanup] Improve `make clean-test` by [sulyi](https://github.com/sulyi)
|
||||||
|
* [cleanup] Misc
|
||||||
|
|
||||||
|
|
||||||
|
### 2021.09.02
|
||||||
|
|
||||||
|
* **Native SponsorBlock** implementation by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* `--sponsorblock-remove CATS` removes specified chapters from file
|
||||||
|
* `--sponsorblock-mark CATS` marks the specified sponsor sections as chapters
|
||||||
|
* `--sponsorblock-chapter-title TMPL` to specify sponsor chapter template
|
||||||
|
* `--sponsorblock-api URL` to use a different API
|
||||||
|
* No re-encoding is done unless `--force-keyframes-at-cuts` is used
|
||||||
|
* The fetched sponsor sections are written to the infojson
|
||||||
|
* Deprecates: `--sponskrub`, `--no-sponskrub`, `--sponskrub-cut`, `--no-sponskrub-cut`, `--sponskrub-force`, `--no-sponskrub-force`, `--sponskrub-location`, `--sponskrub-args`
|
||||||
|
* Split `--embed-chapters` from `--embed-metadata` (it still implies the former by default)
|
||||||
|
* Add option `--remove-chapters` to remove arbitrary chapters by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* Add option `--force-keyframes-at-cuts` for more accurate cuts when removing and splitting chapters by [nihil-admirari](https://github.com/nihil-admirari)
|
||||||
|
* Let `--match-filter` reject entries early
|
||||||
|
* Makes redundant: `--match-title`, `--reject-title`, `--min-views`, `--max-views`
|
||||||
|
* [lazy_extractor] Improvements (It now passes all tests)
|
||||||
|
* Bugfix for when plugin directory doesn't exist by [kidonng](https://github.com/kidonng)
|
||||||
|
* Create instance only after pre-checking archive
|
||||||
|
* Import actual class if an attribute is accessed
|
||||||
|
* Fix `suitable` and add flake8 test
|
||||||
|
* [downloader/ffmpeg] Experimental support for DASH manifests (including live)
|
||||||
|
* Your ffmpeg must have [this patch](https://github.com/FFmpeg/FFmpeg/commit/3249c757aed678780e22e99a1a49f4672851bca9) applied for YouTube DASH to work
|
||||||
|
* [downloader/ffmpeg] Allow passing custom arguments before `-i`
|
||||||
|
* [BannedVideo] Add extractor by [smege1001](https://github.com/smege1001), [blackjack4494](https://github.com/blackjack4494), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [bilibili] Add category extractor by [animelover1984](https://github.com/animelover1984)
|
||||||
|
* [Epicon] Add extractors by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [filmmodu] Add extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
|
||||||
|
* [GabTV] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [Hungama] Fix `HungamaSongIE` and add `HungamaAlbumPlaylistIE` by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [ManotoTV] Add new extractors by [tandy1000](https://github.com/tandy1000)
|
||||||
|
* [Niconico] Add Search extractors by [animelover1984](https://github.com/animelover1984), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [Patreon] Add `PatreonUserIE` by [zenerdi0de](https://github.com/zenerdi0de)
|
||||||
|
* [peloton] Add extractor by [IONECarter](https://github.com/IONECarter), [capntrips](https://github.com/capntrips), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [ProjectVeritas] Add extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [radiko] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [StarTV] Add extractor for `startv.com.tr` by [mrfade](https://github.com/mrfade), [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [tiktok] Add `TikTokUserIE` by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [Tokentube] Add extractor by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [TV2Hu] Fix `TV2HuIE` and add `TV2HuSeriesIE` by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [voicy] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [adobepass] Fix Verizon SAML login by [nyuszika7h](https://github.com/nyuszika7h), [ParadoxGBB](https://github.com/ParadoxGBB)
|
||||||
|
* [afreecatv] Fix adult VODs by [wlritchi](https://github.com/wlritchi)
|
||||||
|
* [afreecatv] Tolerate failure to parse date string by [wlritchi](https://github.com/wlritchi)
|
||||||
|
* [aljazeera] Fix extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
|
||||||
|
* [ATV.at] Fix extractor for ATV.at by [NeroBurner](https://github.com/NeroBurner), [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [bitchute] Fix test by [mahanstreamer](https://github.com/mahanstreamer)
|
||||||
|
* [camtube] Remove obsolete extractor by [alerikaisattera](https://github.com/alerikaisattera)
|
||||||
|
* [CDA] Add more formats by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [eroprofile] Fix page skipping in albums by [jhwgh1968](https://github.com/jhwgh1968)
|
||||||
|
* [facebook] Fix format sorting
|
||||||
|
* [facebook] Fix metadata extraction by [kikuyan](https://github.com/kikuyan)
|
||||||
|
* [facebook] Update onion URL by [Derkades](https://github.com/Derkades)
|
||||||
|
* [HearThisAtIE] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [instagram] Add referrer to prevent throttling by [u-spec-png](https://github.com/u-spec-png), [kikuyan](https://github.com/kikuyan)
|
||||||
|
* [iwara.tv] Extract more metadata by [BunnyHelp](https://github.com/BunnyHelp)
|
||||||
|
* [iwara] Add thumbnail by [i6t](https://github.com/i6t)
|
||||||
|
* [kakao] Fix extractor
|
||||||
|
* [mediaset] Fix extraction for some videos by [nyuszika7h](https://github.com/nyuszika7h)
|
||||||
|
* [Motherless] Fix extractor by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [Nova] fix extractor by [std-move](https://github.com/std-move)
|
||||||
|
* [ParamountPlus] Fix geo verification by [shirt](https://github.com/shirt-dev)
|
||||||
|
* [peertube] handle new video URL format by [Chocobozzz](https://github.com/Chocobozzz)
|
||||||
|
* [pornhub] Separate and fix playlist extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
|
||||||
|
* [reddit] Fix for quarantined subreddits by [ouwou](https://github.com/ouwou)
|
||||||
|
* [ShemarooMe] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [soundcloud] Refetch `client_id` on 403
|
||||||
|
* [tiktok] Fix metadata extraction
|
||||||
|
* [TV2] Fix extractor by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [tv5mondeplus] Fix extractor by [korli](https://github.com/korli)
|
||||||
|
* [VH1,TVLand] Fix extractors by [Sipherdrakon](https://github.com/Sipherdrakon)
|
||||||
|
* [Viafree] Fix extractor and extract subtitles by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [XHamster] Extract `uploader_id` by [octotherp](https://github.com/octotherp)
|
||||||
|
* [youtube] Add `shorts` to `_VALID_URL`
|
||||||
|
* [youtube] Add av01 itags to known formats list by [blackjack4494](https://github.com/blackjack4494)
|
||||||
|
* [youtube] Extract error messages from HTTPError response by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [youtube] Fix subtitle names
|
||||||
|
* [youtube] Prefer audio stream that YouTube considers default
|
||||||
|
* [youtube] Remove annotations and deprecate `--write-annotations` by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
* [Zee5] Fix extractor and add subtitles by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [aria2c] Obey `--rate-limit`
|
||||||
|
* [EmbedSubtitle] Continue even if some files are missing
|
||||||
|
* [extractor] Better error message for DRM
|
||||||
|
* [extractor] Common function `_match_valid_url`
|
||||||
|
* [extractor] Show video id in error messages if possible
|
||||||
|
* [FormatSort] Remove priority of `lang`
|
||||||
|
* [options] Add `_set_from_options_callback`
|
||||||
|
* [SubtitleConvertor] Fix bug during subtitle conversion
|
||||||
|
* [utils] Add `parse_qs`
|
||||||
|
* [webvtt] Fix timestamp overflow adjustment by [fstirlitz](https://github.com/fstirlitz)
|
||||||
|
* Bugfix for `--replace-in-metadata`
|
||||||
|
* Don't try to merge with final extension
|
||||||
|
* Fix `--force-overwrites` when using `-k`
|
||||||
|
* Fix `--no-prefer-free-formats` by [CeruleanSky](https://github.com/CeruleanSky)
|
||||||
|
* Fix `-F` for extractors that directly return url
|
||||||
|
* Fix `-J` when there are failed videos
|
||||||
|
* Fix `extra_info` being reused across runs
|
||||||
|
* Fix `playlist_index` not obeying `playlist_start` and add tests
|
||||||
|
* Fix resuming of single formats when using `--no-part`
|
||||||
|
* Revert erroneous use of the `Content-Length` header by [fstirlitz](https://github.com/fstirlitz)
|
||||||
|
* Use `os.replace` where applicable by; paulwrubel
|
||||||
|
* [build] Add homebrew taps `yt-dlp/taps/yt-dlp` by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [build] Fix bug in making `yt-dlp.tar.gz`
|
||||||
|
* [docs] Fix some typos by [pukkandan](https://github.com/pukkandan), [zootedb0t](https://github.com/zootedb0t)
|
||||||
|
* [cleanup] Replace improper use of tab in trovo by [glenn-slayden](https://github.com/glenn-slayden)
|
||||||
|
|
||||||
|
|
||||||
|
### 2021.08.10
|
||||||
|
|
||||||
|
* Add option `--replace-in-metadata`
|
||||||
|
* Add option `--no-simulate` to not simulate even when `--print` or `--list...` are used - Deprecates `--print-json`
|
||||||
|
* Allow entire infodict to be printed using `%()s` - makes `--dump-json` redundant
|
||||||
|
* Allow multiple `--exec` and `--exec-before-download`
|
||||||
|
* Add regex to `--match-filter`
|
||||||
|
* Add all format filtering operators also to `--match-filter` by [max-te](https://github.com/max-te)
|
||||||
|
* Add compat-option `no-keep-subs`
|
||||||
|
* [adobepass] Add MSO Cablevision by [Jessecar96](https://github.com/Jessecar96)
|
||||||
|
* [BandCamp] Add BandcampMusicIE by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [blackboardcollaborate] Add new extractor by [mzbaulhaque](https://github.com/mzbaulhaque)
|
||||||
|
* [eroprofile] Add album downloader by [jhwgh1968](https://github.com/jhwgh1968)
|
||||||
|
* [mirrativ] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [openrec] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
|
||||||
|
* [nbcolympics:stream] Fix extractor by [nchilada](https://github.com/nchilada), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [nbcolympics] Update extractor for 2020 olympics by [wesnm](https://github.com/wesnm)
|
||||||
|
* [paramountplus] Separate extractor and fix some titles by [shirt](https://github.com/shirt-dev), [pukkandan](https://github.com/pukkandan)
|
||||||
|
* [RCTIPlus] Support events and TV by [MinePlayersPE](https://github.com/MinePlayersPE)
|
||||||
|
* [Newgrounds] Improve extractor and fix playlist by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [aenetworks] Update `_THEPLATFORM_KEY` and `_THEPLATFORM_SECRET` by [wesnm](https://github.com/wesnm)
|
||||||
|
* [crunchyroll] Fix thumbnail by [funniray](https://github.com/funniray)
|
||||||
|
* [HotStar] Use API for metadata and extract subtitles by [Ashish0804](https://github.com/Ashish0804)
|
||||||
|
* [instagram] Fix comments extraction by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [peertube] Fix videos without description by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [twitch:clips] Extract `display_id` by [dirkf](https://github.com/dirkf)
|
||||||
|
* [viki] Print error message from API request
|
||||||
|
* [Vine] Remove invalid formats by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
* [VrtNU] Fix XSRF token by [pgaig](https://github.com/pgaig)
|
||||||
|
* [vrv] Fix thumbnail extraction by [funniray](https://github.com/funniray)
|
||||||
|
* [youtube] Add extractor-arg `include-live-dash` to show live dash formats
|
||||||
|
* [youtube] Improve signature function detection by [PSlava](https://github.com/PSlava)
|
||||||
|
* [youtube] Raise appropriate error when API pages can't be downloaded
|
||||||
|
* Ensure `_write_ytdl_file` closes file handle on error
|
||||||
|
* Fix `--compat-options filename` by [stdedos](https://github.com/stdedos)
|
||||||
|
* Fix issues with infodict sanitization
|
||||||
|
* Fix resuming when using `--no-part`
|
||||||
|
* Fix wrong extension for intermediate files
|
||||||
|
* Handle `BrokenPipeError` by [kikuyan](https://github.com/kikuyan)
|
||||||
|
* Show libraries present in verbose head
|
||||||
|
* [extractor] Detect `sttp` as subtitles in MPD by [fstirlitz](https://github.com/fstirlitz)
|
||||||
|
* [extractor] Reset non-repeating warnings per video
|
||||||
|
* [ffmpeg] Fix streaming `mp4` to `stdout`
|
||||||
|
* [ffpmeg] Allow `--ffmpeg-location` to be a file with different name
|
||||||
|
* [utils] Fix `InAdvancePagedList.__getitem__`
|
||||||
|
* [utils] Fix `traverse_obj` depth when `is_user_input`
|
||||||
|
* [webvtt] Merge daisy-chained duplicate cues by [fstirlitz](https://github.com/fstirlitz)
|
||||||
|
* [build] Use custom build of `pyinstaller` by [shirt](https://github.com/shirt-dev)
|
||||||
|
* [tests:download] Add batch testing for extractors (`test_YourExtractor_all`)
|
||||||
|
* [docs] Document which fields `--add-metadata` adds to the file
|
||||||
|
* [docs] Fix some mistakes and improve doc
|
||||||
|
* [cleanup] Misc code cleanup
|
||||||
|
|
||||||
|
|
||||||
### 2021.08.02
|
### 2021.08.02
|
||||||
|
|
||||||
* Add logo, banner and donate links
|
* Add logo, banner and donate links
|
||||||
* Expand and escape environment variables correctly in output template
|
* [outtmpl] Expand and escape environment variables
|
||||||
* Add format types `j` (json), `l` (comma delimited list), `q` (quoted for terminal) in output template
|
* [outtmpl] Add format types `j` (json), `l` (comma delimited list), `q` (quoted for terminal)
|
||||||
* [downloader] Allow streaming some unmerged formats to stdout using ffmpeg
|
* [downloader] Allow streaming some unmerged formats to stdout using ffmpeg
|
||||||
* [youtube] **Age-gate bypass**
|
* [youtube] **Age-gate bypass**
|
||||||
* Add `agegate` clients by [pukkandan](https://github.com/pukkandan), [MinePlayersPE](https://github.com/MinePlayersPE)
|
* Add `agegate` clients by [pukkandan](https://github.com/pukkandan), [MinePlayersPE](https://github.com/MinePlayersPE)
|
||||||
@@ -228,7 +737,7 @@
|
|||||||
### 2021.06.09
|
### 2021.06.09
|
||||||
|
|
||||||
* Fix bug where `%(field)d` in filename template throws error
|
* Fix bug where `%(field)d` in filename template throws error
|
||||||
* Improve offset parsing in outtmpl
|
* [outtmpl] Improve offset parsing
|
||||||
* [test] More rigorous tests for `prepare_filename`
|
* [test] More rigorous tests for `prepare_filename`
|
||||||
|
|
||||||
### 2021.06.08
|
### 2021.06.08
|
||||||
@@ -317,7 +826,7 @@
|
|||||||
* Add `html5=1` param to `get_video_info` page requests by [coletdjnz](https://github.com/coletdjnz)
|
* Add `html5=1` param to `get_video_info` page requests by [coletdjnz](https://github.com/coletdjnz)
|
||||||
* Better message when login required
|
* Better message when login required
|
||||||
* **Add option `--print`**: to print any field/template
|
* **Add option `--print`**: to print any field/template
|
||||||
* Deprecates: `--get-description`, `--get-duration`, `--get-filename`, `--get-format`, `--get-id`, `--get-thumbnail`, `--get-title`, `--get-url`
|
* Makes redundant: `--get-description`, `--get-duration`, `--get-filename`, `--get-format`, `--get-id`, `--get-thumbnail`, `--get-title`, `--get-url`
|
||||||
* Field `additional_urls` to download additional videos from metadata using [`--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata)
|
* Field `additional_urls` to download additional videos from metadata using [`--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata)
|
||||||
* Merge youtube-dl: Upto [commit/dfbbe29](https://github.com/ytdl-org/youtube-dl/commit/dfbbe2902fc67f0f93ee47a8077c148055c67a9b)
|
* Merge youtube-dl: Upto [commit/dfbbe29](https://github.com/ytdl-org/youtube-dl/commit/dfbbe2902fc67f0f93ee47a8077c148055c67a9b)
|
||||||
* Write thumbnail of playlist and add `pl_thumbnail` outtmpl key
|
* Write thumbnail of playlist and add `pl_thumbnail` outtmpl key
|
||||||
|
|||||||
@@ -15,6 +15,8 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
|||||||
|
|
||||||
## [shirt](https://github.com/shirt-dev)
|
## [shirt](https://github.com/shirt-dev)
|
||||||
|
|
||||||
|
[](https://ko-fi.com/shirt)
|
||||||
|
|
||||||
* Multithreading (`-N`) and aria2c support for fragment downloads
|
* Multithreading (`-N`) and aria2c support for fragment downloads
|
||||||
* Support for media initialization and discontinuity in HLS
|
* Support for media initialization and discontinuity in HLS
|
||||||
* The self-updater (`-U`)
|
* The self-updater (`-U`)
|
||||||
|
|||||||
37
Makefile
37
Makefile
@@ -1,4 +1,4 @@
|
|||||||
all: yt-dlp doc pypi-files
|
all: lazy-extractors yt-dlp doc pypi-files
|
||||||
clean: clean-test clean-dist clean-cache
|
clean: clean-test clean-dist clean-cache
|
||||||
completions: completion-bash completion-fish completion-zsh
|
completions: completion-bash completion-fish completion-zsh
|
||||||
doc: README.md CONTRIBUTING.md issuetemplates supportedsites
|
doc: README.md CONTRIBUTING.md issuetemplates supportedsites
|
||||||
@@ -13,7 +13,9 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites com
|
|||||||
.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites
|
.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites
|
||||||
|
|
||||||
clean-test:
|
clean-test:
|
||||||
rm -rf *.dump *.part* *.ytdl *.info.json *.mp4 *.m4a *.flv *.mp3 *.avi *.mkv *.webm *.3gp *.wav *.ape *.swf *.jpg *.png *.frag *.frag.urls *.frag.aria2 test/testdata/player-*.js
|
rm -rf *.3gp *.annotations.xml *.ape *.avi *.description *.dump *.flac *.flv *.frag *.frag.aria2 *.frag.urls \
|
||||||
|
*.info.json *.jpeg *.jpg *.live_chat.json *.m4a *.m4v *.mkv *.mp3 *.mp4 *.ogg *.opus *.part* *.png *.sbv *.srt \
|
||||||
|
*.swf *.swp *.ttml *.vtt *.wav *.webm *.webp *.ytdl test/testdata/player-*.js
|
||||||
clean-dist:
|
clean-dist:
|
||||||
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
|
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
|
||||||
clean-cache:
|
clean-cache:
|
||||||
@@ -38,9 +40,9 @@ SYSCONFDIR = $(shell if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then ech
|
|||||||
# set markdown input format to "markdown-smart" for pandoc version 2 and to "markdown" for pandoc prior to version 2
|
# set markdown input format to "markdown-smart" for pandoc version 2 and to "markdown" for pandoc prior to version 2
|
||||||
MARKDOWN = $(shell if [ `pandoc -v | head -n1 | cut -d" " -f2 | head -c1` = "2" ]; then echo markdown-smart; else echo markdown; fi)
|
MARKDOWN = $(shell if [ `pandoc -v | head -n1 | cut -d" " -f2 | head -c1` = "2" ]; then echo markdown-smart; else echo markdown; fi)
|
||||||
|
|
||||||
install: yt-dlp yt-dlp.1 completions
|
install: lazy-extractors yt-dlp yt-dlp.1 completions
|
||||||
install -Dm755 yt-dlp $(DESTDIR)$(BINDIR)
|
install -Dm755 yt-dlp $(DESTDIR)$(BINDIR)/yt-dlp
|
||||||
install -Dm644 yt-dlp.1 $(DESTDIR)$(MANDIR)/man1
|
install -Dm644 yt-dlp.1 $(DESTDIR)$(MANDIR)/man1/yt-dlp.1
|
||||||
install -Dm644 completions/bash/yt-dlp $(DESTDIR)$(SHAREDIR)/bash-completion/completions/yt-dlp
|
install -Dm644 completions/bash/yt-dlp $(DESTDIR)$(SHAREDIR)/bash-completion/completions/yt-dlp
|
||||||
install -Dm644 completions/zsh/_yt-dlp $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_yt-dlp
|
install -Dm644 completions/zsh/_yt-dlp $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_yt-dlp
|
||||||
install -Dm644 completions/fish/yt-dlp.fish $(DESTDIR)$(SHAREDIR)/fish/vendor_completions.d/yt-dlp.fish
|
install -Dm644 completions/fish/yt-dlp.fish $(DESTDIR)$(SHAREDIR)/fish/vendor_completions.d/yt-dlp.fish
|
||||||
@@ -76,12 +78,13 @@ README.md: yt_dlp/*.py yt_dlp/*/*.py
|
|||||||
CONTRIBUTING.md: README.md
|
CONTRIBUTING.md: README.md
|
||||||
$(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
|
$(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
|
||||||
|
|
||||||
issuetemplates: devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/1_broken_site.md .github/ISSUE_TEMPLATE_tmpl/2_site_support_request.md .github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.md .github/ISSUE_TEMPLATE_tmpl/4_bug_report.md .github/ISSUE_TEMPLATE_tmpl/5_feature_request.md yt_dlp/version.py
|
issuetemplates: devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml .github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml .github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml .github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml .github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml yt_dlp/version.py
|
||||||
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/1_broken_site.md .github/ISSUE_TEMPLATE/1_broken_site.md
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml .github/ISSUE_TEMPLATE/1_broken_site.yml
|
||||||
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/2_site_support_request.md .github/ISSUE_TEMPLATE/2_site_support_request.md
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml .github/ISSUE_TEMPLATE/2_site_support_request.yml
|
||||||
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.md .github/ISSUE_TEMPLATE/3_site_feature_request.md
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml .github/ISSUE_TEMPLATE/3_site_feature_request.yml
|
||||||
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/4_bug_report.md .github/ISSUE_TEMPLATE/4_bug_report.md
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml .github/ISSUE_TEMPLATE/4_bug_report.yml
|
||||||
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/5_feature_request.md .github/ISSUE_TEMPLATE/5_feature_request.md
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml .github/ISSUE_TEMPLATE/5_feature_request.yml
|
||||||
|
$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl/6_question.yml .github/ISSUE_TEMPLATE/6_question.yml
|
||||||
|
|
||||||
supportedsites:
|
supportedsites:
|
||||||
$(PYTHON) devscripts/make_supportedsites.py supportedsites.md
|
$(PYTHON) devscripts/make_supportedsites.py supportedsites.md
|
||||||
@@ -110,7 +113,7 @@ _EXTRACTOR_FILES = $(shell find yt_dlp/extractor -iname '*.py' -and -not -iname
|
|||||||
yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
||||||
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
||||||
|
|
||||||
yt-dlp.tar.gz: README.md yt-dlp.1 completions Changelog.md AUTHORS
|
yt-dlp.tar.gz: all
|
||||||
@tar -czf $(DESTDIR)/yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
|
@tar -czf $(DESTDIR)/yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
--exclude '*.kate-swp' \
|
--exclude '*.kate-swp' \
|
||||||
@@ -119,12 +122,12 @@ yt-dlp.tar.gz: README.md yt-dlp.1 completions Changelog.md AUTHORS
|
|||||||
--exclude '*~' \
|
--exclude '*~' \
|
||||||
--exclude '__pycache__' \
|
--exclude '__pycache__' \
|
||||||
--exclude '.git' \
|
--exclude '.git' \
|
||||||
--exclude 'docs/_build' \
|
|
||||||
-- \
|
-- \
|
||||||
devscripts test \
|
README.md supportedsites.md Changelog.md LICENSE \
|
||||||
Changelog.md AUTHORS LICENSE README.md supportedsites.md \
|
CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
|
||||||
Makefile MANIFEST.in yt-dlp.1 completions \
|
Makefile MANIFEST.in yt-dlp.1 README.txt completions \
|
||||||
setup.py setup.cfg yt-dlp
|
setup.py setup.cfg yt-dlp yt_dlp requirements.txt \
|
||||||
|
devscripts test tox.ini pytest.ini
|
||||||
|
|
||||||
AUTHORS: .mailmap
|
AUTHORS: .mailmap
|
||||||
git shortlog -s -n | cut -f2 | sort > AUTHORS
|
git shortlog -s -n | cut -f2 | sort > AUTHORS
|
||||||
|
|||||||
@@ -1,20 +1,31 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
from ..utils import bug_reports_message, write_string
|
||||||
|
|
||||||
class LazyLoadExtractor(object):
|
|
||||||
|
class LazyLoadMetaClass(type):
|
||||||
|
def __getattr__(cls, name):
|
||||||
|
if '_real_class' not in cls.__dict__:
|
||||||
|
write_string(
|
||||||
|
f'WARNING: Falling back to normal extractor since lazy extractor '
|
||||||
|
f'{cls.__name__} does not have attribute {name}{bug_reports_message()}')
|
||||||
|
return getattr(cls._get_real_class(), name)
|
||||||
|
|
||||||
|
|
||||||
|
class LazyLoadExtractor(metaclass=LazyLoadMetaClass):
|
||||||
_module = None
|
_module = None
|
||||||
|
_WORKING = True
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def ie_key(cls):
|
def _get_real_class(cls):
|
||||||
return cls.__name__[:-2]
|
if '_real_class' not in cls.__dict__:
|
||||||
|
mod = __import__(cls._module, fromlist=(cls.__name__,))
|
||||||
|
cls._real_class = getattr(mod, cls.__name__)
|
||||||
|
return cls._real_class
|
||||||
|
|
||||||
def __new__(cls, *args, **kwargs):
|
def __new__(cls, *args, **kwargs):
|
||||||
mod = __import__(cls._module, fromlist=(cls.__name__,))
|
real_cls = cls._get_real_class()
|
||||||
real_cls = getattr(mod, cls.__name__)
|
|
||||||
instance = real_cls.__new__(real_cls)
|
instance = real_cls.__new__(real_cls)
|
||||||
instance.__init__(*args, **kwargs)
|
instance.__init__(*args, **kwargs)
|
||||||
return instance
|
return instance
|
||||||
|
|||||||
@@ -1,33 +1,34 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
# import io
|
import io
|
||||||
import optparse
|
import optparse
|
||||||
# import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
return # This is unused in yt-dlp
|
||||||
|
|
||||||
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
|
||||||
options, args = parser.parse_args()
|
options, args = parser.parse_args()
|
||||||
if len(args) != 2:
|
if len(args) != 2:
|
||||||
parser.error('Expected an input and an output filename')
|
parser.error('Expected an input and an output filename')
|
||||||
|
|
||||||
|
infile, outfile = args
|
||||||
""" infile, outfile = args
|
|
||||||
|
|
||||||
with io.open(infile, encoding='utf-8') as inf:
|
with io.open(infile, encoding='utf-8') as inf:
|
||||||
readme = inf.read()
|
readme = inf.read()
|
||||||
|
|
||||||
bug_text = re.search( """
|
bug_text = re.search(
|
||||||
# r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
|
r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
|
||||||
# dev_text = re.search(
|
dev_text = re.search(
|
||||||
# r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING yt-dlp',
|
r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING yt-dlp', readme).group(1)
|
||||||
""" readme).group(1)
|
|
||||||
|
|
||||||
out = bug_text + dev_text
|
out = bug_text + dev_text
|
||||||
|
|
||||||
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
with io.open(outfile, 'w', encoding='utf-8') as outf:
|
||||||
outf.write(out) """
|
outf.write(out)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
main()
|
main()
|
||||||
|
|||||||
@@ -7,32 +7,35 @@ import os
|
|||||||
from os.path import dirname as dirn
|
from os.path import dirname as dirn
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
|
|
||||||
|
|
||||||
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
|
||||||
|
|
||||||
lazy_extractors_filename = sys.argv[1]
|
lazy_extractors_filename = sys.argv[1] if len(sys.argv) > 1 else 'yt_dlp/extractor/lazy_extractors.py'
|
||||||
if os.path.exists(lazy_extractors_filename):
|
if os.path.exists(lazy_extractors_filename):
|
||||||
os.remove(lazy_extractors_filename)
|
os.remove(lazy_extractors_filename)
|
||||||
|
|
||||||
# Block plugins from loading
|
# Block plugins from loading
|
||||||
os.rename('ytdlp_plugins', 'ytdlp_plugins_blocked')
|
plugins_dirname = 'ytdlp_plugins'
|
||||||
|
plugins_blocked_dirname = 'ytdlp_plugins_blocked'
|
||||||
|
if os.path.exists(plugins_dirname):
|
||||||
|
os.rename(plugins_dirname, plugins_blocked_dirname)
|
||||||
|
|
||||||
from yt_dlp.extractor import _ALL_CLASSES
|
from yt_dlp.extractor import _ALL_CLASSES
|
||||||
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
|
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
|
||||||
|
|
||||||
os.rename('ytdlp_plugins_blocked', 'ytdlp_plugins')
|
if os.path.exists(plugins_blocked_dirname):
|
||||||
|
os.rename(plugins_blocked_dirname, plugins_dirname)
|
||||||
|
|
||||||
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
||||||
module_template = f.read()
|
module_template = f.read()
|
||||||
|
|
||||||
|
CLASS_PROPERTIES = ['ie_key', 'working', '_match_valid_url', 'suitable', '_match_id', 'get_temp_id']
|
||||||
module_contents = [
|
module_contents = [
|
||||||
module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
|
module_template,
|
||||||
'class LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n']
|
*[getsource(getattr(InfoExtractor, k)) for k in CLASS_PROPERTIES],
|
||||||
|
'\nclass LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n']
|
||||||
|
|
||||||
ie_template = '''
|
ie_template = '''
|
||||||
class {name}({bases}):
|
class {name}({bases}):
|
||||||
_VALID_URL = {valid_url!r}
|
|
||||||
_module = '{module}'
|
_module = '{module}'
|
||||||
'''
|
'''
|
||||||
|
|
||||||
@@ -53,14 +56,17 @@ def get_base_name(base):
|
|||||||
|
|
||||||
|
|
||||||
def build_lazy_ie(ie, name):
|
def build_lazy_ie(ie, name):
|
||||||
valid_url = getattr(ie, '_VALID_URL', None)
|
|
||||||
s = ie_template.format(
|
s = ie_template.format(
|
||||||
name=name,
|
name=name,
|
||||||
bases=', '.join(map(get_base_name, ie.__bases__)),
|
bases=', '.join(map(get_base_name, ie.__bases__)),
|
||||||
valid_url=valid_url,
|
|
||||||
module=ie.__module__)
|
module=ie.__module__)
|
||||||
|
valid_url = getattr(ie, '_VALID_URL', None)
|
||||||
|
if valid_url:
|
||||||
|
s += f' _VALID_URL = {valid_url!r}\n'
|
||||||
|
if not ie._WORKING:
|
||||||
|
s += ' _WORKING = False\n'
|
||||||
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
|
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
|
||||||
s += '\n' + getsource(ie.suitable)
|
s += f'\n{getsource(ie.suitable)}'
|
||||||
if hasattr(ie, '_make_valid_url'):
|
if hasattr(ie, '_make_valid_url'):
|
||||||
# search extractors
|
# search extractors
|
||||||
s += make_valid_template.format(valid_url=ie._make_valid_url())
|
s += make_valid_template.format(valid_url=ie._make_valid_url())
|
||||||
@@ -98,7 +104,7 @@ for ie in ordered_cls:
|
|||||||
names.append(name)
|
names.append(name)
|
||||||
|
|
||||||
module_contents.append(
|
module_contents.append(
|
||||||
'_ALL_CLASSES = [{0}]'.format(', '.join(names)))
|
'\n_ALL_CLASSES = [{0}]'.format(', '.join(names)))
|
||||||
|
|
||||||
module_src = '\n'.join(module_contents) + '\n'
|
module_src = '\n'.join(module_contents) + '\n'
|
||||||
|
|
||||||
|
|||||||
@@ -29,6 +29,9 @@ def main():
|
|||||||
continue
|
continue
|
||||||
if ie_desc is not None:
|
if ie_desc is not None:
|
||||||
ie_md += ': {0}'.format(ie.IE_DESC)
|
ie_md += ': {0}'.format(ie.IE_DESC)
|
||||||
|
search_key = getattr(ie, 'SEARCH_KEY', None)
|
||||||
|
if search_key is not None:
|
||||||
|
ie_md += f'; "{ie.SEARCH_KEY}:" prefix'
|
||||||
if not ie.working():
|
if not ie.working():
|
||||||
ie_md += ' (Currently broken)'
|
ie_md += ' (Currently broken)'
|
||||||
yield ie_md
|
yield ie_md
|
||||||
|
|||||||
@@ -3,11 +3,11 @@
|
|||||||
cd /d %~dp0..
|
cd /d %~dp0..
|
||||||
|
|
||||||
if ["%~1"]==[""] (
|
if ["%~1"]==[""] (
|
||||||
set "test_set="
|
set "test_set="test""
|
||||||
) else if ["%~1"]==["core"] (
|
) else if ["%~1"]==["core"] (
|
||||||
set "test_set=-k "not download""
|
set "test_set="-m not download""
|
||||||
) else if ["%~1"]==["download"] (
|
) else if ["%~1"]==["download"] (
|
||||||
set "test_set=-k download"
|
set "test_set="-m "download""
|
||||||
) else (
|
) else (
|
||||||
echo.Invalid test type "%~1". Use "core" ^| "download"
|
echo.Invalid test type "%~1". Use "core" ^| "download"
|
||||||
exit /b 1
|
exit /b 1
|
||||||
|
|||||||
@@ -3,13 +3,12 @@
|
|||||||
if [ -z $1 ]; then
|
if [ -z $1 ]; then
|
||||||
test_set='test'
|
test_set='test'
|
||||||
elif [ $1 = 'core' ]; then
|
elif [ $1 = 'core' ]; then
|
||||||
test_set='not download'
|
test_set="-m not download"
|
||||||
elif [ $1 = 'download' ]; then
|
elif [ $1 = 'download' ]; then
|
||||||
test_set='download'
|
test_set="-m download"
|
||||||
else
|
else
|
||||||
echo 'Invalid test type "'$1'". Use "core" | "download"'
|
echo 'Invalid test type "'$1'". Use "core" | "download"'
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo python3 -m pytest -k $test_set
|
python3 -m pytest "$test_set"
|
||||||
python3 -m pytest -k "$test_set"
|
|
||||||
|
|||||||
37
devscripts/update-formulae.py
Normal file
37
devscripts/update-formulae.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from yt_dlp.compat import compat_urllib_request
|
||||||
|
|
||||||
|
|
||||||
|
# usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
|
||||||
|
# version can be either 0-aligned (yt-dlp version) or normalized (PyPl version)
|
||||||
|
|
||||||
|
filename, version = sys.argv[1:]
|
||||||
|
|
||||||
|
normalized_version = '.'.join(str(int(x)) for x in version.split('.'))
|
||||||
|
|
||||||
|
pypi_release = json.loads(compat_urllib_request.urlopen(
|
||||||
|
'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
|
||||||
|
).read().decode('utf-8'))
|
||||||
|
|
||||||
|
tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.tar.gz'))
|
||||||
|
|
||||||
|
sha256sum = tarball_file['digests']['sha256']
|
||||||
|
url = tarball_file['url']
|
||||||
|
|
||||||
|
with open(filename, 'r') as r:
|
||||||
|
formulae_text = r.read()
|
||||||
|
|
||||||
|
formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text)
|
||||||
|
formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text)
|
||||||
|
|
||||||
|
with open(filename, 'w') as w:
|
||||||
|
w.write(formulae_text)
|
||||||
189
pyinst.py
189
pyinst.py
@@ -1,82 +1,135 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
|
import os
|
||||||
from __future__ import unicode_literals
|
|
||||||
import sys
|
|
||||||
# import os
|
|
||||||
import platform
|
import platform
|
||||||
|
import sys
|
||||||
from PyInstaller.utils.hooks import collect_submodules
|
from PyInstaller.utils.hooks import collect_submodules
|
||||||
from PyInstaller.utils.win32.versioninfo import (
|
|
||||||
VarStruct, VarFileInfo, StringStruct, StringTable,
|
|
||||||
StringFileInfo, FixedFileInfo, VSVersionInfo, SetVersion,
|
|
||||||
)
|
|
||||||
import PyInstaller.__main__
|
|
||||||
|
|
||||||
arch = sys.argv[1] if len(sys.argv) > 1 else platform.architecture()[0][:2]
|
|
||||||
assert arch in ('32', '64')
|
|
||||||
print('Building %sbit version' % arch)
|
|
||||||
_x86 = '_x86' if arch == '32' else ''
|
|
||||||
|
|
||||||
FILE_DESCRIPTION = 'yt-dlp%s' % (' (32 Bit)' if _x86 else '')
|
OS_NAME = platform.system()
|
||||||
|
if OS_NAME == 'Windows':
|
||||||
|
from PyInstaller.utils.win32.versioninfo import (
|
||||||
|
VarStruct, VarFileInfo, StringStruct, StringTable,
|
||||||
|
StringFileInfo, FixedFileInfo, VSVersionInfo, SetVersion,
|
||||||
|
)
|
||||||
|
elif OS_NAME == 'Darwin':
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise Exception('{OS_NAME} is not supported')
|
||||||
|
|
||||||
# root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
|
ARCH = platform.architecture()[0][:2]
|
||||||
# print('Changing working directory to %s' % root_dir)
|
|
||||||
# os.chdir(root_dir)
|
|
||||||
|
|
||||||
exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
|
|
||||||
VERSION = locals()['__version__']
|
|
||||||
|
|
||||||
VERSION_LIST = VERSION.split('.')
|
def main():
|
||||||
VERSION_LIST = list(map(int, VERSION_LIST)) + [0] * (4 - len(VERSION_LIST))
|
opts = parse_options()
|
||||||
|
version = read_version()
|
||||||
|
|
||||||
print('Version: %s%s' % (VERSION, _x86))
|
suffix = '_macos' if OS_NAME == 'Darwin' else '_x86' if ARCH == '32' else ''
|
||||||
print('Remember to update the version using devscipts\\update-version.py')
|
final_file = 'dist/%syt-dlp%s%s' % (
|
||||||
|
'yt-dlp/' if '--onedir' in opts else '', suffix, '.exe' if OS_NAME == 'Windows' else '')
|
||||||
|
|
||||||
VERSION_FILE = VSVersionInfo(
|
print(f'Building yt-dlp v{version} {ARCH}bit for {OS_NAME} with options {opts}')
|
||||||
ffi=FixedFileInfo(
|
print('Remember to update the version using "devscripts/update-version.py"')
|
||||||
filevers=VERSION_LIST,
|
if not os.path.isfile('yt_dlp/extractor/lazy_extractors.py'):
|
||||||
prodvers=VERSION_LIST,
|
print('WARNING: Building without lazy_extractors. Run '
|
||||||
mask=0x3F,
|
'"devscripts/make_lazy_extractors.py" to build lazy extractors', file=sys.stderr)
|
||||||
flags=0x0,
|
print(f'Destination: {final_file}\n')
|
||||||
OS=0x4,
|
|
||||||
fileType=0x1,
|
opts = [
|
||||||
subtype=0x0,
|
f'--name=yt-dlp{suffix}',
|
||||||
date=(0, 0),
|
'--icon=devscripts/logo.ico',
|
||||||
),
|
'--upx-exclude=vcruntime140.dll',
|
||||||
kids=[
|
'--noconfirm',
|
||||||
StringFileInfo([
|
*dependancy_options(),
|
||||||
StringTable(
|
*opts,
|
||||||
'040904B0', [
|
'yt_dlp/__main__.py',
|
||||||
StringStruct('Comments', 'yt-dlp%s Command Line Interface.' % _x86),
|
|
||||||
StringStruct('CompanyName', 'https://github.com/yt-dlp'),
|
|
||||||
StringStruct('FileDescription', FILE_DESCRIPTION),
|
|
||||||
StringStruct('FileVersion', VERSION),
|
|
||||||
StringStruct('InternalName', 'yt-dlp%s' % _x86),
|
|
||||||
StringStruct(
|
|
||||||
'LegalCopyright',
|
|
||||||
'pukkandan.ytdlp@gmail.com | UNLICENSE',
|
|
||||||
),
|
|
||||||
StringStruct('OriginalFilename', 'yt-dlp%s.exe' % _x86),
|
|
||||||
StringStruct('ProductName', 'yt-dlp%s' % _x86),
|
|
||||||
StringStruct(
|
|
||||||
'ProductVersion',
|
|
||||||
'%s%s on Python %s' % (VERSION, _x86, platform.python_version())),
|
|
||||||
])]),
|
|
||||||
VarFileInfo([VarStruct('Translation', [0, 1200])])
|
|
||||||
]
|
]
|
||||||
)
|
print(f'Running PyInstaller with {opts}')
|
||||||
|
|
||||||
dependancies = ['Crypto', 'mutagen'] + collect_submodules('websockets')
|
import PyInstaller.__main__
|
||||||
excluded_modules = ['test', 'ytdlp_plugins', 'youtube-dl', 'youtube-dlc']
|
|
||||||
|
|
||||||
PyInstaller.__main__.run([
|
PyInstaller.__main__.run(opts)
|
||||||
'--name=yt-dlp%s' % _x86,
|
|
||||||
'--onefile',
|
set_version_info(final_file, version)
|
||||||
'--icon=devscripts/logo.ico',
|
|
||||||
*[f'--exclude-module={module}' for module in excluded_modules],
|
|
||||||
*[f'--hidden-import={module}' for module in dependancies],
|
def parse_options():
|
||||||
'--upx-exclude=vcruntime140.dll',
|
# Compatability with older arguments
|
||||||
'yt_dlp/__main__.py',
|
opts = sys.argv[1:]
|
||||||
])
|
if opts[0:1] in (['32'], ['64']):
|
||||||
SetVersion('dist/yt-dlp%s.exe' % _x86, VERSION_FILE)
|
if ARCH != opts[0]:
|
||||||
|
raise Exception(f'{opts[0]}bit executable cannot be built on a {ARCH}bit system')
|
||||||
|
opts = opts[1:]
|
||||||
|
return opts or ['--onefile']
|
||||||
|
|
||||||
|
|
||||||
|
def read_version():
|
||||||
|
exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
|
||||||
|
return locals()['__version__']
|
||||||
|
|
||||||
|
|
||||||
|
def version_to_list(version):
|
||||||
|
version_list = version.split('.')
|
||||||
|
return list(map(int, version_list)) + [0] * (4 - len(version_list))
|
||||||
|
|
||||||
|
|
||||||
|
def dependancy_options():
|
||||||
|
dependancies = [pycryptodome_module(), 'mutagen'] + collect_submodules('websockets')
|
||||||
|
excluded_modules = ['test', 'ytdlp_plugins', 'youtube-dl', 'youtube-dlc']
|
||||||
|
|
||||||
|
yield from (f'--hidden-import={module}' for module in dependancies)
|
||||||
|
yield from (f'--exclude-module={module}' for module in excluded_modules)
|
||||||
|
|
||||||
|
|
||||||
|
def pycryptodome_module():
|
||||||
|
try:
|
||||||
|
import Cryptodome # noqa: F401
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
import Crypto # noqa: F401
|
||||||
|
print('WARNING: Using Crypto since Cryptodome is not available. '
|
||||||
|
'Install with: pip install pycryptodomex', file=sys.stderr)
|
||||||
|
return 'Crypto'
|
||||||
|
except ImportError:
|
||||||
|
pass
|
||||||
|
return 'Cryptodome'
|
||||||
|
|
||||||
|
|
||||||
|
def set_version_info(exe, version):
|
||||||
|
if OS_NAME == 'Windows':
|
||||||
|
windows_set_version(exe, version)
|
||||||
|
|
||||||
|
|
||||||
|
def windows_set_version(exe, version):
|
||||||
|
version_list = version_to_list(version)
|
||||||
|
suffix = '_x86' if ARCH == '32' else ''
|
||||||
|
SetVersion(exe, VSVersionInfo(
|
||||||
|
ffi=FixedFileInfo(
|
||||||
|
filevers=version_list,
|
||||||
|
prodvers=version_list,
|
||||||
|
mask=0x3F,
|
||||||
|
flags=0x0,
|
||||||
|
OS=0x4,
|
||||||
|
fileType=0x1,
|
||||||
|
subtype=0x0,
|
||||||
|
date=(0, 0),
|
||||||
|
),
|
||||||
|
kids=[
|
||||||
|
StringFileInfo([StringTable('040904B0', [
|
||||||
|
StringStruct('Comments', 'yt-dlp%s Command Line Interface.' % suffix),
|
||||||
|
StringStruct('CompanyName', 'https://github.com/yt-dlp'),
|
||||||
|
StringStruct('FileDescription', 'yt-dlp%s' % (' (32 Bit)' if ARCH == '32' else '')),
|
||||||
|
StringStruct('FileVersion', version),
|
||||||
|
StringStruct('InternalName', f'yt-dlp{suffix}'),
|
||||||
|
StringStruct('LegalCopyright', 'pukkandan.ytdlp@gmail.com | UNLICENSE'),
|
||||||
|
StringStruct('OriginalFilename', f'yt-dlp{suffix}.exe'),
|
||||||
|
StringStruct('ProductName', f'yt-dlp{suffix}'),
|
||||||
|
StringStruct(
|
||||||
|
'ProductVersion', f'{version}{suffix} on Python {platform.python_version()}'),
|
||||||
|
])]), VarFileInfo([VarStruct('Translation', [0, 1200])])
|
||||||
|
]
|
||||||
|
))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
mutagen
|
mutagen
|
||||||
pycryptodome
|
pycryptodomex
|
||||||
websockets
|
websockets
|
||||||
|
|||||||
96
setup.py
96
setup.py
@@ -1,52 +1,86 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
from setuptools import setup, Command, find_packages
|
|
||||||
import os.path
|
import os.path
|
||||||
import warnings
|
import warnings
|
||||||
import sys
|
import sys
|
||||||
from distutils.spawn import spawn
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from setuptools import setup, Command, find_packages
|
||||||
|
setuptools_available = True
|
||||||
|
except ImportError:
|
||||||
|
from distutils.core import setup, Command
|
||||||
|
setuptools_available = False
|
||||||
|
from distutils.spawn import spawn
|
||||||
|
|
||||||
# Get the version from yt_dlp/version.py without importing the package
|
# Get the version from yt_dlp/version.py without importing the package
|
||||||
exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
|
exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
|
||||||
|
|
||||||
|
|
||||||
DESCRIPTION = 'Command-line program to download videos from YouTube.com and many other other video platforms.'
|
DESCRIPTION = 'A youtube-dl fork with additional features and patches'
|
||||||
|
|
||||||
LONG_DESCRIPTION = '\n\n'.join((
|
LONG_DESCRIPTION = '\n\n'.join((
|
||||||
'Official repository: <https://github.com/yt-dlp/yt-dlp>',
|
'Official repository: <https://github.com/yt-dlp/yt-dlp>',
|
||||||
'**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
|
'**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
|
||||||
open('README.md', 'r', encoding='utf-8').read()))
|
open('README.md', 'r', encoding='utf-8').read()))
|
||||||
|
|
||||||
REQUIREMENTS = ['mutagen', 'pycryptodome', 'websockets']
|
REQUIREMENTS = ['mutagen', 'pycryptodomex', 'websockets']
|
||||||
|
|
||||||
|
|
||||||
if sys.argv[1:2] == ['py2exe']:
|
if sys.argv[1:2] == ['py2exe']:
|
||||||
raise NotImplementedError('py2exe is not currently supported; instead, use "pyinst.py" to build with pyinstaller')
|
import py2exe
|
||||||
|
warnings.warn(
|
||||||
|
'py2exe builds do not support pycryptodomex and needs VC++14 to run. '
|
||||||
|
'The recommended way is to use "pyinst.py" to build using pyinstaller')
|
||||||
|
params = {
|
||||||
|
'console': [{
|
||||||
|
'script': './yt_dlp/__main__.py',
|
||||||
|
'dest_base': 'yt-dlp',
|
||||||
|
'version': __version__,
|
||||||
|
'description': DESCRIPTION,
|
||||||
|
'comments': LONG_DESCRIPTION.split('\n')[0],
|
||||||
|
'product_name': 'yt-dlp',
|
||||||
|
'product_version': __version__,
|
||||||
|
}],
|
||||||
|
'options': {
|
||||||
|
'py2exe': {
|
||||||
|
'bundle_files': 0,
|
||||||
|
'compressed': 1,
|
||||||
|
'optimize': 2,
|
||||||
|
'dist_dir': './dist',
|
||||||
|
'excludes': ['Crypto', 'Cryptodome'], # py2exe cannot import Crypto
|
||||||
|
'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'zipfile': None
|
||||||
|
}
|
||||||
|
|
||||||
|
else:
|
||||||
|
files_spec = [
|
||||||
|
('share/bash-completion/completions', ['completions/bash/yt-dlp']),
|
||||||
|
('share/zsh/site-functions', ['completions/zsh/_yt-dlp']),
|
||||||
|
('share/fish/vendor_completions.d', ['completions/fish/yt-dlp.fish']),
|
||||||
|
('share/doc/yt_dlp', ['README.txt']),
|
||||||
|
('share/man/man1', ['yt-dlp.1'])
|
||||||
|
]
|
||||||
|
root = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
data_files = []
|
||||||
|
for dirname, files in files_spec:
|
||||||
|
resfiles = []
|
||||||
|
for fn in files:
|
||||||
|
if not os.path.exists(fn):
|
||||||
|
warnings.warn('Skipping file %s since it is not present. Try running `make pypi-files` first' % fn)
|
||||||
|
else:
|
||||||
|
resfiles.append(fn)
|
||||||
|
data_files.append((dirname, resfiles))
|
||||||
|
|
||||||
files_spec = [
|
params = {
|
||||||
('share/bash-completion/completions', ['completions/bash/yt-dlp']),
|
'data_files': data_files,
|
||||||
('share/zsh/site-functions', ['completions/zsh/_yt-dlp']),
|
}
|
||||||
('share/fish/vendor_completions.d', ['completions/fish/yt-dlp.fish']),
|
|
||||||
('share/doc/yt_dlp', ['README.txt']),
|
|
||||||
('share/man/man1', ['yt-dlp.1'])
|
|
||||||
]
|
|
||||||
root = os.path.dirname(os.path.abspath(__file__))
|
|
||||||
data_files = []
|
|
||||||
for dirname, files in files_spec:
|
|
||||||
resfiles = []
|
|
||||||
for fn in files:
|
|
||||||
if not os.path.exists(fn):
|
|
||||||
warnings.warn('Skipping file %s since it is not present. Try running `make pypi-files` first' % fn)
|
|
||||||
else:
|
|
||||||
resfiles.append(fn)
|
|
||||||
data_files.append((dirname, resfiles))
|
|
||||||
|
|
||||||
params = {
|
if setuptools_available:
|
||||||
'data_files': data_files,
|
params['entry_points'] = {'console_scripts': ['yt-dlp = yt_dlp:main']}
|
||||||
}
|
else:
|
||||||
params['entry_points'] = {'console_scripts': ['yt-dlp = yt_dlp:main']}
|
params['scripts'] = ['yt-dlp']
|
||||||
|
|
||||||
|
|
||||||
class build_lazy_extractors(Command):
|
class build_lazy_extractors(Command):
|
||||||
@@ -64,7 +98,11 @@ class build_lazy_extractors(Command):
|
|||||||
dry_run=self.dry_run)
|
dry_run=self.dry_run)
|
||||||
|
|
||||||
|
|
||||||
packages = find_packages(exclude=('youtube_dl', 'test', 'ytdlp_plugins'))
|
if setuptools_available:
|
||||||
|
packages = find_packages(exclude=('youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins'))
|
||||||
|
else:
|
||||||
|
packages = ['yt_dlp', 'yt_dlp.downloader', 'yt_dlp.extractor', 'yt_dlp.postprocessor']
|
||||||
|
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name='yt-dlp',
|
name='yt-dlp',
|
||||||
@@ -81,7 +119,7 @@ setup(
|
|||||||
'Documentation': 'https://yt-dlp.readthedocs.io',
|
'Documentation': 'https://yt-dlp.readthedocs.io',
|
||||||
'Source': 'https://github.com/yt-dlp/yt-dlp',
|
'Source': 'https://github.com/yt-dlp/yt-dlp',
|
||||||
'Tracker': 'https://github.com/yt-dlp/yt-dlp/issues',
|
'Tracker': 'https://github.com/yt-dlp/yt-dlp/issues',
|
||||||
#'Funding': 'https://donate.pypi.org',
|
'Funding': 'https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators',
|
||||||
},
|
},
|
||||||
classifiers=[
|
classifiers=[
|
||||||
'Topic :: Multimedia :: Video',
|
'Topic :: Multimedia :: Video',
|
||||||
|
|||||||
@@ -1,4 +1,6 @@
|
|||||||
# Supported sites
|
# Supported sites
|
||||||
|
- **17live**
|
||||||
|
- **17live:clip**
|
||||||
- **1tv**: Первый канал
|
- **1tv**: Первый канал
|
||||||
- **20min**
|
- **20min**
|
||||||
- **220.ro**
|
- **220.ro**
|
||||||
@@ -46,10 +48,12 @@
|
|||||||
- **Alura**
|
- **Alura**
|
||||||
- **AluraCourse**
|
- **AluraCourse**
|
||||||
- **Amara**
|
- **Amara**
|
||||||
|
- **AmazonStore**
|
||||||
- **AMCNetworks**
|
- **AMCNetworks**
|
||||||
- **AmericasTestKitchen**
|
- **AmericasTestKitchen**
|
||||||
- **AmericasTestKitchenSeason**
|
- **AmericasTestKitchenSeason**
|
||||||
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
|
- **AnimalPlanet**
|
||||||
- **AnimeLab**
|
- **AnimeLab**
|
||||||
- **AnimeLabShows**
|
- **AnimeLabShows**
|
||||||
- **AnimeOnDemand**
|
- **AnimeOnDemand**
|
||||||
@@ -95,7 +99,9 @@
|
|||||||
- **Bandcamp**
|
- **Bandcamp**
|
||||||
- **Bandcamp:album**
|
- **Bandcamp:album**
|
||||||
- **Bandcamp:weekly**
|
- **Bandcamp:weekly**
|
||||||
|
- **BandcampMusic**
|
||||||
- **bangumi.bilibili.com**: BiliBili番剧
|
- **bangumi.bilibili.com**: BiliBili番剧
|
||||||
|
- **BannedVideo**
|
||||||
- **bbc**: BBC
|
- **bbc**: BBC
|
||||||
- **bbc.co.uk**: BBC iPlayer
|
- **bbc.co.uk**: BBC iPlayer
|
||||||
- **bbc.co.uk:article**: BBC articles
|
- **bbc.co.uk:article**: BBC articles
|
||||||
@@ -117,11 +123,14 @@
|
|||||||
- **Bigflix**
|
- **Bigflix**
|
||||||
- **Bild**: Bild.de
|
- **Bild**: Bild.de
|
||||||
- **BiliBili**
|
- **BiliBili**
|
||||||
|
- **Bilibili category extractor**
|
||||||
- **BilibiliAudio**
|
- **BilibiliAudio**
|
||||||
- **BilibiliAudioAlbum**
|
- **BilibiliAudioAlbum**
|
||||||
- **BilibiliChannel**
|
- **BilibiliChannel**
|
||||||
- **BiliBiliPlayer**
|
- **BiliBiliPlayer**
|
||||||
- **BiliBiliSearch**: Bilibili video search, "bilisearch" keyword
|
- **BiliBiliSearch**: Bilibili video search; "bilisearch:" prefix
|
||||||
|
- **BiliIntl**
|
||||||
|
- **BiliIntlSeries**
|
||||||
- **BioBioChileTV**
|
- **BioBioChileTV**
|
||||||
- **Biography**
|
- **Biography**
|
||||||
- **BIQLE**
|
- **BIQLE**
|
||||||
@@ -129,6 +138,7 @@
|
|||||||
- **BitChuteChannel**
|
- **BitChuteChannel**
|
||||||
- **bitwave:replay**
|
- **bitwave:replay**
|
||||||
- **bitwave:stream**
|
- **bitwave:stream**
|
||||||
|
- **BlackboardCollaborate**
|
||||||
- **BleacherReport**
|
- **BleacherReport**
|
||||||
- **BleacherReportCMS**
|
- **BleacherReportCMS**
|
||||||
- **Bloomberg**
|
- **Bloomberg**
|
||||||
@@ -148,10 +158,10 @@
|
|||||||
- **BusinessInsider**
|
- **BusinessInsider**
|
||||||
- **BuzzFeed**
|
- **BuzzFeed**
|
||||||
- **BYUtv**
|
- **BYUtv**
|
||||||
|
- **CAM4**
|
||||||
- **Camdemy**
|
- **Camdemy**
|
||||||
- **CamdemyFolder**
|
- **CamdemyFolder**
|
||||||
- **CamModels**
|
- **CamModels**
|
||||||
- **CamTube**
|
|
||||||
- **CamWithHer**
|
- **CamWithHer**
|
||||||
- **canalc2.tv**
|
- **canalc2.tv**
|
||||||
- **Canalplus**: mycanal.fr and piwiplus.fr
|
- **Canalplus**: mycanal.fr and piwiplus.fr
|
||||||
@@ -161,10 +171,7 @@
|
|||||||
- **CarambaTVPage**
|
- **CarambaTVPage**
|
||||||
- **CartoonNetwork**
|
- **CartoonNetwork**
|
||||||
- **cbc.ca**
|
- **cbc.ca**
|
||||||
- **cbc.ca:olympics**
|
|
||||||
- **cbc.ca:player**
|
- **cbc.ca:player**
|
||||||
- **cbc.ca:watch**
|
|
||||||
- **cbc.ca:watch:video**
|
|
||||||
- **CBS**
|
- **CBS**
|
||||||
- **CBSInteractive**
|
- **CBSInteractive**
|
||||||
- **CBSLocal**
|
- **CBSLocal**
|
||||||
@@ -178,11 +185,13 @@
|
|||||||
- **CCTV**: 央视网
|
- **CCTV**: 央视网
|
||||||
- **CDA**
|
- **CDA**
|
||||||
- **CeskaTelevize**
|
- **CeskaTelevize**
|
||||||
- **CeskaTelevizePorady**
|
- **CGTN**
|
||||||
- **channel9**: Channel 9
|
- **channel9**: Channel 9
|
||||||
- **CharlieRose**
|
- **CharlieRose**
|
||||||
- **Chaturbate**
|
- **Chaturbate**
|
||||||
- **Chilloutzone**
|
- **Chilloutzone**
|
||||||
|
- **Chingari**
|
||||||
|
- **ChingariUser**
|
||||||
- **chirbit**
|
- **chirbit**
|
||||||
- **chirbit:profile**
|
- **chirbit:profile**
|
||||||
- **cielotv.it**
|
- **cielotv.it**
|
||||||
@@ -190,6 +199,7 @@
|
|||||||
- **Cinemax**
|
- **Cinemax**
|
||||||
- **CiscoLiveSearch**
|
- **CiscoLiveSearch**
|
||||||
- **CiscoLiveSession**
|
- **CiscoLiveSession**
|
||||||
|
- **ciscowebex**: Cisco Webex
|
||||||
- **CJSW**
|
- **CJSW**
|
||||||
- **cliphunter**
|
- **cliphunter**
|
||||||
- **Clippit**
|
- **Clippit**
|
||||||
@@ -216,13 +226,14 @@
|
|||||||
- **Crackle**
|
- **Crackle**
|
||||||
- **CrooksAndLiars**
|
- **CrooksAndLiars**
|
||||||
- **crunchyroll**
|
- **crunchyroll**
|
||||||
|
- **crunchyroll:beta**
|
||||||
- **crunchyroll:playlist**
|
- **crunchyroll:playlist**
|
||||||
|
- **crunchyroll:playlist:beta**
|
||||||
- **CSpan**: C-SPAN
|
- **CSpan**: C-SPAN
|
||||||
- **CtsNews**: 華視新聞
|
- **CtsNews**: 華視新聞
|
||||||
- **CTV**
|
- **CTV**
|
||||||
- **CTVNews**
|
- **CTVNews**
|
||||||
- **cu.ntv.co.jp**: Nippon Television Network
|
- **cu.ntv.co.jp**: Nippon Television Network
|
||||||
- **Culturebox**
|
|
||||||
- **CultureUnplugged**
|
- **CultureUnplugged**
|
||||||
- **curiositystream**
|
- **curiositystream**
|
||||||
- **curiositystream:collection**
|
- **curiositystream:collection**
|
||||||
@@ -232,6 +243,8 @@
|
|||||||
- **dailymotion**
|
- **dailymotion**
|
||||||
- **dailymotion:playlist**
|
- **dailymotion:playlist**
|
||||||
- **dailymotion:user**
|
- **dailymotion:user**
|
||||||
|
- **damtomo:record**
|
||||||
|
- **damtomo:video**
|
||||||
- **daum.net**
|
- **daum.net**
|
||||||
- **daum.net:clip**
|
- **daum.net:clip**
|
||||||
- **daum.net:playlist**
|
- **daum.net:playlist**
|
||||||
@@ -255,6 +268,7 @@
|
|||||||
- **DiscoveryPlusIndiaShow**
|
- **DiscoveryPlusIndiaShow**
|
||||||
- **DiscoveryVR**
|
- **DiscoveryVR**
|
||||||
- **Disney**
|
- **Disney**
|
||||||
|
- **DIYNetwork**
|
||||||
- **dlive:stream**
|
- **dlive:stream**
|
||||||
- **dlive:vod**
|
- **dlive:vod**
|
||||||
- **DoodStream**
|
- **DoodStream**
|
||||||
@@ -293,13 +307,17 @@
|
|||||||
- **Embedly**
|
- **Embedly**
|
||||||
- **EMPFlix**
|
- **EMPFlix**
|
||||||
- **Engadget**
|
- **Engadget**
|
||||||
|
- **Epicon**
|
||||||
|
- **EpiconSeries**
|
||||||
- **Eporner**
|
- **Eporner**
|
||||||
- **EroProfile**
|
- **EroProfile**
|
||||||
|
- **EroProfile:album**
|
||||||
- **Escapist**
|
- **Escapist**
|
||||||
- **ESPN**
|
- **ESPN**
|
||||||
- **ESPNArticle**
|
- **ESPNArticle**
|
||||||
- **EsriVideo**
|
- **EsriVideo**
|
||||||
- **Europa**
|
- **Europa**
|
||||||
|
- **EUScreen**
|
||||||
- **EWETV**
|
- **EWETV**
|
||||||
- **ExpoTV**
|
- **ExpoTV**
|
||||||
- **Expressen**
|
- **Expressen**
|
||||||
@@ -313,6 +331,7 @@
|
|||||||
- **fc2**
|
- **fc2**
|
||||||
- **fc2:embed**
|
- **fc2:embed**
|
||||||
- **Fczenit**
|
- **Fczenit**
|
||||||
|
- **Filmmodu**
|
||||||
- **filmon**
|
- **filmon**
|
||||||
- **filmon:channel**
|
- **filmon:channel**
|
||||||
- **Filmweb**
|
- **Filmweb**
|
||||||
@@ -329,13 +348,10 @@
|
|||||||
- **foxnews**: Fox News and Fox Business Video
|
- **foxnews**: Fox News and Fox Business Video
|
||||||
- **foxnews:article**
|
- **foxnews:article**
|
||||||
- **FoxSports**
|
- **FoxSports**
|
||||||
- **france2.fr:generation-what**
|
|
||||||
- **FranceCulture**
|
- **FranceCulture**
|
||||||
- **FranceInter**
|
- **FranceInter**
|
||||||
- **FranceTV**
|
- **FranceTV**
|
||||||
- **FranceTVEmbed**
|
|
||||||
- **francetvinfo.fr**
|
- **francetvinfo.fr**
|
||||||
- **FranceTVJeunesse**
|
|
||||||
- **FranceTVSite**
|
- **FranceTVSite**
|
||||||
- **Freesound**
|
- **Freesound**
|
||||||
- **freespeech.org**
|
- **freespeech.org**
|
||||||
@@ -350,6 +366,8 @@
|
|||||||
- **Funk**
|
- **Funk**
|
||||||
- **Fusion**
|
- **Fusion**
|
||||||
- **Fux**
|
- **Fux**
|
||||||
|
- **Gab**
|
||||||
|
- **GabTV**
|
||||||
- **Gaia**
|
- **Gaia**
|
||||||
- **GameInformer**
|
- **GameInformer**
|
||||||
- **GameSpot**
|
- **GameSpot**
|
||||||
@@ -358,7 +376,11 @@
|
|||||||
- **Gazeta**
|
- **Gazeta**
|
||||||
- **GDCVault**
|
- **GDCVault**
|
||||||
- **GediDigital**
|
- **GediDigital**
|
||||||
|
- **gem.cbc.ca**
|
||||||
|
- **gem.cbc.ca:live**
|
||||||
|
- **gem.cbc.ca:playlist**
|
||||||
- **generic**: Generic downloader that works on some sites
|
- **generic**: Generic downloader that works on some sites
|
||||||
|
- **Gettr**
|
||||||
- **Gfycat**
|
- **Gfycat**
|
||||||
- **GiantBomb**
|
- **GiantBomb**
|
||||||
- **Giga**
|
- **Giga**
|
||||||
@@ -372,8 +394,11 @@
|
|||||||
- **google:podcasts**
|
- **google:podcasts**
|
||||||
- **google:podcasts:feed**
|
- **google:podcasts:feed**
|
||||||
- **GoogleDrive**
|
- **GoogleDrive**
|
||||||
|
- **GoPro**
|
||||||
- **Goshgay**
|
- **Goshgay**
|
||||||
|
- **GoToStage**
|
||||||
- **GPUTechConf**
|
- **GPUTechConf**
|
||||||
|
- **Gronkh**
|
||||||
- **Groupon**
|
- **Groupon**
|
||||||
- **hbo**
|
- **hbo**
|
||||||
- **HearThisAt**
|
- **HearThisAt**
|
||||||
@@ -405,6 +430,7 @@
|
|||||||
- **Huajiao**: 花椒直播
|
- **Huajiao**: 花椒直播
|
||||||
- **HuffPost**: Huffington Post
|
- **HuffPost**: Huffington Post
|
||||||
- **Hungama**
|
- **Hungama**
|
||||||
|
- **HungamaAlbumPlaylist**
|
||||||
- **HungamaSong**
|
- **HungamaSong**
|
||||||
- **Hypem**
|
- **Hypem**
|
||||||
- **ign.com**
|
- **ign.com**
|
||||||
@@ -424,9 +450,11 @@
|
|||||||
- **Instagram**
|
- **Instagram**
|
||||||
- **instagram:tag**: Instagram hashtag search
|
- **instagram:tag**: Instagram hashtag search
|
||||||
- **instagram:user**: Instagram user profile
|
- **instagram:user**: Instagram user profile
|
||||||
|
- **InstagramIOS**: IOS instagram:// URL
|
||||||
- **Internazionale**
|
- **Internazionale**
|
||||||
- **InternetVideoArchive**
|
- **InternetVideoArchive**
|
||||||
- **IPrima**
|
- **IPrima**
|
||||||
|
- **IPrimaCNN**
|
||||||
- **iqiyi**: 爱奇艺
|
- **iqiyi**: 爱奇艺
|
||||||
- **Ir90Tv**
|
- **Ir90Tv**
|
||||||
- **ITTF**
|
- **ITTF**
|
||||||
@@ -457,6 +485,7 @@
|
|||||||
- **KinjaEmbed**
|
- **KinjaEmbed**
|
||||||
- **KinoPoisk**
|
- **KinoPoisk**
|
||||||
- **KonserthusetPlay**
|
- **KonserthusetPlay**
|
||||||
|
- **Koo**
|
||||||
- **KrasView**: Красвью
|
- **KrasView**: Красвью
|
||||||
- **Ku6**
|
- **Ku6**
|
||||||
- **KUSI**
|
- **KUSI**
|
||||||
@@ -517,6 +546,9 @@
|
|||||||
- **MallTV**
|
- **MallTV**
|
||||||
- **mangomolo:live**
|
- **mangomolo:live**
|
||||||
- **mangomolo:video**
|
- **mangomolo:video**
|
||||||
|
- **ManotoTV**: Manoto TV (Episode)
|
||||||
|
- **ManotoTVLive**: Manoto TV (Live)
|
||||||
|
- **ManotoTVShow**: Manoto TV (Show)
|
||||||
- **ManyVids**
|
- **ManyVids**
|
||||||
- **MaoriTV**
|
- **MaoriTV**
|
||||||
- **Markiza**
|
- **Markiza**
|
||||||
@@ -527,8 +559,11 @@
|
|||||||
- **MedalTV**
|
- **MedalTV**
|
||||||
- **media.ccc.de**
|
- **media.ccc.de**
|
||||||
- **media.ccc.de:lists**
|
- **media.ccc.de:lists**
|
||||||
|
- **Mediaite**
|
||||||
|
- **MediaKlikk**
|
||||||
- **Medialaan**
|
- **Medialaan**
|
||||||
- **Mediaset**
|
- **Mediaset**
|
||||||
|
- **MediasetShow**
|
||||||
- **Mediasite**
|
- **Mediasite**
|
||||||
- **MediasiteCatalog**
|
- **MediasiteCatalog**
|
||||||
- **MediasiteNamedCatalog**
|
- **MediasiteNamedCatalog**
|
||||||
@@ -543,6 +578,7 @@
|
|||||||
- **Mgoon**
|
- **Mgoon**
|
||||||
- **MGTV**: 芒果TV
|
- **MGTV**: 芒果TV
|
||||||
- **MiaoPai**
|
- **MiaoPai**
|
||||||
|
- **microsoftstream**: Microsoft Stream
|
||||||
- **mildom**: Record ongoing live by specific user in Mildom
|
- **mildom**: Record ongoing live by specific user in Mildom
|
||||||
- **mildom:user:vod**: Download all VODs from specific user in Mildom
|
- **mildom:user:vod**: Download all VODs from specific user in Mildom
|
||||||
- **mildom:vod**: Download a VOD in Mildom
|
- **mildom:vod**: Download a VOD in Mildom
|
||||||
@@ -552,12 +588,15 @@
|
|||||||
- **MinistryGrid**
|
- **MinistryGrid**
|
||||||
- **Minoto**
|
- **Minoto**
|
||||||
- **miomio.tv**
|
- **miomio.tv**
|
||||||
|
- **mirrativ**
|
||||||
|
- **mirrativ:user**
|
||||||
- **MiTele**: mitele.es
|
- **MiTele**: mitele.es
|
||||||
- **mixcloud**
|
- **mixcloud**
|
||||||
- **mixcloud:playlist**
|
- **mixcloud:playlist**
|
||||||
- **mixcloud:user**
|
- **mixcloud:user**
|
||||||
- **MLB**
|
- **MLB**
|
||||||
- **MLBVideo**
|
- **MLBVideo**
|
||||||
|
- **MLSSoccer**
|
||||||
- **Mnet**
|
- **Mnet**
|
||||||
- **MNetTV**
|
- **MNetTV**
|
||||||
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
|
||||||
@@ -583,6 +622,7 @@
|
|||||||
- **mtvservices:embedded**
|
- **mtvservices:embedded**
|
||||||
- **MTVUutisetArticle**
|
- **MTVUutisetArticle**
|
||||||
- **MuenchenTV**: münchen.tv
|
- **MuenchenTV**: münchen.tv
|
||||||
|
- **MuseScore**
|
||||||
- **mva**: Microsoft Virtual Academy videos
|
- **mva**: Microsoft Virtual Academy videos
|
||||||
- **mva:course**: Microsoft Virtual Academy courses
|
- **mva:course**: Microsoft Virtual Academy courses
|
||||||
- **Mwave**
|
- **Mwave**
|
||||||
@@ -599,6 +639,8 @@
|
|||||||
- **MyviEmbed**
|
- **MyviEmbed**
|
||||||
- **MyVisionTV**
|
- **MyVisionTV**
|
||||||
- **n-tv.de**
|
- **n-tv.de**
|
||||||
|
- **N1Info:article**
|
||||||
|
- **N1InfoAsset**
|
||||||
- **natgeo:video**
|
- **natgeo:video**
|
||||||
- **NationalGeographicTV**
|
- **NationalGeographicTV**
|
||||||
- **Naver**
|
- **Naver**
|
||||||
@@ -632,7 +674,8 @@
|
|||||||
- **NetPlus**
|
- **NetPlus**
|
||||||
- **Netzkino**
|
- **Netzkino**
|
||||||
- **Newgrounds**
|
- **Newgrounds**
|
||||||
- **NewgroundsPlaylist**
|
- **Newgrounds:playlist**
|
||||||
|
- **Newgrounds:user**
|
||||||
- **Newstube**
|
- **Newstube**
|
||||||
- **NextMedia**: 蘋果日報
|
- **NextMedia**: 蘋果日報
|
||||||
- **NextMediaActionNews**: 蘋果日報 - 動新聞
|
- **NextMediaActionNews**: 蘋果日報 - 動新聞
|
||||||
@@ -653,6 +696,9 @@
|
|||||||
- **niconico**: ニコニコ動画
|
- **niconico**: ニコニコ動画
|
||||||
- **NiconicoPlaylist**
|
- **NiconicoPlaylist**
|
||||||
- **NiconicoUser**
|
- **NiconicoUser**
|
||||||
|
- **nicovideo:search**: Nico video searches; "nicosearch:" prefix
|
||||||
|
- **nicovideo:search:date**: Nico video searches, newest first; "nicosearchdate:" prefix
|
||||||
|
- **nicovideo:search_url**: Nico video search URLs
|
||||||
- **Nintendo**
|
- **Nintendo**
|
||||||
- **Nitter**
|
- **Nitter**
|
||||||
- **njoy**: N-JOY
|
- **njoy**: N-JOY
|
||||||
@@ -665,6 +711,7 @@
|
|||||||
- **NosVideo**
|
- **NosVideo**
|
||||||
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
|
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
|
||||||
- **NovaEmbed**
|
- **NovaEmbed**
|
||||||
|
- **NovaPlay**
|
||||||
- **nowness**
|
- **nowness**
|
||||||
- **nowness:playlist**
|
- **nowness:playlist**
|
||||||
- **nowness:series**
|
- **nowness:series**
|
||||||
@@ -690,11 +737,14 @@
|
|||||||
- **NYTimes**
|
- **NYTimes**
|
||||||
- **NYTimesArticle**
|
- **NYTimesArticle**
|
||||||
- **NYTimesCooking**
|
- **NYTimesCooking**
|
||||||
|
- **nzherald**
|
||||||
- **NZZ**
|
- **NZZ**
|
||||||
- **ocw.mit.edu**
|
- **ocw.mit.edu**
|
||||||
- **OdaTV**
|
- **OdaTV**
|
||||||
- **Odnoklassniki**
|
- **Odnoklassniki**
|
||||||
- **OktoberfestTV**
|
- **OktoberfestTV**
|
||||||
|
- **OlympicsReplay**
|
||||||
|
- **on24**: ON24
|
||||||
- **OnDemandKorea**
|
- **OnDemandKorea**
|
||||||
- **onet.pl**
|
- **onet.pl**
|
||||||
- **onet.tv**
|
- **onet.tv**
|
||||||
@@ -703,6 +753,8 @@
|
|||||||
- **OnionStudios**
|
- **OnionStudios**
|
||||||
- **Ooyala**
|
- **Ooyala**
|
||||||
- **OoyalaExternal**
|
- **OoyalaExternal**
|
||||||
|
- **openrec**
|
||||||
|
- **openrec:capture**
|
||||||
- **OraTV**
|
- **OraTV**
|
||||||
- **orf:burgenland**: Radio Burgenland
|
- **orf:burgenland**: Radio Burgenland
|
||||||
- **orf:fm4**: radio FM4
|
- **orf:fm4**: radio FM4
|
||||||
@@ -728,13 +780,18 @@
|
|||||||
- **PalcoMP3:video**
|
- **PalcoMP3:video**
|
||||||
- **pandora.tv**: 판도라TV
|
- **pandora.tv**: 판도라TV
|
||||||
- **ParamountNetwork**
|
- **ParamountNetwork**
|
||||||
|
- **ParamountPlus**
|
||||||
- **ParamountPlusSeries**
|
- **ParamountPlusSeries**
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
- **Parlview**
|
- **Parlview**
|
||||||
- **Patreon**
|
- **Patreon**
|
||||||
|
- **PatreonUser**
|
||||||
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! 
(WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! 
(WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
||||||
- **PearVideo**
|
- **PearVideo**
|
||||||
- **PeerTube**
|
- **PeerTube**
|
||||||
|
- **PeerTube:Playlist**
|
||||||
|
- **peloton**
|
||||||
|
- **peloton:live**: Peloton Live
|
||||||
- **People**
|
- **People**
|
||||||
- **PerformGroup**
|
- **PerformGroup**
|
||||||
- **periscope**: Periscope
|
- **periscope**: Periscope
|
||||||
@@ -749,6 +806,7 @@
|
|||||||
- **Pinterest**
|
- **Pinterest**
|
||||||
- **PinterestCollection**
|
- **PinterestCollection**
|
||||||
- **Pladform**
|
- **Pladform**
|
||||||
|
- **PlanetMarathi**
|
||||||
- **Platzi**
|
- **Platzi**
|
||||||
- **PlatziCourse**
|
- **PlatziCourse**
|
||||||
- **play.fm**
|
- **play.fm**
|
||||||
@@ -765,7 +823,12 @@
|
|||||||
- **podomatic**
|
- **podomatic**
|
||||||
- **Pokemon**
|
- **Pokemon**
|
||||||
- **PokemonWatch**
|
- **PokemonWatch**
|
||||||
|
- **PolsatGo**
|
||||||
- **PolskieRadio**
|
- **PolskieRadio**
|
||||||
|
- **polskieradio:kierowcow**
|
||||||
|
- **polskieradio:player**
|
||||||
|
- **polskieradio:podcast**
|
||||||
|
- **polskieradio:podcast:list**
|
||||||
- **PolskieRadioCategory**
|
- **PolskieRadioCategory**
|
||||||
- **Popcorntimes**
|
- **Popcorntimes**
|
||||||
- **PopcornTV**
|
- **PopcornTV**
|
||||||
@@ -775,6 +838,7 @@
|
|||||||
- **PornHd**
|
- **PornHd**
|
||||||
- **PornHub**: PornHub and Thumbzilla
|
- **PornHub**: PornHub and Thumbzilla
|
||||||
- **PornHubPagedVideoList**
|
- **PornHubPagedVideoList**
|
||||||
|
- **PornHubPlaylist**
|
||||||
- **PornHubUser**
|
- **PornHubUser**
|
||||||
- **PornHubUserVideosUpload**
|
- **PornHubUserVideosUpload**
|
||||||
- **Pornotube**
|
- **Pornotube**
|
||||||
@@ -782,6 +846,7 @@
|
|||||||
- **PornoXO**
|
- **PornoXO**
|
||||||
- **PornTube**
|
- **PornTube**
|
||||||
- **PressTV**
|
- **PressTV**
|
||||||
|
- **ProjectVeritas**
|
||||||
- **prosiebensat1**: ProSiebenSat.1 Digital
|
- **prosiebensat1**: ProSiebenSat.1 Digital
|
||||||
- **puhutv**
|
- **puhutv**
|
||||||
- **puhutv:serie**
|
- **puhutv:serie**
|
||||||
@@ -798,16 +863,25 @@
|
|||||||
- **QuicklineLive**
|
- **QuicklineLive**
|
||||||
- **R7**
|
- **R7**
|
||||||
- **R7Article**
|
- **R7Article**
|
||||||
|
- **Radiko**
|
||||||
|
- **RadikoRadio**
|
||||||
- **radio.de**
|
- **radio.de**
|
||||||
- **radiobremen**
|
- **radiobremen**
|
||||||
- **radiocanada**
|
- **radiocanada**
|
||||||
- **radiocanada:audiovideo**
|
- **radiocanada:audiovideo**
|
||||||
- **radiofrance**
|
- **radiofrance**
|
||||||
- **RadioJavan**
|
- **RadioJavan**
|
||||||
|
- **radiokapital**
|
||||||
|
- **radiokapital:show**
|
||||||
|
- **radlive**
|
||||||
|
- **radlive:channel**
|
||||||
|
- **radlive:season**
|
||||||
- **Rai**
|
- **Rai**
|
||||||
- **RaiPlay**
|
- **RaiPlay**
|
||||||
- **RaiPlayLive**
|
- **RaiPlayLive**
|
||||||
- **RaiPlayPlaylist**
|
- **RaiPlayPlaylist**
|
||||||
|
- **RaiPlayRadio**
|
||||||
|
- **RaiPlayRadioPlaylist**
|
||||||
- **RayWenderlich**
|
- **RayWenderlich**
|
||||||
- **RayWenderlichCourse**
|
- **RayWenderlichCourse**
|
||||||
- **RBMARadio**
|
- **RBMARadio**
|
||||||
@@ -816,6 +890,7 @@
|
|||||||
- **RCSVarious**
|
- **RCSVarious**
|
||||||
- **RCTIPlus**
|
- **RCTIPlus**
|
||||||
- **RCTIPlusSeries**
|
- **RCTIPlusSeries**
|
||||||
|
- **RCTIPlusTV**
|
||||||
- **RDS**: RDS.ca
|
- **RDS**: RDS.ca
|
||||||
- **RedBull**
|
- **RedBull**
|
||||||
- **RedBullEmbed**
|
- **RedBullEmbed**
|
||||||
@@ -834,6 +909,7 @@
|
|||||||
- **RMCDecouverte**
|
- **RMCDecouverte**
|
||||||
- **RockstarGames**
|
- **RockstarGames**
|
||||||
- **RoosterTeeth**
|
- **RoosterTeeth**
|
||||||
|
- **RoosterTeethSeries**
|
||||||
- **RottenTomatoes**
|
- **RottenTomatoes**
|
||||||
- **Roxwel**
|
- **Roxwel**
|
||||||
- **Rozhlas**
|
- **Rozhlas**
|
||||||
@@ -853,6 +929,7 @@
|
|||||||
- **RTVNH**
|
- **RTVNH**
|
||||||
- **RTVS**
|
- **RTVS**
|
||||||
- **RUHD**
|
- **RUHD**
|
||||||
|
- **RumbleChannel**
|
||||||
- **RumbleEmbed**
|
- **RumbleEmbed**
|
||||||
- **rutube**: Rutube videos
|
- **rutube**: Rutube videos
|
||||||
- **rutube:channel**: Rutube channels
|
- **rutube:channel**: Rutube channels
|
||||||
@@ -875,7 +952,7 @@
|
|||||||
- **SBS**: sbs.com.au
|
- **SBS**: sbs.com.au
|
||||||
- **schooltv**
|
- **schooltv**
|
||||||
- **ScienceChannel**
|
- **ScienceChannel**
|
||||||
- **screen.yahoo:search**: Yahoo screen search
|
- **screen.yahoo:search**: Yahoo screen search; "yvsearch:" prefix
|
||||||
- **Screencast**
|
- **Screencast**
|
||||||
- **ScreencastOMatic**
|
- **ScreencastOMatic**
|
||||||
- **ScrippsNetworks**
|
- **ScrippsNetworks**
|
||||||
@@ -900,12 +977,14 @@
|
|||||||
- **Sina**
|
- **Sina**
|
||||||
- **sky.it**
|
- **sky.it**
|
||||||
- **sky:news**
|
- **sky:news**
|
||||||
|
- **sky:news:story**
|
||||||
- **sky:sports**
|
- **sky:sports**
|
||||||
- **sky:sports:news**
|
- **sky:sports:news**
|
||||||
- **skyacademy.it**
|
- **skyacademy.it**
|
||||||
- **SkylineWebcams**
|
- **SkylineWebcams**
|
||||||
- **skynewsarabia:article**
|
- **skynewsarabia:article**
|
||||||
- **skynewsarabia:video**
|
- **skynewsarabia:video**
|
||||||
|
- **SkyNewsAU**
|
||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
- **SlidesLive**
|
- **SlidesLive**
|
||||||
- **Slutload**
|
- **Slutload**
|
||||||
@@ -915,7 +994,7 @@
|
|||||||
- **SonyLIVSeries**
|
- **SonyLIVSeries**
|
||||||
- **soundcloud**
|
- **soundcloud**
|
||||||
- **soundcloud:playlist**
|
- **soundcloud:playlist**
|
||||||
- **soundcloud:search**: Soundcloud search
|
- **soundcloud:search**: Soundcloud search; "scsearch:" prefix
|
||||||
- **soundcloud:set**
|
- **soundcloud:set**
|
||||||
- **soundcloud:trackstation**
|
- **soundcloud:trackstation**
|
||||||
- **soundcloud:user**
|
- **soundcloud:user**
|
||||||
@@ -927,11 +1006,12 @@
|
|||||||
- **southpark.de**
|
- **southpark.de**
|
||||||
- **southpark.nl**
|
- **southpark.nl**
|
||||||
- **southparkstudios.dk**
|
- **southparkstudios.dk**
|
||||||
|
- **SovietsCloset**
|
||||||
|
- **SovietsClosetPlaylist**
|
||||||
- **SpankBang**
|
- **SpankBang**
|
||||||
- **SpankBangPlaylist**
|
- **SpankBangPlaylist**
|
||||||
- **Spankwire**
|
- **Spankwire**
|
||||||
- **Spiegel**
|
- **Spiegel**
|
||||||
- **sport.francetvinfo.fr**
|
|
||||||
- **Sport5**
|
- **Sport5**
|
||||||
- **SportBox**
|
- **SportBox**
|
||||||
- **SportDeutschland**
|
- **SportDeutschland**
|
||||||
@@ -947,6 +1027,7 @@
|
|||||||
- **SRGSSR**
|
- **SRGSSR**
|
||||||
- **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
|
- **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
|
||||||
- **stanfordoc**: Stanford Open ClassRoom
|
- **stanfordoc**: Stanford Open ClassRoom
|
||||||
|
- **startv**
|
||||||
- **Steam**
|
- **Steam**
|
||||||
- **Stitcher**
|
- **Stitcher**
|
||||||
- **StitcherShow**
|
- **StitcherShow**
|
||||||
@@ -954,6 +1035,7 @@
|
|||||||
- **StoryFireSeries**
|
- **StoryFireSeries**
|
||||||
- **StoryFireUser**
|
- **StoryFireUser**
|
||||||
- **Streamable**
|
- **Streamable**
|
||||||
|
- **Streamanity**
|
||||||
- **streamcloud.eu**
|
- **streamcloud.eu**
|
||||||
- **StreamCZ**
|
- **StreamCZ**
|
||||||
- **StreetVoice**
|
- **StreetVoice**
|
||||||
@@ -971,7 +1053,6 @@
|
|||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
- **t-online.de**
|
- **t-online.de**
|
||||||
- **Tagesschau**
|
- **Tagesschau**
|
||||||
- **tagesschau:player**
|
|
||||||
- **Tass**
|
- **Tass**
|
||||||
- **TBS**
|
- **TBS**
|
||||||
- **TDSLifeway**
|
- **TDSLifeway**
|
||||||
@@ -1009,16 +1090,23 @@
|
|||||||
- **TheScene**
|
- **TheScene**
|
||||||
- **TheStar**
|
- **TheStar**
|
||||||
- **TheSun**
|
- **TheSun**
|
||||||
|
- **ThetaStream**
|
||||||
|
- **ThetaVideo**
|
||||||
- **TheWeatherChannel**
|
- **TheWeatherChannel**
|
||||||
- **ThisAmericanLife**
|
- **ThisAmericanLife**
|
||||||
- **ThisAV**
|
- **ThisAV**
|
||||||
- **ThisOldHouse**
|
- **ThisOldHouse**
|
||||||
|
- **ThreeSpeak**
|
||||||
|
- **ThreeSpeakUser**
|
||||||
- **TikTok**
|
- **TikTok**
|
||||||
|
- **tiktok:user**
|
||||||
- **tinypic**: tinypic.com videos
|
- **tinypic**: tinypic.com videos
|
||||||
- **TMZ**
|
- **TMZ**
|
||||||
- **TNAFlix**
|
- **TNAFlix**
|
||||||
- **TNAFlixNetworkEmbed**
|
- **TNAFlixNetworkEmbed**
|
||||||
- **toggle**
|
- **toggle**
|
||||||
|
- **Tokentube**
|
||||||
|
- **Tokentube:channel**
|
||||||
- **ToonGoggles**
|
- **ToonGoggles**
|
||||||
- **tou.tv**
|
- **tou.tv**
|
||||||
- **Toypics**: Toypics video
|
- **Toypics**: Toypics video
|
||||||
@@ -1026,6 +1114,8 @@
|
|||||||
- **TrailerAddict** (Currently broken)
|
- **TrailerAddict** (Currently broken)
|
||||||
- **Trilulilu**
|
- **Trilulilu**
|
||||||
- **Trovo**
|
- **Trovo**
|
||||||
|
- **TrovoChannelClip**: All Clips of a trovo.live channel; "trovoclip:" prefix
|
||||||
|
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
||||||
- **TrovoVod**
|
- **TrovoVod**
|
||||||
- **TruNews**
|
- **TruNews**
|
||||||
- **TruTV**
|
- **TruTV**
|
||||||
@@ -1041,10 +1131,11 @@
|
|||||||
- **Turbo**
|
- **Turbo**
|
||||||
- **tv.dfb.de**
|
- **tv.dfb.de**
|
||||||
- **TV2**
|
- **TV2**
|
||||||
- **tv2.hu**
|
|
||||||
- **TV2Article**
|
- **TV2Article**
|
||||||
- **TV2DK**
|
- **TV2DK**
|
||||||
- **TV2DKBornholmPlay**
|
- **TV2DKBornholmPlay**
|
||||||
|
- **tv2play.hu**
|
||||||
|
- **tv2playseries.hu**
|
||||||
- **TV4**: tv4.se and tv4play.se
|
- **TV4**: tv4.se and tv4play.se
|
||||||
- **TV5MondePlus**: TV5MONDE+
|
- **TV5MondePlus**: TV5MONDE+
|
||||||
- **tv5unis**
|
- **tv5unis**
|
||||||
@@ -1070,6 +1161,7 @@
|
|||||||
- **tvp**: Telewizja Polska
|
- **tvp**: Telewizja Polska
|
||||||
- **tvp:embed**: Telewizja Polska
|
- **tvp:embed**: Telewizja Polska
|
||||||
- **tvp:series**
|
- **tvp:series**
|
||||||
|
- **tvp:stream**
|
||||||
- **TVPlayer**
|
- **TVPlayer**
|
||||||
- **TVPlayHome**
|
- **TVPlayHome**
|
||||||
- **Tweakers**
|
- **Tweakers**
|
||||||
@@ -1113,6 +1205,7 @@
|
|||||||
- **Varzesh3**
|
- **Varzesh3**
|
||||||
- **Vbox7**
|
- **Vbox7**
|
||||||
- **VeeHD**
|
- **VeeHD**
|
||||||
|
- **Veo**
|
||||||
- **Veoh**
|
- **Veoh**
|
||||||
- **Vesti**: Вести.Ru
|
- **Vesti**: Вести.Ru
|
||||||
- **Vevo**
|
- **Vevo**
|
||||||
@@ -1128,7 +1221,7 @@
|
|||||||
- **Viddler**
|
- **Viddler**
|
||||||
- **Videa**
|
- **Videa**
|
||||||
- **video.arnes.si**: Arnes Video
|
- **video.arnes.si**: Arnes Video
|
||||||
- **video.google:search**: Google Video search
|
- **video.google:search**: Google Video search; "gvsearch:" prefix (Currently broken)
|
||||||
- **video.sky.it**
|
- **video.sky.it**
|
||||||
- **video.sky.it:live**
|
- **video.sky.it:live**
|
||||||
- **VideoDetective**
|
- **VideoDetective**
|
||||||
@@ -1141,9 +1234,6 @@
|
|||||||
- **VidioLive**
|
- **VidioLive**
|
||||||
- **VidioPremier**
|
- **VidioPremier**
|
||||||
- **VidLii**
|
- **VidLii**
|
||||||
- **vidme**
|
|
||||||
- **vidme:user**
|
|
||||||
- **vidme:user:likes**
|
|
||||||
- **vier**: vier.be and vijf.be
|
- **vier**: vier.be and vijf.be
|
||||||
- **vier:videos**
|
- **vier:videos**
|
||||||
- **viewlift**
|
- **viewlift**
|
||||||
@@ -1178,6 +1268,8 @@
|
|||||||
- **VODPl**
|
- **VODPl**
|
||||||
- **VODPlatform**
|
- **VODPlatform**
|
||||||
- **VoiceRepublic**
|
- **VoiceRepublic**
|
||||||
|
- **voicy**
|
||||||
|
- **voicy:channel**
|
||||||
- **Voot**
|
- **Voot**
|
||||||
- **VootSeries**
|
- **VootSeries**
|
||||||
- **VoxMedia**
|
- **VoxMedia**
|
||||||
@@ -1193,6 +1285,7 @@
|
|||||||
- **VTXTV**
|
- **VTXTV**
|
||||||
- **vube**: Vube.com
|
- **vube**: Vube.com
|
||||||
- **VuClip**
|
- **VuClip**
|
||||||
|
- **Vupload**
|
||||||
- **VVVVID**
|
- **VVVVID**
|
||||||
- **VVVVIDShow**
|
- **VVVVIDShow**
|
||||||
- **VyboryMos**
|
- **VyboryMos**
|
||||||
@@ -1223,6 +1316,8 @@
|
|||||||
- **WistiaPlaylist**
|
- **WistiaPlaylist**
|
||||||
- **wnl**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **wnl**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
- **WorldStarHipHop**
|
- **WorldStarHipHop**
|
||||||
|
- **wppilot**
|
||||||
|
- **wppilot:channels**
|
||||||
- **WSJ**: Wall Street Journal
|
- **WSJ**: Wall Street Journal
|
||||||
- **WSJArticle**
|
- **WSJArticle**
|
||||||
- **WWE**
|
- **WWE**
|
||||||
@@ -1270,19 +1365,19 @@
|
|||||||
- **YouPorn**
|
- **YouPorn**
|
||||||
- **YourPorn**
|
- **YourPorn**
|
||||||
- **YourUpload**
|
- **YourUpload**
|
||||||
- **youtube**: YouTube.com
|
- **youtube**: YouTube
|
||||||
- **youtube:favorites**: YouTube.com liked videos, ":ytfav" for short (requires authentication)
|
- **youtube:favorites**: YouTube liked videos; ":ytfav" keyword (requires cookies)
|
||||||
- **youtube:history**: Youtube watch history, ":ythis" for short (requires authentication)
|
- **youtube:history**: Youtube watch history; ":ythis" keyword (requires cookies)
|
||||||
- **youtube:playlist**: YouTube.com playlists
|
- **youtube:playlist**: YouTube playlists
|
||||||
- **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
|
- **youtube:recommended**: YouTube recommended videos; ":ytrec" keyword
|
||||||
- **youtube:search**: YouTube.com searches, "ytsearch" keyword
|
- **youtube:search**: YouTube searches; "ytsearch:" prefix
|
||||||
- **youtube:search:date**: YouTube.com searches, newest videos first, "ytsearchdate" keyword
|
- **youtube:search:date**: YouTube searches, newest videos first; "ytsearchdate:" prefix
|
||||||
- **youtube:search_url**: YouTube.com search URLs
|
- **youtube:search_url**: YouTube search URLs with sorting and filter support
|
||||||
- **youtube:subscriptions**: YouTube.com subscriptions feed, ":ytsubs" for short (requires authentication)
|
- **youtube:subscriptions**: YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)
|
||||||
- **youtube:tab**: YouTube.com tab
|
- **youtube:tab**: YouTube Tabs
|
||||||
- **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
|
- **youtube:watchlater**: Youtube watch later list; ":ytwatchlater" keyword (requires cookies)
|
||||||
- **YoutubeYtBe**: youtu.be
|
- **YoutubeYtBe**: youtu.be
|
||||||
- **YoutubeYtUser**: YouTube.com user videos, URL or "ytuser" keyword
|
- **YoutubeYtUser**: YouTube user videos; "ytuser:" prefix
|
||||||
- **Zapiks**
|
- **Zapiks**
|
||||||
- **Zattoo**
|
- **Zattoo**
|
||||||
- **ZattooLive**
|
- **ZattooLive**
|
||||||
@@ -1290,6 +1385,8 @@
|
|||||||
- **ZDFChannel**
|
- **ZDFChannel**
|
||||||
- **Zee5**
|
- **Zee5**
|
||||||
- **zee5:series**
|
- **zee5:series**
|
||||||
|
- **ZenYandex**
|
||||||
|
- **ZenYandexChannel**
|
||||||
- **Zhihu**
|
- **Zhihu**
|
||||||
- **zingmp3**: mp3.zing.vn
|
- **zingmp3**: mp3.zing.vn
|
||||||
- **zingmp3:album**
|
- **zingmp3:album**
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ from yt_dlp.utils import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
if "pytest" in sys.modules:
|
if 'pytest' in sys.modules:
|
||||||
import pytest
|
import pytest
|
||||||
is_download_test = pytest.mark.download
|
is_download_test = pytest.mark.download
|
||||||
else:
|
else:
|
||||||
@@ -32,9 +32,9 @@ else:
|
|||||||
|
|
||||||
def get_params(override=None):
|
def get_params(override=None):
|
||||||
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||||
"parameters.json")
|
'parameters.json')
|
||||||
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||||
"local_parameters.json")
|
'local_parameters.json')
|
||||||
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
|
||||||
parameters = json.load(pf)
|
parameters = json.load(pf)
|
||||||
if os.path.exists(LOCAL_PARAMETERS_FILE):
|
if os.path.exists(LOCAL_PARAMETERS_FILE):
|
||||||
|
|||||||
@@ -9,7 +9,7 @@
|
|||||||
"forcetitle": false,
|
"forcetitle": false,
|
||||||
"forceurl": false,
|
"forceurl": false,
|
||||||
"force_write_download_archive": false,
|
"force_write_download_archive": false,
|
||||||
"format": "best",
|
"format": "b/bv",
|
||||||
"ignoreerrors": false,
|
"ignoreerrors": false,
|
||||||
"listformats": null,
|
"listformats": null,
|
||||||
"logtostderr": false,
|
"logtostderr": false,
|
||||||
@@ -44,6 +44,5 @@
|
|||||||
"writesubtitles": false,
|
"writesubtitles": false,
|
||||||
"allsubtitles": false,
|
"allsubtitles": false,
|
||||||
"listsubtitles": false,
|
"listsubtitles": false,
|
||||||
"socket_timeout": 20,
|
|
||||||
"fixup": "never"
|
"fixup": "never"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ from yt_dlp.compat import compat_os_name, compat_setenv, compat_str, compat_urll
|
|||||||
from yt_dlp.extractor import YoutubeIE
|
from yt_dlp.extractor import YoutubeIE
|
||||||
from yt_dlp.extractor.common import InfoExtractor
|
from yt_dlp.extractor.common import InfoExtractor
|
||||||
from yt_dlp.postprocessor.common import PostProcessor
|
from yt_dlp.postprocessor.common import PostProcessor
|
||||||
from yt_dlp.utils import ExtractorError, int_or_none, match_filter_func
|
from yt_dlp.utils import ExtractorError, int_or_none, match_filter_func, LazyList
|
||||||
|
|
||||||
TEST_URL = 'http://localhost/sample.mp4'
|
TEST_URL = 'http://localhost/sample.mp4'
|
||||||
|
|
||||||
@@ -649,12 +649,14 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
'title2': '%PATH%',
|
'title2': '%PATH%',
|
||||||
'title3': 'foo/bar\\test',
|
'title3': 'foo/bar\\test',
|
||||||
'title4': 'foo "bar" test',
|
'title4': 'foo "bar" test',
|
||||||
|
'title5': 'áéí 𝐀',
|
||||||
'timestamp': 1618488000,
|
'timestamp': 1618488000,
|
||||||
'duration': 100000,
|
'duration': 100000,
|
||||||
'playlist_index': 1,
|
'playlist_index': 1,
|
||||||
|
'playlist_autonumber': 2,
|
||||||
'_last_playlist_index': 100,
|
'_last_playlist_index': 100,
|
||||||
'n_entries': 10,
|
'n_entries': 10,
|
||||||
'formats': [{'id': 'id1'}, {'id': 'id2'}, {'id': 'id3'}]
|
'formats': [{'id': 'id 1'}, {'id': 'id 2'}, {'id': 'id 3'}]
|
||||||
}
|
}
|
||||||
|
|
||||||
def test_prepare_outtmpl_and_filename(self):
|
def test_prepare_outtmpl_and_filename(self):
|
||||||
@@ -664,26 +666,31 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
ydl._num_downloads = 1
|
ydl._num_downloads = 1
|
||||||
self.assertEqual(ydl.validate_outtmpl(tmpl), None)
|
self.assertEqual(ydl.validate_outtmpl(tmpl), None)
|
||||||
|
|
||||||
outtmpl, tmpl_dict = ydl.prepare_outtmpl(tmpl, info or self.outtmpl_info)
|
out = ydl.evaluate_outtmpl(tmpl, info or self.outtmpl_info)
|
||||||
out = ydl.escape_outtmpl(outtmpl) % tmpl_dict
|
|
||||||
fname = ydl.prepare_filename(info or self.outtmpl_info)
|
fname = ydl.prepare_filename(info or self.outtmpl_info)
|
||||||
|
|
||||||
if callable(expected):
|
if not isinstance(expected, (list, tuple)):
|
||||||
self.assertTrue(expected(out))
|
expected = (expected, expected)
|
||||||
self.assertTrue(expected(fname))
|
for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected):
|
||||||
elif isinstance(expected, str):
|
if callable(expect):
|
||||||
self.assertEqual(out, expected)
|
self.assertTrue(expect(got), f'Wrong {name} from {tmpl}')
|
||||||
self.assertEqual(fname, expected)
|
else:
|
||||||
else:
|
self.assertEqual(got, expect, f'Wrong {name} from {tmpl}')
|
||||||
self.assertEqual(out, expected[0])
|
|
||||||
self.assertEqual(fname, expected[1])
|
# Side-effects
|
||||||
|
original_infodict = dict(self.outtmpl_info)
|
||||||
|
test('foo.bar', 'foo.bar')
|
||||||
|
original_infodict['epoch'] = self.outtmpl_info.get('epoch')
|
||||||
|
self.assertTrue(isinstance(original_infodict['epoch'], int))
|
||||||
|
test('%(epoch)d', int_or_none)
|
||||||
|
self.assertEqual(original_infodict, self.outtmpl_info)
|
||||||
|
|
||||||
# Auto-generated fields
|
# Auto-generated fields
|
||||||
test('%(id)s.%(ext)s', '1234.mp4')
|
test('%(id)s.%(ext)s', '1234.mp4')
|
||||||
test('%(duration_string)s', ('27:46:40', '27-46-40'))
|
test('%(duration_string)s', ('27:46:40', '27-46-40'))
|
||||||
test('%(epoch)d', int_or_none)
|
|
||||||
test('%(resolution)s', '1080p')
|
test('%(resolution)s', '1080p')
|
||||||
test('%(playlist_index)s', '001')
|
test('%(playlist_index)s', '001')
|
||||||
|
test('%(playlist_autonumber)s', '02')
|
||||||
test('%(autonumber)s', '00001')
|
test('%(autonumber)s', '00001')
|
||||||
test('%(autonumber+2)03d', '005', autonumber_start=3)
|
test('%(autonumber+2)03d', '005', autonumber_start=3)
|
||||||
test('%(autonumber)s', '001', autonumber_size=3)
|
test('%(autonumber)s', '001', autonumber_size=3)
|
||||||
@@ -714,13 +721,23 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
# Invalid templates
|
# Invalid templates
|
||||||
self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
|
self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
|
||||||
test('%(invalid@tmpl|def)s', 'none', outtmpl_na_placeholder='none')
|
test('%(invalid@tmpl|def)s', 'none', outtmpl_na_placeholder='none')
|
||||||
test('%()s', 'NA')
|
test('%(..)s', 'NA')
|
||||||
|
|
||||||
|
# Entire info_dict
|
||||||
|
def expect_same_infodict(out):
|
||||||
|
got_dict = json.loads(out)
|
||||||
|
for info_field, expected in self.outtmpl_info.items():
|
||||||
|
self.assertEqual(got_dict.get(info_field), expected, info_field)
|
||||||
|
return True
|
||||||
|
|
||||||
|
test('%()j', (expect_same_infodict, str))
|
||||||
|
|
||||||
# NA placeholder
|
# NA placeholder
|
||||||
NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(x|def)s-%(id)s.%(ext)s'
|
NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(x|def)s-%(id)s.%(ext)s'
|
||||||
test(NA_TEST_OUTTMPL, 'NA-NA-def-1234.mp4')
|
test(NA_TEST_OUTTMPL, 'NA-NA-def-1234.mp4')
|
||||||
test(NA_TEST_OUTTMPL, 'none-none-def-1234.mp4', outtmpl_na_placeholder='none')
|
test(NA_TEST_OUTTMPL, 'none-none-def-1234.mp4', outtmpl_na_placeholder='none')
|
||||||
test(NA_TEST_OUTTMPL, '--def-1234.mp4', outtmpl_na_placeholder='')
|
test(NA_TEST_OUTTMPL, '--def-1234.mp4', outtmpl_na_placeholder='')
|
||||||
|
test('%(non_existent.0)s', 'NA')
|
||||||
|
|
||||||
# String formatting
|
# String formatting
|
||||||
FMT_TEST_OUTTMPL = '%%(height)%s.%%(ext)s'
|
FMT_TEST_OUTTMPL = '%%(height)%s.%%(ext)s'
|
||||||
@@ -746,17 +763,28 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
test('a%(width|)d', 'a', outtmpl_na_placeholder='none')
|
test('a%(width|)d', 'a', outtmpl_na_placeholder='none')
|
||||||
|
|
||||||
FORMATS = self.outtmpl_info['formats']
|
FORMATS = self.outtmpl_info['formats']
|
||||||
sanitize = lambda x: x.replace(':', ' -').replace('"', "'")
|
sanitize = lambda x: x.replace(':', ' -').replace('"', "'").replace('\n', ' ')
|
||||||
|
|
||||||
# Custom type casting
|
# Custom type casting
|
||||||
test('%(formats.:.id)l', 'id1, id2, id3')
|
test('%(formats.:.id)l', 'id 1, id 2, id 3')
|
||||||
|
test('%(formats.:.id)#l', ('id 1\nid 2\nid 3', 'id 1 id 2 id 3'))
|
||||||
test('%(ext)l', 'mp4')
|
test('%(ext)l', 'mp4')
|
||||||
test('%(formats.:.id) 15l', ' id1, id2, id3')
|
test('%(formats.:.id) 18l', ' id 1, id 2, id 3')
|
||||||
test('%(formats)j', (json.dumps(FORMATS), sanitize(json.dumps(FORMATS))))
|
test('%(formats)j', (json.dumps(FORMATS), sanitize(json.dumps(FORMATS))))
|
||||||
|
test('%(formats)#j', (json.dumps(FORMATS, indent=4), sanitize(json.dumps(FORMATS, indent=4))))
|
||||||
|
test('%(title5).3B', 'á')
|
||||||
|
test('%(title5)U', 'áéí 𝐀')
|
||||||
|
test('%(title5)#U', 'a\u0301e\u0301i\u0301 𝐀')
|
||||||
|
test('%(title5)+U', 'áéí A')
|
||||||
|
test('%(title5)+#U', 'a\u0301e\u0301i\u0301 A')
|
||||||
if compat_os_name == 'nt':
|
if compat_os_name == 'nt':
|
||||||
test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
|
test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
|
||||||
|
test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', "'id 1' 'id 2' 'id 3'"))
|
||||||
|
test('%(formats.0.id)#q', ('"id 1"', "'id 1'"))
|
||||||
else:
|
else:
|
||||||
test('%(title4)q', ('\'foo "bar" test\'', "'foo 'bar' test'"))
|
test('%(title4)q', ('\'foo "bar" test\'', "'foo 'bar' test'"))
|
||||||
|
test('%(formats.:.id)#q', "'id 1' 'id 2' 'id 3'")
|
||||||
|
test('%(formats.0.id)#q', "'id 1'")
|
||||||
|
|
||||||
# Internal formatting
|
# Internal formatting
|
||||||
test('%(timestamp-1000>%H-%M-%S)s', '11-43-20')
|
test('%(timestamp-1000>%H-%M-%S)s', '11-43-20')
|
||||||
@@ -774,6 +802,18 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
test('%(formats.0.id.-1+id)f', '1235.000000')
|
test('%(formats.0.id.-1+id)f', '1235.000000')
|
||||||
test('%(formats.0.id.-1+formats.1.id.-1)d', '3')
|
test('%(formats.0.id.-1+formats.1.id.-1)d', '3')
|
||||||
|
|
||||||
|
# Alternates
|
||||||
|
test('%(title,id)s', '1234')
|
||||||
|
test('%(width-100,height+20|def)d', '1100')
|
||||||
|
test('%(width-100,height+width|def)s', 'def')
|
||||||
|
test('%(timestamp-x>%H\\,%M\\,%S,timestamp>%H\\,%M\\,%S)s', '12,00,00')
|
||||||
|
|
||||||
|
# Laziness
|
||||||
|
def gen():
|
||||||
|
yield from range(5)
|
||||||
|
raise self.assertTrue(False, 'LazyList should not be evaluated till here')
|
||||||
|
test('%(key.4)s', '4', info={'key': LazyList(gen())})
|
||||||
|
|
||||||
# Empty filename
|
# Empty filename
|
||||||
test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
|
test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
|
||||||
# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
|
# test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
|
||||||
@@ -783,6 +823,12 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
compat_setenv('__yt_dlp_var', 'expanded')
|
compat_setenv('__yt_dlp_var', 'expanded')
|
||||||
envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
|
envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
|
||||||
test(envvar, (envvar, 'expanded'))
|
test(envvar, (envvar, 'expanded'))
|
||||||
|
if compat_os_name == 'nt':
|
||||||
|
test('%s%', ('%s%', '%s%'))
|
||||||
|
compat_setenv('s', 'expanded')
|
||||||
|
test('%s%', ('%s%', 'expanded')) # %s% should be expanded before escaping %s
|
||||||
|
compat_setenv('(test)s', 'expanded')
|
||||||
|
test('%(test)s%', ('NA%', 'expanded')) # Environment should take priority over template
|
||||||
|
|
||||||
# Path expansion and escaping
|
# Path expansion and escaping
|
||||||
test('Hello %(title1)s', 'Hello $PATH')
|
test('Hello %(title1)s', 'Hello $PATH')
|
||||||
@@ -958,54 +1004,32 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
ydl.process_ie_result(copy.deepcopy(playlist))
|
ydl.process_ie_result(copy.deepcopy(playlist))
|
||||||
return ydl.downloaded_info_dicts
|
return ydl.downloaded_info_dicts
|
||||||
|
|
||||||
def get_ids(params):
|
def test_selection(params, expected_ids):
|
||||||
return [int(v['id']) for v in get_downloaded_info_dicts(params)]
|
results = [
|
||||||
|
(v['playlist_autonumber'] - 1, (int(v['id']), v['playlist_index']))
|
||||||
|
for v in get_downloaded_info_dicts(params)]
|
||||||
|
self.assertEqual(results, list(enumerate(zip(expected_ids, expected_ids))))
|
||||||
|
|
||||||
result = get_ids({})
|
test_selection({}, [1, 2, 3, 4])
|
||||||
self.assertEqual(result, [1, 2, 3, 4])
|
test_selection({'playlistend': 10}, [1, 2, 3, 4])
|
||||||
|
test_selection({'playlistend': 2}, [1, 2])
|
||||||
result = get_ids({'playlistend': 10})
|
test_selection({'playliststart': 10}, [])
|
||||||
self.assertEqual(result, [1, 2, 3, 4])
|
test_selection({'playliststart': 2}, [2, 3, 4])
|
||||||
|
test_selection({'playlist_items': '2-4'}, [2, 3, 4])
|
||||||
result = get_ids({'playlistend': 2})
|
test_selection({'playlist_items': '2,4'}, [2, 4])
|
||||||
self.assertEqual(result, [1, 2])
|
test_selection({'playlist_items': '10'}, [])
|
||||||
|
test_selection({'playlist_items': '0'}, [])
|
||||||
result = get_ids({'playliststart': 10})
|
|
||||||
self.assertEqual(result, [])
|
|
||||||
|
|
||||||
result = get_ids({'playliststart': 2})
|
|
||||||
self.assertEqual(result, [2, 3, 4])
|
|
||||||
|
|
||||||
result = get_ids({'playlist_items': '2-4'})
|
|
||||||
self.assertEqual(result, [2, 3, 4])
|
|
||||||
|
|
||||||
result = get_ids({'playlist_items': '2,4'})
|
|
||||||
self.assertEqual(result, [2, 4])
|
|
||||||
|
|
||||||
result = get_ids({'playlist_items': '10'})
|
|
||||||
self.assertEqual(result, [])
|
|
||||||
|
|
||||||
result = get_ids({'playlist_items': '3-10'})
|
|
||||||
self.assertEqual(result, [3, 4])
|
|
||||||
|
|
||||||
result = get_ids({'playlist_items': '2-4,3-4,3'})
|
|
||||||
self.assertEqual(result, [2, 3, 4])
|
|
||||||
|
|
||||||
# Tests for https://github.com/ytdl-org/youtube-dl/issues/10591
|
# Tests for https://github.com/ytdl-org/youtube-dl/issues/10591
|
||||||
# @{
|
test_selection({'playlist_items': '2-4,3-4,3'}, [2, 3, 4])
|
||||||
result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
|
test_selection({'playlist_items': '4,2'}, [4, 2])
|
||||||
self.assertEqual(result[0]['playlist_index'], 2)
|
|
||||||
self.assertEqual(result[1]['playlist_index'], 3)
|
|
||||||
|
|
||||||
result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
|
# Tests for https://github.com/yt-dlp/yt-dlp/issues/720
|
||||||
self.assertEqual(result[0]['playlist_index'], 2)
|
# https://github.com/yt-dlp/yt-dlp/issues/302
|
||||||
self.assertEqual(result[1]['playlist_index'], 3)
|
test_selection({'playlistreverse': True}, [4, 3, 2, 1])
|
||||||
self.assertEqual(result[2]['playlist_index'], 4)
|
test_selection({'playliststart': 2, 'playlistreverse': True}, [4, 3, 2])
|
||||||
|
test_selection({'playlist_items': '2,4', 'playlistreverse': True}, [4, 2])
|
||||||
result = get_downloaded_info_dicts({'playlist_items': '4,2'})
|
test_selection({'playlist_items': '4,2'}, [4, 2])
|
||||||
self.assertEqual(result[0]['playlist_index'], 4)
|
|
||||||
self.assertEqual(result[1]['playlist_index'], 2)
|
|
||||||
# @}
|
|
||||||
|
|
||||||
def test_urlopen_no_file_protocol(self):
|
def test_urlopen_no_file_protocol(self):
|
||||||
# see https://github.com/ytdl-org/youtube-dl/issues/8227
|
# see https://github.com/ytdl-org/youtube-dl/issues/8227
|
||||||
|
|||||||
@@ -7,7 +7,19 @@ import sys
|
|||||||
import unittest
|
import unittest
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
from yt_dlp.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
|
from yt_dlp.aes import (
|
||||||
|
aes_decrypt,
|
||||||
|
aes_encrypt,
|
||||||
|
aes_cbc_decrypt,
|
||||||
|
aes_cbc_decrypt_bytes,
|
||||||
|
aes_cbc_encrypt,
|
||||||
|
aes_ctr_decrypt,
|
||||||
|
aes_ctr_encrypt,
|
||||||
|
aes_gcm_decrypt_and_verify,
|
||||||
|
aes_gcm_decrypt_and_verify_bytes,
|
||||||
|
aes_decrypt_text
|
||||||
|
)
|
||||||
|
from yt_dlp.compat import compat_pycrypto_AES
|
||||||
from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes
|
from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes
|
||||||
import base64
|
import base64
|
||||||
|
|
||||||
@@ -27,18 +39,43 @@ class TestAES(unittest.TestCase):
|
|||||||
self.assertEqual(decrypted, msg)
|
self.assertEqual(decrypted, msg)
|
||||||
|
|
||||||
def test_cbc_decrypt(self):
|
def test_cbc_decrypt(self):
|
||||||
data = bytes_to_intlist(
|
data = b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\x27\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd'
|
||||||
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
|
decrypted = intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist(data), self.key, self.iv))
|
||||||
)
|
|
||||||
decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
|
|
||||||
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
|
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
|
||||||
|
if compat_pycrypto_AES:
|
||||||
|
decrypted = aes_cbc_decrypt_bytes(data, intlist_to_bytes(self.key), intlist_to_bytes(self.iv))
|
||||||
|
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
|
||||||
|
|
||||||
def test_cbc_encrypt(self):
|
def test_cbc_encrypt(self):
|
||||||
data = bytes_to_intlist(self.secret_msg)
|
data = bytes_to_intlist(self.secret_msg)
|
||||||
encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
|
encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
encrypted,
|
encrypted,
|
||||||
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")
|
b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd')
|
||||||
|
|
||||||
|
def test_ctr_decrypt(self):
|
||||||
|
data = bytes_to_intlist(b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08')
|
||||||
|
decrypted = intlist_to_bytes(aes_ctr_decrypt(data, self.key, self.iv))
|
||||||
|
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
|
||||||
|
|
||||||
|
def test_ctr_encrypt(self):
|
||||||
|
data = bytes_to_intlist(self.secret_msg)
|
||||||
|
encrypted = intlist_to_bytes(aes_ctr_encrypt(data, self.key, self.iv))
|
||||||
|
self.assertEqual(
|
||||||
|
encrypted,
|
||||||
|
b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08')
|
||||||
|
|
||||||
|
def test_gcm_decrypt(self):
|
||||||
|
data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f.\x08\xb4T\xe4/\x17\xbd'
|
||||||
|
authentication_tag = b'\xe8&I\x80rI\x07\x9d}YWuU@:e'
|
||||||
|
|
||||||
|
decrypted = intlist_to_bytes(aes_gcm_decrypt_and_verify(
|
||||||
|
bytes_to_intlist(data), self.key, bytes_to_intlist(authentication_tag), self.iv[:12]))
|
||||||
|
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
|
||||||
|
if compat_pycrypto_AES:
|
||||||
|
decrypted = aes_gcm_decrypt_and_verify_bytes(
|
||||||
|
data, intlist_to_bytes(self.key), authentication_tag, intlist_to_bytes(self.iv[:12]))
|
||||||
|
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
|
||||||
|
|
||||||
def test_decrypt_text(self):
|
def test_decrypt_text(self):
|
||||||
password = intlist_to_bytes(self.key).decode('utf-8')
|
password = intlist_to_bytes(self.key).decode('utf-8')
|
||||||
|
|||||||
@@ -3,16 +3,28 @@ from datetime import datetime, timezone
|
|||||||
|
|
||||||
from yt_dlp import cookies
|
from yt_dlp import cookies
|
||||||
from yt_dlp.cookies import (
|
from yt_dlp.cookies import (
|
||||||
CRYPTO_AVAILABLE,
|
|
||||||
LinuxChromeCookieDecryptor,
|
LinuxChromeCookieDecryptor,
|
||||||
MacChromeCookieDecryptor,
|
MacChromeCookieDecryptor,
|
||||||
WindowsChromeCookieDecryptor,
|
WindowsChromeCookieDecryptor,
|
||||||
YDLLogger,
|
|
||||||
parse_safari_cookies,
|
parse_safari_cookies,
|
||||||
pbkdf2_sha1,
|
pbkdf2_sha1,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class Logger:
|
||||||
|
def debug(self, message):
|
||||||
|
print(f'[verbose] {message}')
|
||||||
|
|
||||||
|
def info(self, message):
|
||||||
|
print(message)
|
||||||
|
|
||||||
|
def warning(self, message, only_once=False):
|
||||||
|
self.error(message)
|
||||||
|
|
||||||
|
def error(self, message):
|
||||||
|
raise Exception(message)
|
||||||
|
|
||||||
|
|
||||||
class MonkeyPatch:
|
class MonkeyPatch:
|
||||||
def __init__(self, module, temporary_values):
|
def __init__(self, module, temporary_values):
|
||||||
self._module = module
|
self._module = module
|
||||||
@@ -42,7 +54,7 @@ class TestCookies(unittest.TestCase):
|
|||||||
with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
|
with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
|
||||||
encrypted_value = b'v10\xccW%\xcd\xe6\xe6\x9fM" \xa7\xb0\xca\xe4\x07\xd6'
|
encrypted_value = b'v10\xccW%\xcd\xe6\xe6\x9fM" \xa7\xb0\xca\xe4\x07\xd6'
|
||||||
value = 'USD'
|
value = 'USD'
|
||||||
decryptor = LinuxChromeCookieDecryptor('Chrome', YDLLogger())
|
decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())
|
||||||
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
||||||
|
|
||||||
def test_chrome_cookie_decryptor_linux_v11(self):
|
def test_chrome_cookie_decryptor_linux_v11(self):
|
||||||
@@ -50,24 +62,23 @@ class TestCookies(unittest.TestCase):
|
|||||||
'KEYRING_AVAILABLE': True}):
|
'KEYRING_AVAILABLE': True}):
|
||||||
encrypted_value = b'v11#\x81\x10>`w\x8f)\xc0\xb2\xc1\r\xf4\x1al\xdd\x93\xfd\xf8\xf8N\xf2\xa9\x83\xf1\xe9o\x0elVQd'
|
encrypted_value = b'v11#\x81\x10>`w\x8f)\xc0\xb2\xc1\r\xf4\x1al\xdd\x93\xfd\xf8\xf8N\xf2\xa9\x83\xf1\xe9o\x0elVQd'
|
||||||
value = 'tz=Europe.London'
|
value = 'tz=Europe.London'
|
||||||
decryptor = LinuxChromeCookieDecryptor('Chrome', YDLLogger())
|
decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())
|
||||||
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
||||||
|
|
||||||
@unittest.skipIf(not CRYPTO_AVAILABLE, 'cryptography library not available')
|
|
||||||
def test_chrome_cookie_decryptor_windows_v10(self):
|
def test_chrome_cookie_decryptor_windows_v10(self):
|
||||||
with MonkeyPatch(cookies, {
|
with MonkeyPatch(cookies, {
|
||||||
'_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&'
|
'_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&'
|
||||||
}):
|
}):
|
||||||
encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
|
encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
|
||||||
value = '32101439'
|
value = '32101439'
|
||||||
decryptor = WindowsChromeCookieDecryptor('', YDLLogger())
|
decryptor = WindowsChromeCookieDecryptor('', Logger())
|
||||||
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
||||||
|
|
||||||
def test_chrome_cookie_decryptor_mac_v10(self):
|
def test_chrome_cookie_decryptor_mac_v10(self):
|
||||||
with MonkeyPatch(cookies, {'_get_mac_keyring_password': lambda *args, **kwargs: b'6eIDUdtKAacvlHwBVwvg/Q=='}):
|
with MonkeyPatch(cookies, {'_get_mac_keyring_password': lambda *args, **kwargs: b'6eIDUdtKAacvlHwBVwvg/Q=='}):
|
||||||
encrypted_value = b'v10\xb3\xbe\xad\xa1[\x9fC\xa1\x98\xe0\x9a\x01\xd9\xcf\xbfc'
|
encrypted_value = b'v10\xb3\xbe\xad\xa1[\x9fC\xa1\x98\xe0\x9a\x01\xd9\xcf\xbfc'
|
||||||
value = '2021-06-01-22'
|
value = '2021-06-01-22'
|
||||||
decryptor = MacChromeCookieDecryptor('', YDLLogger())
|
decryptor = MacChromeCookieDecryptor('', Logger())
|
||||||
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
self.assertEqual(decryptor.decrypt(encrypted_value), value)
|
||||||
|
|
||||||
def test_safari_cookie_parsing(self):
|
def test_safari_cookie_parsing(self):
|
||||||
|
|||||||
36
test/test_download.py
Normal file → Executable file
36
test/test_download.py
Normal file → Executable file
@@ -73,6 +73,8 @@ class TestDownload(unittest.TestCase):
|
|||||||
|
|
||||||
maxDiff = None
|
maxDiff = None
|
||||||
|
|
||||||
|
COMPLETED_TESTS = {}
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
"""Identify each test with the `add_ie` attribute, if available."""
|
"""Identify each test with the `add_ie` attribute, if available."""
|
||||||
|
|
||||||
@@ -94,6 +96,9 @@ class TestDownload(unittest.TestCase):
|
|||||||
def generator(test_case, tname):
|
def generator(test_case, tname):
|
||||||
|
|
||||||
def test_template(self):
|
def test_template(self):
|
||||||
|
if self.COMPLETED_TESTS.get(tname):
|
||||||
|
return
|
||||||
|
self.COMPLETED_TESTS[tname] = True
|
||||||
ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
|
ie = yt_dlp.extractor.get_info_extractor(test_case['name'])()
|
||||||
other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
|
other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
|
||||||
is_playlist = any(k.startswith('playlist') for k in test_case)
|
is_playlist = any(k.startswith('playlist') for k in test_case)
|
||||||
@@ -142,7 +147,7 @@ def generator(test_case, tname):
|
|||||||
expect_warnings(ydl, test_case.get('expected_warnings', []))
|
expect_warnings(ydl, test_case.get('expected_warnings', []))
|
||||||
|
|
||||||
def get_tc_filename(tc):
|
def get_tc_filename(tc):
|
||||||
return ydl.prepare_filename(tc.get('info_dict', {}))
|
return ydl.prepare_filename(dict(tc.get('info_dict', {})))
|
||||||
|
|
||||||
res_dict = None
|
res_dict = None
|
||||||
|
|
||||||
@@ -255,12 +260,12 @@ def generator(test_case, tname):
|
|||||||
|
|
||||||
|
|
||||||
# And add them to TestDownload
|
# And add them to TestDownload
|
||||||
for n, test_case in enumerate(defs):
|
tests_counter = {}
|
||||||
tname = 'test_' + str(test_case['name'])
|
for test_case in defs:
|
||||||
i = 1
|
name = test_case['name']
|
||||||
while hasattr(TestDownload, tname):
|
i = tests_counter.get(name, 0)
|
||||||
tname = 'test_%s_%d' % (test_case['name'], i)
|
tests_counter[name] = i + 1
|
||||||
i += 1
|
tname = f'test_{name}_{i}' if i else f'test_{name}'
|
||||||
test_method = generator(test_case, tname)
|
test_method = generator(test_case, tname)
|
||||||
test_method.__name__ = str(tname)
|
test_method.__name__ = str(tname)
|
||||||
ie_list = test_case.get('add_ie')
|
ie_list = test_case.get('add_ie')
|
||||||
@@ -269,5 +274,22 @@ for n, test_case in enumerate(defs):
|
|||||||
del test_method
|
del test_method
|
||||||
|
|
||||||
|
|
||||||
|
def batch_generator(name, num_tests):
|
||||||
|
|
||||||
|
def test_template(self):
|
||||||
|
for i in range(num_tests):
|
||||||
|
getattr(self, f'test_{name}_{i}' if i else f'test_{name}')()
|
||||||
|
|
||||||
|
return test_template
|
||||||
|
|
||||||
|
|
||||||
|
for name, num_tests in tests_counter.items():
|
||||||
|
test_method = batch_generator(name, num_tests)
|
||||||
|
test_method.__name__ = f'test_{name}_all'
|
||||||
|
test_method.add_ie = ''
|
||||||
|
setattr(TestDownload, test_method.__name__, test_method)
|
||||||
|
del test_method
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -112,6 +112,71 @@ class TestJSInterpreter(unittest.TestCase):
|
|||||||
''')
|
''')
|
||||||
self.assertEqual(jsi.call_function('z'), 5)
|
self.assertEqual(jsi.call_function('z'), 5)
|
||||||
|
|
||||||
|
def test_for_loop(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x() { a=0; for (i=0; i-10; i++) {a++} a }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x'), 10)
|
||||||
|
|
||||||
|
def test_switch(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x(f) { switch(f){
|
||||||
|
case 1:f+=1;
|
||||||
|
case 2:f+=2;
|
||||||
|
case 3:f+=3;break;
|
||||||
|
case 4:f+=4;
|
||||||
|
default:f=0;
|
||||||
|
} return f }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x', 1), 7)
|
||||||
|
self.assertEqual(jsi.call_function('x', 3), 6)
|
||||||
|
self.assertEqual(jsi.call_function('x', 5), 0)
|
||||||
|
|
||||||
|
def test_switch_default(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x(f) { switch(f){
|
||||||
|
case 2: f+=2;
|
||||||
|
default: f-=1;
|
||||||
|
case 5:
|
||||||
|
case 6: f+=6;
|
||||||
|
case 0: break;
|
||||||
|
case 1: f+=1;
|
||||||
|
} return f }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x', 1), 2)
|
||||||
|
self.assertEqual(jsi.call_function('x', 5), 11)
|
||||||
|
self.assertEqual(jsi.call_function('x', 9), 14)
|
||||||
|
|
||||||
|
def test_try(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x() { try{return 10} catch(e){return 5} }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x'), 10)
|
||||||
|
|
||||||
|
def test_for_loop_continue(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x() { a=0; for (i=0; i-10; i++) { continue; a++ } a }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x'), 0)
|
||||||
|
|
||||||
|
def test_for_loop_break(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x() { a=0; for (i=0; i-10; i++) { break; a++ } a }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x'), 0)
|
||||||
|
|
||||||
|
def test_literal_list(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x() { [1, 2, "asdf", [5, 6, 7]][3] }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x'), [5, 6, 7])
|
||||||
|
|
||||||
|
def test_comma(self):
|
||||||
|
jsi = JSInterpreter('''
|
||||||
|
function x() { a=5; a -= 1, a+=3; return a }
|
||||||
|
''')
|
||||||
|
self.assertEqual(jsi.call_function('x'), 7)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -8,13 +8,14 @@ import sys
|
|||||||
import unittest
|
import unittest
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
from test.helper import try_rm
|
from test.helper import is_download_test, try_rm
|
||||||
|
|
||||||
|
|
||||||
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||||
download_file = join(root_dir, 'test.webm')
|
download_file = join(root_dir, 'test.webm')
|
||||||
|
|
||||||
|
|
||||||
|
@is_download_test
|
||||||
class TestOverwrites(unittest.TestCase):
|
class TestOverwrites(unittest.TestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
# create an empty file
|
# create an empty file
|
||||||
|
|||||||
@@ -6,37 +6,38 @@ from __future__ import unicode_literals
|
|||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import unittest
|
import unittest
|
||||||
|
|
||||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
from yt_dlp import YoutubeDL
|
from yt_dlp import YoutubeDL
|
||||||
from yt_dlp.compat import compat_shlex_quote
|
from yt_dlp.compat import compat_shlex_quote
|
||||||
from yt_dlp.postprocessor import (
|
from yt_dlp.postprocessor import (
|
||||||
ExecAfterDownloadPP,
|
ExecPP,
|
||||||
FFmpegThumbnailsConvertorPP,
|
FFmpegThumbnailsConvertorPP,
|
||||||
MetadataFromFieldPP,
|
MetadataFromFieldPP,
|
||||||
MetadataFromTitlePP,
|
MetadataParserPP,
|
||||||
|
ModifyChaptersPP
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class TestMetadataFromField(unittest.TestCase):
|
class TestMetadataFromField(unittest.TestCase):
|
||||||
|
|
||||||
def test_format_to_regex(self):
|
def test_format_to_regex(self):
|
||||||
pp = MetadataFromFieldPP(None, ['title:%(title)s - %(artist)s'])
|
self.assertEqual(
|
||||||
self.assertEqual(pp._data[0]['regex'], r'(?P<title>.+)\ \-\ (?P<artist>.+)')
|
MetadataParserPP.format_to_regex('%(title)s - %(artist)s'),
|
||||||
|
r'(?P<title>.+)\ \-\ (?P<artist>.+)')
|
||||||
|
self.assertEqual(MetadataParserPP.format_to_regex(r'(?P<x>.+)'), r'(?P<x>.+)')
|
||||||
|
|
||||||
def test_field_to_outtmpl(self):
|
def test_field_to_template(self):
|
||||||
pp = MetadataFromFieldPP(None, ['title:%(title)s : %(artist)s'])
|
self.assertEqual(MetadataParserPP.field_to_template('title'), '%(title)s')
|
||||||
self.assertEqual(pp._data[0]['tmpl'], '%(title)s')
|
self.assertEqual(MetadataParserPP.field_to_template('1'), '1')
|
||||||
|
self.assertEqual(MetadataParserPP.field_to_template('foo bar'), 'foo bar')
|
||||||
|
self.assertEqual(MetadataParserPP.field_to_template(' literal'), ' literal')
|
||||||
|
|
||||||
def test_in_out_seperation(self):
|
def test_metadatafromfield(self):
|
||||||
pp = MetadataFromFieldPP(None, ['%(title)s \\: %(artist)s:%(title)s : %(artist)s'])
|
self.assertEqual(
|
||||||
self.assertEqual(pp._data[0]['in'], '%(title)s : %(artist)s')
|
MetadataFromFieldPP.to_action('%(title)s \\: %(artist)s:%(title)s : %(artist)s'),
|
||||||
self.assertEqual(pp._data[0]['out'], '%(title)s : %(artist)s')
|
(MetadataParserPP.Actions.INTERPRET, '%(title)s : %(artist)s', '%(title)s : %(artist)s'))
|
||||||
|
|
||||||
|
|
||||||
class TestMetadataFromTitle(unittest.TestCase):
|
|
||||||
def test_format_to_regex(self):
|
|
||||||
pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
|
|
||||||
self.assertEqual(pp._titleregex, r'(?P<title>.+)\ \-\ (?P<artist>.+)')
|
|
||||||
|
|
||||||
|
|
||||||
class TestConvertThumbnail(unittest.TestCase):
|
class TestConvertThumbnail(unittest.TestCase):
|
||||||
@@ -60,12 +61,502 @@ class TestConvertThumbnail(unittest.TestCase):
|
|||||||
os.remove(file.format(out))
|
os.remove(file.format(out))
|
||||||
|
|
||||||
|
|
||||||
class TestExecAfterDownload(unittest.TestCase):
|
class TestExec(unittest.TestCase):
|
||||||
def test_parse_cmd(self):
|
def test_parse_cmd(self):
|
||||||
pp = ExecAfterDownloadPP(YoutubeDL(), '')
|
pp = ExecPP(YoutubeDL(), '')
|
||||||
info = {'filepath': 'file name'}
|
info = {'filepath': 'file name'}
|
||||||
quoted_filepath = compat_shlex_quote(info['filepath'])
|
cmd = 'echo %s' % compat_shlex_quote(info['filepath'])
|
||||||
|
|
||||||
self.assertEqual(pp.parse_cmd('echo', info), 'echo %s' % quoted_filepath)
|
self.assertEqual(pp.parse_cmd('echo', info), cmd)
|
||||||
self.assertEqual(pp.parse_cmd('echo.{}', info), 'echo.%s' % quoted_filepath)
|
self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
|
||||||
self.assertEqual(pp.parse_cmd('echo "%(filepath)s"', info), 'echo "%s"' % info['filepath'])
|
self.assertEqual(pp.parse_cmd('echo %(filepath)q', info), cmd)
|
||||||
|
|
||||||
|
|
||||||
|
class TestModifyChaptersPP(unittest.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
self._pp = ModifyChaptersPP(YoutubeDL())
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _sponsor_chapter(start, end, cat, remove=False):
|
||||||
|
c = {'start_time': start, 'end_time': end, '_categories': [(cat, start, end)]}
|
||||||
|
if remove:
|
||||||
|
c['remove'] = True
|
||||||
|
return c
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _chapter(start, end, title=None, remove=False):
|
||||||
|
c = {'start_time': start, 'end_time': end}
|
||||||
|
if title is not None:
|
||||||
|
c['title'] = title
|
||||||
|
if remove:
|
||||||
|
c['remove'] = True
|
||||||
|
return c
|
||||||
|
|
||||||
|
def _chapters(self, ends, titles):
|
||||||
|
self.assertEqual(len(ends), len(titles))
|
||||||
|
start = 0
|
||||||
|
chapters = []
|
||||||
|
for e, t in zip(ends, titles):
|
||||||
|
chapters.append(self._chapter(start, e, t))
|
||||||
|
start = e
|
||||||
|
return chapters
|
||||||
|
|
||||||
|
def _remove_marked_arrange_sponsors_test_impl(
|
||||||
|
self, chapters, expected_chapters, expected_removed):
|
||||||
|
actual_chapters, actual_removed = (
|
||||||
|
self._pp._remove_marked_arrange_sponsors(chapters))
|
||||||
|
for c in actual_removed:
|
||||||
|
c.pop('title', None)
|
||||||
|
c.pop('_categories', None)
|
||||||
|
actual_chapters = [{
|
||||||
|
'start_time': c['start_time'],
|
||||||
|
'end_time': c['end_time'],
|
||||||
|
'title': c['title'],
|
||||||
|
} for c in actual_chapters]
|
||||||
|
self.assertSequenceEqual(expected_chapters, actual_chapters)
|
||||||
|
self.assertSequenceEqual(expected_removed, actual_removed)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
|
||||||
|
chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||||
|
self._sponsor_chapter(30, 40, 'preview'),
|
||||||
|
self._sponsor_chapter(50, 60, 'sponsor')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 30, 40, 50, 60, 70],
|
||||||
|
['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Preview/Recap',
|
||||||
|
'c', '[SponsorBlock]: Sponsor', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
|
||||||
|
chapters = self._chapters([120], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
|
||||||
|
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Sponsor',
|
||||||
|
'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Unpaid/Self Promotion',
|
||||||
|
'c', '[SponsorBlock]: Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion, Sponsor',
|
||||||
|
'[SponsorBlock]: Unpaid/Self Promotion'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
|
||||||
|
cuts = [self._chapter(10, 20, remove=True),
|
||||||
|
self._sponsor_chapter(30, 40, 'sponsor', remove=True),
|
||||||
|
self._chapter(50, 60, remove=True)]
|
||||||
|
chapters = self._chapters([70], ['c']) + cuts
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([40], ['c']), cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||||
|
self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
|
||||||
|
self._sponsor_chapter(50, 60, 'interaction')]
|
||||||
|
expected = self._chapters([10, 20, 40, 50, 60],
|
||||||
|
['c', '[SponsorBlock]: Sponsor', 'c',
|
||||||
|
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, expected, [self._chapter(30, 40, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
|
||||||
|
cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
|
||||||
|
self._chapter(40, 50, remove=True)]
|
||||||
|
chapters = self._chapters([70], ['c']) + [self._sponsor_chapter(10, 60, 'sponsor')] + cuts
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
|
||||||
|
cuts = [self._sponsor_chapter(20, 50, 'selpromo', remove=True)]
|
||||||
|
chapters = self._chapters([60], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 20, 'intro'),
|
||||||
|
self._sponsor_chapter(30, 40, 'sponsor'),
|
||||||
|
self._sponsor_chapter(50, 60, 'outro'),
|
||||||
|
] + cuts
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||||
|
self._sponsor_chapter(20, 30, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(30, 40, 'interaction')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 30, 40, 70],
|
||||||
|
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 20, 'sponsor'),
|
||||||
|
self._sponsor_chapter(20, 30, 'interaction', remove=True),
|
||||||
|
self._chapter(30, 40, remove=True),
|
||||||
|
self._sponsor_chapter(40, 50, 'selpromo', remove=True),
|
||||||
|
self._sponsor_chapter(50, 60, 'interaction')]
|
||||||
|
expected = self._chapters([10, 20, 30, 40],
|
||||||
|
['c', '[SponsorBlock]: Sponsor',
|
||||||
|
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, expected, [self._chapter(20, 50, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 30, 'sponsor'),
|
||||||
|
self._sponsor_chapter(20, 50, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(40, 60, 'interaction')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 30, 40, 50, 60, 70],
|
||||||
|
['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
|
||||||
|
'[SponsorBlock]: Interaction Reminder', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 30, 'sponsor', remove=True),
|
||||||
|
self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
|
||||||
|
self._sponsor_chapter(40, 60, 'interaction', remove=True)]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
|
||||||
|
chapters = self._chapters([170], ['c']) + [
|
||||||
|
self._sponsor_chapter(0, 30, 'intro'),
|
||||||
|
self._sponsor_chapter(20, 50, 'sponsor'),
|
||||||
|
self._sponsor_chapter(40, 60, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(70, 90, 'sponsor'),
|
||||||
|
self._sponsor_chapter(80, 100, 'sponsor'),
|
||||||
|
self._sponsor_chapter(90, 110, 'sponsor'),
|
||||||
|
self._sponsor_chapter(120, 140, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(130, 160, 'interaction'),
|
||||||
|
self._sponsor_chapter(150, 170, 'outro')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[20, 30, 40, 50, 60, 70, 110, 120, 130, 140, 150, 160, 170],
|
||||||
|
['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Intermission/Intro Animation, Sponsor', '[SponsorBlock]: Sponsor',
|
||||||
|
'[SponsorBlock]: Sponsor, Unpaid/Self Promotion', '[SponsorBlock]: Unpaid/Self Promotion', 'c',
|
||||||
|
'[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Unpaid/Self Promotion, Interaction Reminder',
|
||||||
|
'[SponsorBlock]: Interaction Reminder',
|
||||||
|
'[SponsorBlock]: Interaction Reminder, Endcards/Credits', '[SponsorBlock]: Endcards/Credits'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
|
||||||
|
chapters = self._chapters([170], ['c']) + [
|
||||||
|
self._chapter(0, 30, remove=True),
|
||||||
|
self._sponsor_chapter(20, 50, 'sponsor', remove=True),
|
||||||
|
self._chapter(40, 60, remove=True),
|
||||||
|
self._sponsor_chapter(70, 90, 'sponsor', remove=True),
|
||||||
|
self._chapter(80, 100, remove=True),
|
||||||
|
self._chapter(90, 110, remove=True),
|
||||||
|
self._sponsor_chapter(120, 140, 'sponsor', remove=True),
|
||||||
|
self._sponsor_chapter(130, 160, 'selfpromo', remove=True),
|
||||||
|
self._chapter(150, 170, remove=True)]
|
||||||
|
expected_cuts = [self._chapter(0, 60, remove=True),
|
||||||
|
self._chapter(70, 110, remove=True),
|
||||||
|
self._chapter(120, 170, remove=True)]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([20], ['c']), expected_cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
|
||||||
|
chapters = self._chapters([60], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 60, 'sponsor'),
|
||||||
|
self._sponsor_chapter(10, 40, 'intro'),
|
||||||
|
self._sponsor_chapter(30, 50, 'interaction'),
|
||||||
|
self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
|
||||||
|
self._sponsor_chapter(40, 50, 'interaction'),
|
||||||
|
self._sponsor_chapter(50, 60, 'outro')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 30, 40], ['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation', '[SponsorBlock]: Sponsor, Endcards/Credits'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, expected, [self._chapter(30, 50, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 30, 'sponsor'),
|
||||||
|
self._sponsor_chapter(20, 50, 'interaction'),
|
||||||
|
self._sponsor_chapter(30, 50, 'selpromo', remove=True),
|
||||||
|
self._sponsor_chapter(40, 60, 'sponsor'),
|
||||||
|
self._sponsor_chapter(50, 60, 'interaction')]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 40, 50], ['c', '[SponsorBlock]: Sponsor',
|
||||||
|
'[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, expected, [self._chapter(30, 50, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
|
||||||
|
chapters = self._chapters([70], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 60, 'sponsor'),
|
||||||
|
self._sponsor_chapter(20, 60, 'interaction'),
|
||||||
|
self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 40, 50], ['c', '[SponsorBlock]: Sponsor',
|
||||||
|
'[SponsorBlock]: Sponsor, Interaction Reminder', 'c'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, expected, [self._chapter(30, 50, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
|
||||||
|
chapters = self._chapters([200], ['c']) + [
|
||||||
|
self._sponsor_chapter(10, 40, 'sponsor'),
|
||||||
|
self._sponsor_chapter(10, 30, 'intro'),
|
||||||
|
self._chapter(20, 30, remove=True),
|
||||||
|
self._sponsor_chapter(30, 40, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(50, 70, 'sponsor'),
|
||||||
|
self._sponsor_chapter(60, 80, 'interaction'),
|
||||||
|
self._chapter(70, 80, remove=True),
|
||||||
|
self._sponsor_chapter(70, 90, 'sponsor'),
|
||||||
|
self._sponsor_chapter(80, 100, 'interaction'),
|
||||||
|
self._sponsor_chapter(120, 170, 'selfpromo'),
|
||||||
|
self._sponsor_chapter(130, 180, 'outro'),
|
||||||
|
self._chapter(140, 150, remove=True),
|
||||||
|
self._chapter(150, 160, remove=True)]
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 30, 40, 50, 70, 80, 100, 110, 130, 140, 160],
|
||||||
|
['c', '[SponsorBlock]: Sponsor, Intermission/Intro Animation', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
|
||||||
|
'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Interaction Reminder',
|
||||||
|
'[SponsorBlock]: Interaction Reminder', 'c', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Unpaid/Self Promotion, Endcards/Credits', '[SponsorBlock]: Endcards/Credits', 'c'])
|
||||||
|
expected_cuts = [self._chapter(20, 30, remove=True),
|
||||||
|
self._chapter(70, 80, remove=True),
|
||||||
|
self._chapter(140, 160, remove=True)]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
|
||||||
|
chapters = (self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5'])
|
||||||
|
+ [self._sponsor_chapter(10, 90, 'sponsor')])
|
||||||
|
expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
|
||||||
|
cuts = [self._chapter(10, 90, remove=True)]
|
||||||
|
chapters = self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']) + cuts
|
||||||
|
expected = self._chapters([10, 20], ['c1', 'c5'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
|
||||||
|
chapters = (self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
+ [self._sponsor_chapter(20, 30, 'sponsor'),
|
||||||
|
self._sponsor_chapter(50, 70, 'selfpromo')])
|
||||||
|
expected = self._chapters([10, 20, 30, 40, 50, 70, 80],
|
||||||
|
['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
|
||||||
|
'[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthers(self):
|
||||||
|
cuts = [self._chapter(20, 30, remove=True), self._chapter(50, 70, remove=True)]
|
||||||
|
chapters = self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']) + cuts
|
||||||
|
expected = self._chapters([10, 30, 40, 50], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
|
||||||
|
chapters = (self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
+ [self._sponsor_chapter(10, 30, 'music_offtopic')])
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 30, 40, 50, 60],
|
||||||
|
['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
|
||||||
|
cuts = [self._chapter(10, 30, remove=True)]
|
||||||
|
chapters = self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']) + cuts
|
||||||
|
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
|
||||||
|
chapters = (self._chapters([10, 20, 40], ['c1', 'c2', 'c3'])
|
||||||
|
+ [self._sponsor_chapter(20, 30, 'sponsor')])
|
||||||
|
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
|
||||||
|
cuts = [self._chapter(20, 30, remove=True)]
|
||||||
|
chapters = self._chapters([10, 20, 40], ['c1', 'c2', 'c3']) + cuts
|
||||||
|
expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
|
||||||
|
chapters = (self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
|
||||||
|
+ [self._sponsor_chapter(20, 30, 'sponsor')])
|
||||||
|
expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
|
||||||
|
cuts = [self._chapter(20, 30, remove=True)]
|
||||||
|
chapters = self._chapters([10, 30, 40], ['c1', 'c2', 'c3']) + cuts
|
||||||
|
expected = self._chapters([10, 20, 30], ['c1', 'c2', 'c3'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
|
||||||
|
chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
+ [self._sponsor_chapter(10, 30, 'sponsor')])
|
||||||
|
expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
|
||||||
|
cuts = [self._chapter(10, 30, remove=True)]
|
||||||
|
chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
|
||||||
|
expected = self._chapters([10, 20], ['c1', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
|
||||||
|
chapters = (self._chapters([20, 40, 60], ['c1', 'c2', 'c3'])
|
||||||
|
+ [self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')])
|
||||||
|
expected = self._chapters(
|
||||||
|
[10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
|
||||||
|
cuts = [self._chapter(0, 10, remove=True), self._chapter(50, 60, remove=True)]
|
||||||
|
chapters = self._chapters([20, 40, 60], ['c1', 'c2', 'c3']) + cuts
|
||||||
|
expected = self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
|
||||||
|
chapters = (self._chapters([10, 40, 50], ['c1', 'c2', 'c3'])
|
||||||
|
+ [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')])
|
||||||
|
expected = self._chapters(
|
||||||
|
[20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(self):
|
||||||
|
cuts = [self._chapter(0, 20, remove=True), self._chapter(30, 50, remove=True)]
|
||||||
|
chapters = self._chapters([10, 40, 50], ['c1', 'c2', 'c3']) + cuts
|
||||||
|
expected = self._chapters([10], ['c2'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
|
||||||
|
chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
+ [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')])
|
||||||
|
expected = self._chapters([20, 40], ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_EverythingCut(self):
|
||||||
|
cuts = [self._chapter(0, 20, remove=True), self._chapter(20, 40, remove=True)]
|
||||||
|
chapters = self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']) + cuts
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, [], [self._chapter(0, 40, remove=True)])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinyChaptersInTheOriginalArePreserved(self):
|
||||||
|
chapters = self._chapters([0.1, 0.2, 0.3, 0.4], ['c1', 'c2', 'c3', 'c4'])
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinySponsorsAreIgnored(self):
|
||||||
|
chapters = [self._sponsor_chapter(0, 0.1, 'intro'), self._chapter(0.1, 0.2, 'c1'),
|
||||||
|
self._sponsor_chapter(0.2, 0.3, 'sponsor'), self._chapter(0.3, 0.4, 'c2'),
|
||||||
|
self._sponsor_chapter(0.4, 0.5, 'outro')]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([0.3, 0.5], ['c1', 'c2']), [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromCutsAreIgnored(self):
|
||||||
|
cuts = [self._chapter(1.5, 2.5, remove=True)]
|
||||||
|
chapters = self._chapters([2, 3, 3.5], ['c1', 'c2', 'c3']) + cuts
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([2, 2.5], ['c1', 'c3']), cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SingleTinyChapterIsPreserved(self):
|
||||||
|
cuts = [self._chapter(0.5, 2, remove=True)]
|
||||||
|
chapters = self._chapters([2], ['c']) + cuts
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([0.5], ['c']), cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinyChapterAtTheStartPrependedToTheNext(self):
|
||||||
|
cuts = [self._chapter(0.5, 2, remove=True)]
|
||||||
|
chapters = self._chapters([2, 4], ['c1', 'c2']) + cuts
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([2.5], ['c2']), cuts)
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
|
||||||
|
chapters = self._chapters([1, 3, 4], ['c1', 'c2', 'c3']) + [
|
||||||
|
self._sponsor_chapter(1.5, 2.5, 'sponsor')]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([1.5, 2.5, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
|
||||||
|
chapters = self._chapters([2, 3, 5], ['c1', 'c2', 'c3']) + [
|
||||||
|
self._sponsor_chapter(1, 3, 'sponsor'),
|
||||||
|
self._sponsor_chapter(2.5, 4, 'selfpromo')
|
||||||
|
]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([1, 3, 4, 5], [
|
||||||
|
'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_TinySponsorsPrependedToTheNextSponsor(self):
|
||||||
|
chapters = self._chapters([4], ['c']) + [
|
||||||
|
self._sponsor_chapter(1.5, 2, 'sponsor'),
|
||||||
|
self._sponsor_chapter(2, 4, 'selfpromo')
|
||||||
|
]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([1.5, 4], ['c', '[SponsorBlock]: Unpaid/Self Promotion']), [])
|
||||||
|
|
||||||
|
def test_remove_marked_arrange_sponsors_SmallestSponsorInTheOverlapGetsNamed(self):
|
||||||
|
self._pp._sponsorblock_chapter_title = '[SponsorBlock]: %(name)s'
|
||||||
|
chapters = self._chapters([10], ['c']) + [
|
||||||
|
self._sponsor_chapter(2, 8, 'sponsor'),
|
||||||
|
self._sponsor_chapter(4, 6, 'selfpromo')
|
||||||
|
]
|
||||||
|
self._remove_marked_arrange_sponsors_test_impl(
|
||||||
|
chapters, self._chapters([2, 4, 6, 8, 10], [
|
||||||
|
'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
|
||||||
|
'[SponsorBlock]: Sponsor', 'c'
|
||||||
|
]), [])
|
||||||
|
|
||||||
|
def test_make_concat_opts_CommonCase(self):
|
||||||
|
sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
|
||||||
|
expected = '''ffconcat version 1.0
|
||||||
|
file 'file:test'
|
||||||
|
outpoint 1.000000
|
||||||
|
file 'file:test'
|
||||||
|
inpoint 2.000000
|
||||||
|
outpoint 10.000000
|
||||||
|
file 'file:test'
|
||||||
|
inpoint 20.000000
|
||||||
|
'''
|
||||||
|
opts = self._pp._make_concat_opts(sponsor_chapters, 30)
|
||||||
|
self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))
|
||||||
|
|
||||||
|
def test_make_concat_opts_NoZeroDurationChunkAtVideoStart(self):
|
||||||
|
sponsor_chapters = [self._chapter(0, 1, 's1'), self._chapter(10, 20, 's2')]
|
||||||
|
expected = '''ffconcat version 1.0
|
||||||
|
file 'file:test'
|
||||||
|
inpoint 1.000000
|
||||||
|
outpoint 10.000000
|
||||||
|
file 'file:test'
|
||||||
|
inpoint 20.000000
|
||||||
|
'''
|
||||||
|
opts = self._pp._make_concat_opts(sponsor_chapters, 30)
|
||||||
|
self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))
|
||||||
|
|
||||||
|
def test_make_concat_opts_NoZeroDurationChunkAtVideoEnd(self):
|
||||||
|
sponsor_chapters = [self._chapter(1, 2, 's1'), self._chapter(10, 20, 's2')]
|
||||||
|
expected = '''ffconcat version 1.0
|
||||||
|
file 'file:test'
|
||||||
|
outpoint 1.000000
|
||||||
|
file 'file:test'
|
||||||
|
inpoint 2.000000
|
||||||
|
outpoint 10.000000
|
||||||
|
'''
|
||||||
|
opts = self._pp._make_concat_opts(sponsor_chapters, 20)
|
||||||
|
self.assertEqual(expected, ''.join(self._pp._concat_spec(['test'] * len(opts), opts)))
|
||||||
|
|
||||||
|
def test_quote_for_concat_RunsOfQuotes(self):
|
||||||
|
self.assertEqual(
|
||||||
|
r"'special '\'' '\'\''characters'\'\'\''galore'",
|
||||||
|
self._pp._quote_for_ffmpeg("special ' ''characters'''galore"))
|
||||||
|
|
||||||
|
def test_quote_for_concat_QuotesAtStart(self):
|
||||||
|
self.assertEqual(
|
||||||
|
r"\'\'\''special '\'' characters '\'' galore'",
|
||||||
|
self._pp._quote_for_ffmpeg("'''special ' characters ' galore"))
|
||||||
|
|
||||||
|
def test_quote_for_concat_QuotesAtEnd(self):
|
||||||
|
self.assertEqual(
|
||||||
|
r"'special '\'' characters '\'' galore'\'\'\'",
|
||||||
|
self._pp._quote_for_ffmpeg("special ' characters ' galore'''"))
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ from yt_dlp.extractor import (
|
|||||||
CeskaTelevizeIE,
|
CeskaTelevizeIE,
|
||||||
LyndaIE,
|
LyndaIE,
|
||||||
NPOIE,
|
NPOIE,
|
||||||
|
PBSIE,
|
||||||
ComedyCentralIE,
|
ComedyCentralIE,
|
||||||
NRKTVIE,
|
NRKTVIE,
|
||||||
RaiPlayIE,
|
RaiPlayIE,
|
||||||
@@ -372,5 +373,42 @@ class TestDemocracynowSubtitles(BaseTestSubtitles):
|
|||||||
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
|
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
|
||||||
|
|
||||||
|
|
||||||
|
@is_download_test
|
||||||
|
class TestPBSSubtitles(BaseTestSubtitles):
|
||||||
|
url = 'https://www.pbs.org/video/how-fantasy-reflects-our-world-picecq/'
|
||||||
|
IE = PBSIE
|
||||||
|
|
||||||
|
def test_allsubtitles(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['allsubtitles'] = True
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertEqual(set(subtitles.keys()), set(['en']))
|
||||||
|
|
||||||
|
def test_subtitles_dfxp_format(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['subtitlesformat'] = 'dfxp'
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertIn(md5(subtitles['en']), ['643b034254cdc3768ff1e750b6b5873b'])
|
||||||
|
|
||||||
|
def test_subtitles_vtt_format(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['subtitlesformat'] = 'vtt'
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertIn(
|
||||||
|
md5(subtitles['en']), ['937a05711555b165d4c55a9667017045', 'f49ea998d6824d94959c8152a368ff73'])
|
||||||
|
|
||||||
|
def test_subtitles_srt_format(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['subtitlesformat'] = 'srt'
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertIn(md5(subtitles['en']), ['2082c21b43759d9bf172931b2f2ca371'])
|
||||||
|
|
||||||
|
def test_subtitles_sami_format(self):
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['subtitlesformat'] = 'sami'
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertIn(md5(subtitles['en']), ['4256b16ac7da6a6780fafd04294e85cd'])
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|||||||
@@ -62,6 +62,7 @@ from yt_dlp.utils import (
|
|||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
parse_resolution,
|
parse_resolution,
|
||||||
parse_bitrate,
|
parse_bitrate,
|
||||||
|
parse_qs,
|
||||||
pkcs1pad,
|
pkcs1pad,
|
||||||
read_batch_urls,
|
read_batch_urls,
|
||||||
sanitize_filename,
|
sanitize_filename,
|
||||||
@@ -117,8 +118,6 @@ from yt_dlp.compat import (
|
|||||||
compat_getenv,
|
compat_getenv,
|
||||||
compat_os_name,
|
compat_os_name,
|
||||||
compat_setenv,
|
compat_setenv,
|
||||||
compat_urlparse,
|
|
||||||
compat_parse_qs,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -688,38 +687,36 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertTrue(isinstance(data, bytes))
|
self.assertTrue(isinstance(data, bytes))
|
||||||
|
|
||||||
def test_update_url_query(self):
|
def test_update_url_query(self):
|
||||||
def query_dict(url):
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
return compat_parse_qs(compat_urlparse.urlparse(url).query)
|
|
||||||
self.assertEqual(query_dict(update_url_query(
|
|
||||||
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
|
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
|
||||||
query_dict('http://example.com/path?quality=HD&format=mp4'))
|
parse_qs('http://example.com/path?quality=HD&format=mp4'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
|
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
|
||||||
query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
|
parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
|
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
|
||||||
query_dict('http://example.com/path?fields=id,formats,subtitles'))
|
parse_qs('http://example.com/path?fields=id,formats,subtitles'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
|
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
|
||||||
query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
|
parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path?manifest=f4m', {'manifest': []})),
|
'http://example.com/path?manifest=f4m', {'manifest': []})),
|
||||||
query_dict('http://example.com/path'))
|
parse_qs('http://example.com/path'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
|
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
|
||||||
query_dict('http://example.com/path?system=LINUX'))
|
parse_qs('http://example.com/path?system=LINUX'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
|
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
|
||||||
query_dict('http://example.com/path?fields=id,formats,subtitles'))
|
parse_qs('http://example.com/path?fields=id,formats,subtitles'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'width': 1080, 'height': 720})),
|
'http://example.com/path', {'width': 1080, 'height': 720})),
|
||||||
query_dict('http://example.com/path?width=1080&height=720'))
|
parse_qs('http://example.com/path?width=1080&height=720'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'bitrate': 5020.43})),
|
'http://example.com/path', {'bitrate': 5020.43})),
|
||||||
query_dict('http://example.com/path?bitrate=5020.43'))
|
parse_qs('http://example.com/path?bitrate=5020.43'))
|
||||||
self.assertEqual(query_dict(update_url_query(
|
self.assertEqual(parse_qs(update_url_query(
|
||||||
'http://example.com/path', {'test': '第二行тест'})),
|
'http://example.com/path', {'test': '第二行тест'})),
|
||||||
query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
|
parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
|
||||||
|
|
||||||
def test_multipart_encode(self):
|
def test_multipart_encode(self):
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
@@ -851,30 +848,52 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
|
self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
|
||||||
'vcodec': 'avc1.77.30',
|
'vcodec': 'avc1.77.30',
|
||||||
'acodec': 'mp4a.40.2',
|
'acodec': 'mp4a.40.2',
|
||||||
|
'dynamic_range': None,
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs('mp4a.40.2'), {
|
self.assertEqual(parse_codecs('mp4a.40.2'), {
|
||||||
'vcodec': 'none',
|
'vcodec': 'none',
|
||||||
'acodec': 'mp4a.40.2',
|
'acodec': 'mp4a.40.2',
|
||||||
|
'dynamic_range': None,
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
|
self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
|
||||||
'vcodec': 'avc1.42001e',
|
'vcodec': 'avc1.42001e',
|
||||||
'acodec': 'mp4a.40.5',
|
'acodec': 'mp4a.40.5',
|
||||||
|
'dynamic_range': None,
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs('avc3.640028'), {
|
self.assertEqual(parse_codecs('avc3.640028'), {
|
||||||
'vcodec': 'avc3.640028',
|
'vcodec': 'avc3.640028',
|
||||||
'acodec': 'none',
|
'acodec': 'none',
|
||||||
|
'dynamic_range': None,
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
|
self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
|
||||||
'vcodec': 'h264',
|
'vcodec': 'h264',
|
||||||
'acodec': 'aac',
|
'acodec': 'aac',
|
||||||
|
'dynamic_range': None,
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs('av01.0.05M.08'), {
|
self.assertEqual(parse_codecs('av01.0.05M.08'), {
|
||||||
'vcodec': 'av01.0.05M.08',
|
'vcodec': 'av01.0.05M.08',
|
||||||
'acodec': 'none',
|
'acodec': 'none',
|
||||||
|
'dynamic_range': None,
|
||||||
|
})
|
||||||
|
self.assertEqual(parse_codecs('vp9.2'), {
|
||||||
|
'vcodec': 'vp9.2',
|
||||||
|
'acodec': 'none',
|
||||||
|
'dynamic_range': 'HDR10',
|
||||||
|
})
|
||||||
|
self.assertEqual(parse_codecs('av01.0.12M.10.0.110.09.16.09.0'), {
|
||||||
|
'vcodec': 'av01.0.12M.10',
|
||||||
|
'acodec': 'none',
|
||||||
|
'dynamic_range': 'HDR10',
|
||||||
|
})
|
||||||
|
self.assertEqual(parse_codecs('dvhe'), {
|
||||||
|
'vcodec': 'dvhe',
|
||||||
|
'acodec': 'none',
|
||||||
|
'dynamic_range': 'DV',
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs('theora, vorbis'), {
|
self.assertEqual(parse_codecs('theora, vorbis'), {
|
||||||
'vcodec': 'theora',
|
'vcodec': 'theora',
|
||||||
'acodec': 'vorbis',
|
'acodec': 'vorbis',
|
||||||
|
'dynamic_range': None,
|
||||||
})
|
})
|
||||||
self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
|
self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
|
||||||
'vcodec': 'unknownvcodec',
|
'vcodec': 'unknownvcodec',
|
||||||
@@ -1144,12 +1163,15 @@ class TestUtil(unittest.TestCase):
|
|||||||
def test_parse_resolution(self):
|
def test_parse_resolution(self):
|
||||||
self.assertEqual(parse_resolution(None), {})
|
self.assertEqual(parse_resolution(None), {})
|
||||||
self.assertEqual(parse_resolution(''), {})
|
self.assertEqual(parse_resolution(''), {})
|
||||||
self.assertEqual(parse_resolution('1920x1080'), {'width': 1920, 'height': 1080})
|
self.assertEqual(parse_resolution(' 1920x1080'), {'width': 1920, 'height': 1080})
|
||||||
self.assertEqual(parse_resolution('1920×1080'), {'width': 1920, 'height': 1080})
|
self.assertEqual(parse_resolution('1920×1080 '), {'width': 1920, 'height': 1080})
|
||||||
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
|
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
|
||||||
self.assertEqual(parse_resolution('720p'), {'height': 720})
|
self.assertEqual(parse_resolution('720p'), {'height': 720})
|
||||||
self.assertEqual(parse_resolution('4k'), {'height': 2160})
|
self.assertEqual(parse_resolution('4k'), {'height': 2160})
|
||||||
self.assertEqual(parse_resolution('8K'), {'height': 4320})
|
self.assertEqual(parse_resolution('8K'), {'height': 4320})
|
||||||
|
self.assertEqual(parse_resolution('pre_1920x1080_post'), {'width': 1920, 'height': 1080})
|
||||||
|
self.assertEqual(parse_resolution('ep1x2'), {})
|
||||||
|
self.assertEqual(parse_resolution('1920, 1080'), {'width': 1920, 'height': 1080})
|
||||||
|
|
||||||
def test_parse_bitrate(self):
|
def test_parse_bitrate(self):
|
||||||
self.assertEqual(parse_bitrate(None), None)
|
self.assertEqual(parse_bitrate(None), None)
|
||||||
@@ -1207,35 +1229,12 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
|||||||
'9999 51')
|
'9999 51')
|
||||||
|
|
||||||
def test_match_str(self):
|
def test_match_str(self):
|
||||||
self.assertRaises(ValueError, match_str, 'xy>foobar', {})
|
# Unary
|
||||||
self.assertFalse(match_str('xy', {'x': 1200}))
|
self.assertFalse(match_str('xy', {'x': 1200}))
|
||||||
self.assertTrue(match_str('!xy', {'x': 1200}))
|
self.assertTrue(match_str('!xy', {'x': 1200}))
|
||||||
self.assertTrue(match_str('x', {'x': 1200}))
|
self.assertTrue(match_str('x', {'x': 1200}))
|
||||||
self.assertFalse(match_str('!x', {'x': 1200}))
|
self.assertFalse(match_str('!x', {'x': 1200}))
|
||||||
self.assertTrue(match_str('x', {'x': 0}))
|
self.assertTrue(match_str('x', {'x': 0}))
|
||||||
self.assertFalse(match_str('x>0', {'x': 0}))
|
|
||||||
self.assertFalse(match_str('x>0', {}))
|
|
||||||
self.assertTrue(match_str('x>?0', {}))
|
|
||||||
self.assertTrue(match_str('x>1K', {'x': 1200}))
|
|
||||||
self.assertFalse(match_str('x>2K', {'x': 1200}))
|
|
||||||
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
|
|
||||||
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
|
|
||||||
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
|
|
||||||
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
|
|
||||||
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
|
|
||||||
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
|
|
||||||
self.assertFalse(match_str(
|
|
||||||
'like_count > 100 & dislike_count <? 50 & description',
|
|
||||||
{'like_count': 90, 'description': 'foo'}))
|
|
||||||
self.assertTrue(match_str(
|
|
||||||
'like_count > 100 & dislike_count <? 50 & description',
|
|
||||||
{'like_count': 190, 'description': 'foo'}))
|
|
||||||
self.assertFalse(match_str(
|
|
||||||
'like_count > 100 & dislike_count <? 50 & description',
|
|
||||||
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
|
|
||||||
self.assertFalse(match_str(
|
|
||||||
'like_count > 100 & dislike_count <? 50 & description',
|
|
||||||
{'like_count': 190, 'dislike_count': 10}))
|
|
||||||
self.assertTrue(match_str('is_live', {'is_live': True}))
|
self.assertTrue(match_str('is_live', {'is_live': True}))
|
||||||
self.assertFalse(match_str('is_live', {'is_live': False}))
|
self.assertFalse(match_str('is_live', {'is_live': False}))
|
||||||
self.assertFalse(match_str('is_live', {'is_live': None}))
|
self.assertFalse(match_str('is_live', {'is_live': None}))
|
||||||
@@ -1249,6 +1248,76 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
|
|||||||
self.assertFalse(match_str('!title', {'title': 'abc'}))
|
self.assertFalse(match_str('!title', {'title': 'abc'}))
|
||||||
self.assertFalse(match_str('!title', {'title': ''}))
|
self.assertFalse(match_str('!title', {'title': ''}))
|
||||||
|
|
||||||
|
# Numeric
|
||||||
|
self.assertFalse(match_str('x>0', {'x': 0}))
|
||||||
|
self.assertFalse(match_str('x>0', {}))
|
||||||
|
self.assertTrue(match_str('x>?0', {}))
|
||||||
|
self.assertTrue(match_str('x>1K', {'x': 1200}))
|
||||||
|
self.assertFalse(match_str('x>2K', {'x': 1200}))
|
||||||
|
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
|
||||||
|
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
|
||||||
|
self.assertTrue(match_str('x > 1:0:0', {'x': 3700}))
|
||||||
|
|
||||||
|
# String
|
||||||
|
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y^=foo', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y!^=foo', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y^=bar', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y!^=bar', {'y': 'foobar42'}))
|
||||||
|
self.assertRaises(ValueError, match_str, 'x^=42', {'x': 42})
|
||||||
|
self.assertTrue(match_str('y*=bar', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y!*=bar', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y*=baz', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y!*=baz', {'y': 'foobar42'}))
|
||||||
|
self.assertTrue(match_str('y$=42', {'y': 'foobar42'}))
|
||||||
|
self.assertFalse(match_str('y$=43', {'y': 'foobar42'}))
|
||||||
|
|
||||||
|
# And
|
||||||
|
self.assertFalse(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 90, 'description': 'foo'}))
|
||||||
|
self.assertTrue(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 190, 'description': 'foo'}))
|
||||||
|
self.assertFalse(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
|
||||||
|
self.assertFalse(match_str(
|
||||||
|
'like_count > 100 & dislike_count <? 50 & description',
|
||||||
|
{'like_count': 190, 'dislike_count': 10}))
|
||||||
|
|
||||||
|
# Regex
|
||||||
|
self.assertTrue(match_str(r'x~=\bbar', {'x': 'foo bar'}))
|
||||||
|
self.assertFalse(match_str(r'x~=\bbar.+', {'x': 'foo bar'}))
|
||||||
|
self.assertFalse(match_str(r'x~=^FOO', {'x': 'foo bar'}))
|
||||||
|
self.assertTrue(match_str(r'x~=(?i)^FOO', {'x': 'foo bar'}))
|
||||||
|
|
||||||
|
# Quotes
|
||||||
|
self.assertTrue(match_str(r'x^="foo"', {'x': 'foo "bar"'}))
|
||||||
|
self.assertFalse(match_str(r'x^="foo "', {'x': 'foo "bar"'}))
|
||||||
|
self.assertFalse(match_str(r'x$="bar"', {'x': 'foo "bar"'}))
|
||||||
|
self.assertTrue(match_str(r'x$=" \"bar\""', {'x': 'foo "bar"'}))
|
||||||
|
|
||||||
|
# Escaping &
|
||||||
|
self.assertFalse(match_str(r'x=foo & bar', {'x': 'foo & bar'}))
|
||||||
|
self.assertTrue(match_str(r'x=foo \& bar', {'x': 'foo & bar'}))
|
||||||
|
self.assertTrue(match_str(r'x=foo \& bar & x^=foo', {'x': 'foo & bar'}))
|
||||||
|
self.assertTrue(match_str(r'x="foo \& bar" & x^=foo', {'x': 'foo & bar'}))
|
||||||
|
|
||||||
|
# Example from docs
|
||||||
|
self.assertTrue(match_str(
|
||||||
|
r"!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'",
|
||||||
|
{'description': 'Raining Cats & Dogs'}))
|
||||||
|
|
||||||
|
# Incomplete
|
||||||
|
self.assertFalse(match_str('id!=foo', {'id': 'foo'}, True))
|
||||||
|
self.assertTrue(match_str('x', {'id': 'foo'}, True))
|
||||||
|
self.assertTrue(match_str('!x', {'id': 'foo'}, True))
|
||||||
|
self.assertFalse(match_str('x', {'id': 'foo'}, False))
|
||||||
|
|
||||||
def test_parse_dfxp_time_expr(self):
|
def test_parse_dfxp_time_expr(self):
|
||||||
self.assertEqual(parse_dfxp_time_expr(None), None)
|
self.assertEqual(parse_dfxp_time_expr(None), None)
|
||||||
self.assertEqual(parse_dfxp_time_expr(''), None)
|
self.assertEqual(parse_dfxp_time_expr(''), None)
|
||||||
@@ -1324,21 +1393,21 @@ The first line
|
|||||||
</body>
|
</body>
|
||||||
</tt>'''.encode('utf-8')
|
</tt>'''.encode('utf-8')
|
||||||
srt_data = '''1
|
srt_data = '''1
|
||||||
00:00:02,080 --> 00:00:05,839
|
00:00:02,080 --> 00:00:05,840
|
||||||
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
|
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>
|
||||||
|
|
||||||
2
|
2
|
||||||
00:00:02,080 --> 00:00:05,839
|
00:00:02,080 --> 00:00:05,840
|
||||||
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
|
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
|
||||||
</font>part 2</font></b>
|
</font>part 2</font></b>
|
||||||
|
|
||||||
3
|
3
|
||||||
00:00:05,839 --> 00:00:09,560
|
00:00:05,840 --> 00:00:09,560
|
||||||
<u><font color="lime">line 3
|
<u><font color="lime">line 3
|
||||||
part 3</font></u>
|
part 3</font></u>
|
||||||
|
|
||||||
4
|
4
|
||||||
00:00:09,560 --> 00:00:12,359
|
00:00:09,560 --> 00:00:12,360
|
||||||
<i><u><font color="yellow"><font color="lime">inner
|
<i><u><font color="yellow"><font color="lime">inner
|
||||||
</font>style</font></u></i>
|
</font>style</font></u></i>
|
||||||
|
|
||||||
|
|||||||
@@ -14,9 +14,10 @@ import string
|
|||||||
|
|
||||||
from test.helper import FakeYDL, is_download_test
|
from test.helper import FakeYDL, is_download_test
|
||||||
from yt_dlp.extractor import YoutubeIE
|
from yt_dlp.extractor import YoutubeIE
|
||||||
|
from yt_dlp.jsinterp import JSInterpreter
|
||||||
from yt_dlp.compat import compat_str, compat_urlretrieve
|
from yt_dlp.compat import compat_str, compat_urlretrieve
|
||||||
|
|
||||||
_TESTS = [
|
_SIG_TESTS = [
|
||||||
(
|
(
|
||||||
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
|
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
|
||||||
86,
|
86,
|
||||||
@@ -64,6 +65,17 @@ _TESTS = [
|
|||||||
)
|
)
|
||||||
]
|
]
|
||||||
|
|
||||||
|
_NSIG_TESTS = [
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/9216d1f7/player_ias.vflset/en_US/base.js',
|
||||||
|
'SLp9F5bwjAdhE9F-', 'gWnb9IK2DJ8Q1w',
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'https://www.youtube.com/s/player/f8cb7a3b/player_ias.vflset/en_US/base.js',
|
||||||
|
'oBo2h5euWy6osrUt', 'ivXHpm7qJjJN',
|
||||||
|
),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
@is_download_test
|
@is_download_test
|
||||||
class TestPlayerInfo(unittest.TestCase):
|
class TestPlayerInfo(unittest.TestCase):
|
||||||
@@ -97,35 +109,49 @@ class TestSignature(unittest.TestCase):
|
|||||||
os.mkdir(self.TESTDATA_DIR)
|
os.mkdir(self.TESTDATA_DIR)
|
||||||
|
|
||||||
|
|
||||||
def make_tfunc(url, sig_input, expected_sig):
|
def t_factory(name, sig_func, url_pattern):
|
||||||
m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
|
def make_tfunc(url, sig_input, expected_sig):
|
||||||
assert m, '%r should follow URL format' % url
|
m = url_pattern.match(url)
|
||||||
test_id = m.group(1)
|
assert m, '%r should follow URL format' % url
|
||||||
|
test_id = m.group('id')
|
||||||
|
|
||||||
def test_func(self):
|
def test_func(self):
|
||||||
basename = 'player-%s.js' % test_id
|
basename = f'player-{name}-{test_id}.js'
|
||||||
fn = os.path.join(self.TESTDATA_DIR, basename)
|
fn = os.path.join(self.TESTDATA_DIR, basename)
|
||||||
|
|
||||||
if not os.path.exists(fn):
|
if not os.path.exists(fn):
|
||||||
compat_urlretrieve(url, fn)
|
compat_urlretrieve(url, fn)
|
||||||
|
with io.open(fn, encoding='utf-8') as testf:
|
||||||
|
jscode = testf.read()
|
||||||
|
self.assertEqual(sig_func(jscode, sig_input), expected_sig)
|
||||||
|
|
||||||
ydl = FakeYDL()
|
test_func.__name__ = f'test_{name}_js_{test_id}'
|
||||||
ie = YoutubeIE(ydl)
|
setattr(TestSignature, test_func.__name__, test_func)
|
||||||
with io.open(fn, encoding='utf-8') as testf:
|
return make_tfunc
|
||||||
jscode = testf.read()
|
|
||||||
func = ie._parse_sig_js(jscode)
|
|
||||||
src_sig = (
|
|
||||||
compat_str(string.printable[:sig_input])
|
|
||||||
if isinstance(sig_input, int) else sig_input)
|
|
||||||
got_sig = func(src_sig)
|
|
||||||
self.assertEqual(got_sig, expected_sig)
|
|
||||||
|
|
||||||
test_func.__name__ = str('test_signature_js_' + test_id)
|
|
||||||
setattr(TestSignature, test_func.__name__, test_func)
|
|
||||||
|
|
||||||
|
|
||||||
for test_spec in _TESTS:
|
def signature(jscode, sig_input):
|
||||||
make_tfunc(*test_spec)
|
func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
|
||||||
|
src_sig = (
|
||||||
|
compat_str(string.printable[:sig_input])
|
||||||
|
if isinstance(sig_input, int) else sig_input)
|
||||||
|
return func(src_sig)
|
||||||
|
|
||||||
|
|
||||||
|
def n_sig(jscode, sig_input):
|
||||||
|
funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode)
|
||||||
|
return JSInterpreter(jscode).call_function(funcname, sig_input)
|
||||||
|
|
||||||
|
|
||||||
|
make_sig_test = t_factory(
|
||||||
|
'signature', signature, re.compile(r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$'))
|
||||||
|
for test_spec in _SIG_TESTS:
|
||||||
|
make_sig_test(*test_spec)
|
||||||
|
|
||||||
|
make_nsig_test = t_factory(
|
||||||
|
'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$'))
|
||||||
|
for test_spec in _NSIG_TESTS:
|
||||||
|
make_nsig_test(*test_spec)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|||||||
1431
yt_dlp/YoutubeDL.py
1431
yt_dlp/YoutubeDL.py
File diff suppressed because it is too large
Load Diff
@@ -1,23 +1,24 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp' # noqa: F541
|
||||||
|
|
||||||
__license__ = 'Public Domain'
|
__license__ = 'Public Domain'
|
||||||
|
|
||||||
import codecs
|
import codecs
|
||||||
import io
|
import io
|
||||||
|
import itertools
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
|
||||||
from .options import (
|
from .options import (
|
||||||
parseOpts,
|
parseOpts,
|
||||||
)
|
)
|
||||||
from .compat import (
|
from .compat import (
|
||||||
compat_getpass,
|
compat_getpass,
|
||||||
|
compat_shlex_quote,
|
||||||
workaround_optparse_bug9161,
|
workaround_optparse_bug9161,
|
||||||
)
|
)
|
||||||
from .cookies import SUPPORTED_BROWSERS
|
from .cookies import SUPPORTED_BROWSERS
|
||||||
@@ -28,8 +29,11 @@ from .utils import (
|
|||||||
error_to_compat_str,
|
error_to_compat_str,
|
||||||
ExistingVideoReached,
|
ExistingVideoReached,
|
||||||
expand_path,
|
expand_path,
|
||||||
|
float_or_none,
|
||||||
|
int_or_none,
|
||||||
match_filter_func,
|
match_filter_func,
|
||||||
MaxDownloadsReached,
|
MaxDownloadsReached,
|
||||||
|
parse_duration,
|
||||||
preferredencoding,
|
preferredencoding,
|
||||||
read_batch_urls,
|
read_batch_urls,
|
||||||
RejectedVideoReached,
|
RejectedVideoReached,
|
||||||
@@ -46,14 +50,15 @@ from .downloader import (
|
|||||||
from .extractor import gen_extractors, list_extractors
|
from .extractor import gen_extractors, list_extractors
|
||||||
from .extractor.common import InfoExtractor
|
from .extractor.common import InfoExtractor
|
||||||
from .extractor.adobepass import MSO_INFO
|
from .extractor.adobepass import MSO_INFO
|
||||||
from .postprocessor.ffmpeg import (
|
from .postprocessor import (
|
||||||
FFmpegExtractAudioPP,
|
FFmpegExtractAudioPP,
|
||||||
FFmpegSubtitlesConvertorPP,
|
FFmpegSubtitlesConvertorPP,
|
||||||
FFmpegThumbnailsConvertorPP,
|
FFmpegThumbnailsConvertorPP,
|
||||||
FFmpegVideoConvertorPP,
|
FFmpegVideoConvertorPP,
|
||||||
FFmpegVideoRemuxerPP,
|
FFmpegVideoRemuxerPP,
|
||||||
|
MetadataFromFieldPP,
|
||||||
|
MetadataParserPP,
|
||||||
)
|
)
|
||||||
from .postprocessor.metadatafromfield import MetadataFromFieldPP
|
|
||||||
from .YoutubeDL import YoutubeDL
|
from .YoutubeDL import YoutubeDL
|
||||||
|
|
||||||
|
|
||||||
@@ -107,22 +112,22 @@ def _real_main(argv=None):
|
|||||||
|
|
||||||
if opts.list_extractors:
|
if opts.list_extractors:
|
||||||
for ie in list_extractors(opts.age_limit):
|
for ie in list_extractors(opts.age_limit):
|
||||||
write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
|
write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n', out=sys.stdout)
|
||||||
matchedUrls = [url for url in all_urls if ie.suitable(url)]
|
matchedUrls = [url for url in all_urls if ie.suitable(url)]
|
||||||
for mu in matchedUrls:
|
for mu in matchedUrls:
|
||||||
write_string(' ' + mu + '\n', out=sys.stdout)
|
write_string(' ' + mu + '\n', out=sys.stdout)
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
if opts.list_extractor_descriptions:
|
if opts.list_extractor_descriptions:
|
||||||
for ie in list_extractors(opts.age_limit):
|
for ie in list_extractors(opts.age_limit):
|
||||||
if not ie._WORKING:
|
if not ie.working():
|
||||||
continue
|
continue
|
||||||
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
|
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
|
||||||
if desc is False:
|
if desc is False:
|
||||||
continue
|
continue
|
||||||
if hasattr(ie, 'SEARCH_KEY'):
|
if getattr(ie, 'SEARCH_KEY', None) is not None:
|
||||||
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
|
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
|
||||||
_COUNTS = ('', '5', '10', 'all')
|
_COUNTS = ('', '5', '10', 'all')
|
||||||
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
|
desc += f'; "{ie.SEARCH_KEY}:" prefix (Example: "{ie.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(_SEARCHES)}")'
|
||||||
write_string(desc + '\n', out=sys.stdout)
|
write_string(desc + '\n', out=sys.stdout)
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
if opts.ap_list_mso:
|
if opts.ap_list_mso:
|
||||||
@@ -222,11 +227,13 @@ def _real_main(argv=None):
|
|||||||
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
|
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
|
||||||
raise ValueError('Playlist end must be greater than playlist start')
|
raise ValueError('Playlist end must be greater than playlist start')
|
||||||
if opts.extractaudio:
|
if opts.extractaudio:
|
||||||
|
opts.audioformat = opts.audioformat.lower()
|
||||||
if opts.audioformat not in ['best'] + list(FFmpegExtractAudioPP.SUPPORTED_EXTS):
|
if opts.audioformat not in ['best'] + list(FFmpegExtractAudioPP.SUPPORTED_EXTS):
|
||||||
parser.error('invalid audio format specified')
|
parser.error('invalid audio format specified')
|
||||||
if opts.audioquality:
|
if opts.audioquality:
|
||||||
opts.audioquality = opts.audioquality.strip('k').strip('K')
|
opts.audioquality = opts.audioquality.strip('k').strip('K')
|
||||||
if not opts.audioquality.isdigit():
|
audioquality = int_or_none(float_or_none(opts.audioquality)) # int_or_none prevents inf, nan
|
||||||
|
if audioquality is None or audioquality < 0:
|
||||||
parser.error('invalid audio quality specified')
|
parser.error('invalid audio quality specified')
|
||||||
if opts.recodevideo is not None:
|
if opts.recodevideo is not None:
|
||||||
opts.recodevideo = opts.recodevideo.replace(' ', '')
|
opts.recodevideo = opts.recodevideo.replace(' ', '')
|
||||||
@@ -246,7 +253,7 @@ def _real_main(argv=None):
|
|||||||
if opts.cookiesfrombrowser is not None:
|
if opts.cookiesfrombrowser is not None:
|
||||||
opts.cookiesfrombrowser = [
|
opts.cookiesfrombrowser = [
|
||||||
part.strip() or None for part in opts.cookiesfrombrowser.split(':', 1)]
|
part.strip() or None for part in opts.cookiesfrombrowser.split(':', 1)]
|
||||||
if opts.cookiesfrombrowser[0] not in SUPPORTED_BROWSERS:
|
if opts.cookiesfrombrowser[0].lower() not in SUPPORTED_BROWSERS:
|
||||||
parser.error('unsupported browser specified for cookies')
|
parser.error('unsupported browser specified for cookies')
|
||||||
|
|
||||||
if opts.date is not None:
|
if opts.date is not None:
|
||||||
@@ -254,35 +261,10 @@ def _real_main(argv=None):
|
|||||||
else:
|
else:
|
||||||
date = DateRange(opts.dateafter, opts.datebefore)
|
date = DateRange(opts.dateafter, opts.datebefore)
|
||||||
|
|
||||||
def parse_compat_opts():
|
compat_opts = opts.compat_opts
|
||||||
parsed_compat_opts, compat_opts = set(), opts.compat_opts[::-1]
|
|
||||||
while compat_opts:
|
|
||||||
actual_opt = opt = compat_opts.pop().lower()
|
|
||||||
if opt == 'youtube-dl':
|
|
||||||
compat_opts.extend(['-multistreams', 'all'])
|
|
||||||
elif opt == 'youtube-dlc':
|
|
||||||
compat_opts.extend(['-no-youtube-channel-redirect', '-no-live-chat', 'all'])
|
|
||||||
elif opt == 'all':
|
|
||||||
parsed_compat_opts.update(all_compat_opts)
|
|
||||||
elif opt == '-all':
|
|
||||||
parsed_compat_opts = set()
|
|
||||||
else:
|
|
||||||
if opt[0] == '-':
|
|
||||||
opt = opt[1:]
|
|
||||||
parsed_compat_opts.discard(opt)
|
|
||||||
else:
|
|
||||||
parsed_compat_opts.update([opt])
|
|
||||||
if opt not in all_compat_opts:
|
|
||||||
parser.error('Invalid compatibility option %s' % actual_opt)
|
|
||||||
return parsed_compat_opts
|
|
||||||
|
|
||||||
all_compat_opts = [
|
def report_conflict(arg1, arg2):
|
||||||
'filename', 'format-sort', 'abort-on-error', 'format-spec', 'no-playlist-metafiles',
|
warnings.append(f'{arg2} is ignored since {arg1} was given')
|
||||||
'multistreams', 'no-live-chat', 'playlist-index', 'list-formats', 'no-direct-merge',
|
|
||||||
'no-youtube-channel-redirect', 'no-youtube-unavailable-videos', 'no-attach-info-json',
|
|
||||||
'embed-thumbnail-atomicparsley', 'seperate-video-versions', 'no-clean-infojson',
|
|
||||||
]
|
|
||||||
compat_opts = parse_compat_opts()
|
|
||||||
|
|
||||||
def _unused_compat_opt(name):
|
def _unused_compat_opt(name):
|
||||||
if name not in compat_opts:
|
if name not in compat_opts:
|
||||||
@@ -305,7 +287,7 @@ def _real_main(argv=None):
|
|||||||
setattr(opts, opt_name, default)
|
setattr(opts, opt_name, default)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
set_default_compat('abort-on-error', 'ignoreerrors')
|
set_default_compat('abort-on-error', 'ignoreerrors', 'only_download')
|
||||||
set_default_compat('no-playlist-metafiles', 'allow_playlist_files')
|
set_default_compat('no-playlist-metafiles', 'allow_playlist_files')
|
||||||
set_default_compat('no-clean-infojson', 'clean_infojson')
|
set_default_compat('no-clean-infojson', 'clean_infojson')
|
||||||
if 'format-sort' in compat_opts:
|
if 'format-sort' in compat_opts:
|
||||||
@@ -315,10 +297,14 @@ def _real_main(argv=None):
|
|||||||
if _video_multistreams_set is False and _audio_multistreams_set is False:
|
if _video_multistreams_set is False and _audio_multistreams_set is False:
|
||||||
_unused_compat_opt('multistreams')
|
_unused_compat_opt('multistreams')
|
||||||
outtmpl_default = opts.outtmpl.get('default')
|
outtmpl_default = opts.outtmpl.get('default')
|
||||||
|
if opts.useid:
|
||||||
|
if outtmpl_default is None:
|
||||||
|
outtmpl_default = opts.outtmpl['default'] = '%(id)s.%(ext)s'
|
||||||
|
else:
|
||||||
|
report_conflict('--output', '--id')
|
||||||
if 'filename' in compat_opts:
|
if 'filename' in compat_opts:
|
||||||
if outtmpl_default is None:
|
if outtmpl_default is None:
|
||||||
outtmpl_default = '%(title)s.%(id)s.%(ext)s'
|
outtmpl_default = opts.outtmpl['default'] = '%(title)s-%(id)s.%(ext)s'
|
||||||
opts.outtmpl.update({'default': outtmpl_default})
|
|
||||||
else:
|
else:
|
||||||
_unused_compat_opt('filename')
|
_unused_compat_opt('filename')
|
||||||
|
|
||||||
@@ -328,9 +314,14 @@ def _real_main(argv=None):
|
|||||||
parser.error('invalid %s %r: %s' % (msg, tmpl, error_to_compat_str(err)))
|
parser.error('invalid %s %r: %s' % (msg, tmpl, error_to_compat_str(err)))
|
||||||
|
|
||||||
for k, tmpl in opts.outtmpl.items():
|
for k, tmpl in opts.outtmpl.items():
|
||||||
validate_outtmpl(tmpl, '%s output template' % k)
|
validate_outtmpl(tmpl, f'{k} output template')
|
||||||
for tmpl in opts.forceprint:
|
opts.forceprint = opts.forceprint or []
|
||||||
|
for tmpl in opts.forceprint or []:
|
||||||
validate_outtmpl(tmpl, 'print template')
|
validate_outtmpl(tmpl, 'print template')
|
||||||
|
validate_outtmpl(opts.sponsorblock_chapter_title, 'SponsorBlock chapter title')
|
||||||
|
for k, tmpl in opts.progress_template.items():
|
||||||
|
k = f'{k[:-6]} console title' if '-title' in k else f'{k} progress'
|
||||||
|
validate_outtmpl(tmpl, f'{k} template')
|
||||||
|
|
||||||
if opts.extractaudio and not opts.keepvideo and opts.format is None:
|
if opts.extractaudio and not opts.keepvideo and opts.format is None:
|
||||||
opts.format = 'bestaudio/best'
|
opts.format = 'bestaudio/best'
|
||||||
@@ -344,13 +335,29 @@ def _real_main(argv=None):
|
|||||||
if re.match(InfoExtractor.FormatSort.regex, f) is None:
|
if re.match(InfoExtractor.FormatSort.regex, f) is None:
|
||||||
parser.error('invalid format sort string "%s" specified' % f)
|
parser.error('invalid format sort string "%s" specified' % f)
|
||||||
|
|
||||||
if opts.metafromfield is None:
|
def metadataparser_actions(f):
|
||||||
opts.metafromfield = []
|
if isinstance(f, str):
|
||||||
|
cmd = '--parse-metadata %s' % compat_shlex_quote(f)
|
||||||
|
try:
|
||||||
|
actions = [MetadataFromFieldPP.to_action(f)]
|
||||||
|
except Exception as err:
|
||||||
|
parser.error(f'{cmd} is invalid; {err}')
|
||||||
|
else:
|
||||||
|
cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
|
||||||
|
actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))
|
||||||
|
|
||||||
|
for action in actions:
|
||||||
|
try:
|
||||||
|
MetadataParserPP.validate_action(*action)
|
||||||
|
except Exception as err:
|
||||||
|
parser.error(f'{cmd} is invalid; {err}')
|
||||||
|
yield action
|
||||||
|
|
||||||
|
if opts.parse_metadata is None:
|
||||||
|
opts.parse_metadata = []
|
||||||
if opts.metafromtitle is not None:
|
if opts.metafromtitle is not None:
|
||||||
opts.metafromfield.append('title:%s' % opts.metafromtitle)
|
opts.parse_metadata.append('title:%s' % opts.metafromtitle)
|
||||||
for f in opts.metafromfield:
|
opts.parse_metadata = list(itertools.chain(*map(metadataparser_actions, opts.parse_metadata)))
|
||||||
if re.match(MetadataFromFieldPP.regex, f) is None:
|
|
||||||
parser.error('invalid format string "%s" specified for --parse-metadata' % f)
|
|
||||||
|
|
||||||
any_getting = opts.forceprint or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
|
any_getting = opts.forceprint or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
|
||||||
any_printing = opts.print_json
|
any_printing = opts.print_json
|
||||||
@@ -361,15 +368,31 @@ def _real_main(argv=None):
|
|||||||
if opts.getcomments and not printing_json:
|
if opts.getcomments and not printing_json:
|
||||||
opts.writeinfojson = True
|
opts.writeinfojson = True
|
||||||
|
|
||||||
def report_conflict(arg1, arg2):
|
if opts.no_sponsorblock:
|
||||||
warnings.append('%s is ignored since %s was given' % (arg2, arg1))
|
opts.sponsorblock_mark = set()
|
||||||
|
opts.sponsorblock_remove = set()
|
||||||
|
sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
|
||||||
|
|
||||||
|
if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None:
|
||||||
|
opts.addchapters = True
|
||||||
|
opts.remove_chapters = opts.remove_chapters or []
|
||||||
|
|
||||||
|
if (opts.remove_chapters or sponsorblock_query) and opts.sponskrub is not False:
|
||||||
|
if opts.sponskrub:
|
||||||
|
if opts.remove_chapters:
|
||||||
|
report_conflict('--remove-chapters', '--sponskrub')
|
||||||
|
if opts.sponsorblock_mark:
|
||||||
|
report_conflict('--sponsorblock-mark', '--sponskrub')
|
||||||
|
if opts.sponsorblock_remove:
|
||||||
|
report_conflict('--sponsorblock-remove', '--sponskrub')
|
||||||
|
opts.sponskrub = False
|
||||||
|
if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
|
||||||
|
report_conflict('--split-chapter', '--sponskrub-cut')
|
||||||
|
opts.sponskrub_cut = False
|
||||||
|
|
||||||
if opts.remuxvideo and opts.recodevideo:
|
if opts.remuxvideo and opts.recodevideo:
|
||||||
report_conflict('--recode-video', '--remux-video')
|
report_conflict('--recode-video', '--remux-video')
|
||||||
opts.remuxvideo = False
|
opts.remuxvideo = False
|
||||||
if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False:
|
|
||||||
report_conflict('--split-chapter', '--sponskrub-cut')
|
|
||||||
opts.sponskrub_cut = False
|
|
||||||
|
|
||||||
if opts.allow_unplayable_formats:
|
if opts.allow_unplayable_formats:
|
||||||
if opts.extractaudio:
|
if opts.extractaudio:
|
||||||
@@ -396,16 +419,30 @@ def _real_main(argv=None):
|
|||||||
if opts.fixup and opts.fixup.lower() not in ('never', 'ignore'):
|
if opts.fixup and opts.fixup.lower() not in ('never', 'ignore'):
|
||||||
report_conflict('--allow-unplayable-formats', '--fixup')
|
report_conflict('--allow-unplayable-formats', '--fixup')
|
||||||
opts.fixup = 'never'
|
opts.fixup = 'never'
|
||||||
|
if opts.remove_chapters:
|
||||||
|
report_conflict('--allow-unplayable-formats', '--remove-chapters')
|
||||||
|
opts.remove_chapters = []
|
||||||
|
if opts.sponsorblock_remove:
|
||||||
|
report_conflict('--allow-unplayable-formats', '--sponsorblock-remove')
|
||||||
|
opts.sponsorblock_remove = set()
|
||||||
if opts.sponskrub:
|
if opts.sponskrub:
|
||||||
report_conflict('--allow-unplayable-formats', '--sponskrub')
|
report_conflict('--allow-unplayable-formats', '--sponskrub')
|
||||||
opts.sponskrub = False
|
opts.sponskrub = False
|
||||||
|
|
||||||
# PostProcessors
|
# PostProcessors
|
||||||
postprocessors = []
|
postprocessors = list(opts.add_postprocessors)
|
||||||
if opts.metafromfield:
|
if sponsorblock_query:
|
||||||
postprocessors.append({
|
postprocessors.append({
|
||||||
'key': 'MetadataFromField',
|
'key': 'SponsorBlock',
|
||||||
'formats': opts.metafromfield,
|
'categories': sponsorblock_query,
|
||||||
|
'api': opts.sponsorblock_api,
|
||||||
|
# Run this immediately after extraction is complete
|
||||||
|
'when': 'pre_process'
|
||||||
|
})
|
||||||
|
if opts.parse_metadata:
|
||||||
|
postprocessors.append({
|
||||||
|
'key': 'MetadataParser',
|
||||||
|
'actions': opts.parse_metadata,
|
||||||
# Run this immediately after extraction is complete
|
# Run this immediately after extraction is complete
|
||||||
'when': 'pre_process'
|
'when': 'pre_process'
|
||||||
})
|
})
|
||||||
@@ -426,7 +463,7 @@ def _real_main(argv=None):
|
|||||||
# Must be after all other before_dl
|
# Must be after all other before_dl
|
||||||
if opts.exec_before_dl_cmd:
|
if opts.exec_before_dl_cmd:
|
||||||
postprocessors.append({
|
postprocessors.append({
|
||||||
'key': 'ExecAfterDownload',
|
'key': 'Exec',
|
||||||
'exec_cmd': opts.exec_before_dl_cmd,
|
'exec_cmd': opts.exec_before_dl_cmd,
|
||||||
'when': 'before_dl'
|
'when': 'before_dl'
|
||||||
})
|
})
|
||||||
@@ -447,29 +484,55 @@ def _real_main(argv=None):
|
|||||||
'key': 'FFmpegVideoConvertor',
|
'key': 'FFmpegVideoConvertor',
|
||||||
'preferedformat': opts.recodevideo,
|
'preferedformat': opts.recodevideo,
|
||||||
})
|
})
|
||||||
# FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
|
# If ModifyChapters is going to remove chapters, subtitles must already be in the container.
|
||||||
# FFmpegExtractAudioPP as containers before conversion may not support
|
|
||||||
# metadata (3gp, webm, etc.)
|
|
||||||
# And this post-processor should be placed before other metadata
|
|
||||||
# manipulating post-processors (FFmpegEmbedSubtitle) to prevent loss of
|
|
||||||
# extra metadata. By default ffmpeg preserves metadata applicable for both
|
|
||||||
# source and target containers. From this point the container won't change,
|
|
||||||
# so metadata can be added here.
|
|
||||||
if opts.addmetadata:
|
|
||||||
postprocessors.append({'key': 'FFmpegMetadata'})
|
|
||||||
if opts.embedsubtitles:
|
if opts.embedsubtitles:
|
||||||
already_have_subtitle = opts.writesubtitles
|
already_have_subtitle = opts.writesubtitles and 'no-keep-subs' not in compat_opts
|
||||||
postprocessors.append({
|
postprocessors.append({
|
||||||
'key': 'FFmpegEmbedSubtitle',
|
'key': 'FFmpegEmbedSubtitle',
|
||||||
# already_have_subtitle = True prevents the file from being deleted after embedding
|
# already_have_subtitle = True prevents the file from being deleted after embedding
|
||||||
'already_have_subtitle': already_have_subtitle
|
'already_have_subtitle': already_have_subtitle
|
||||||
})
|
})
|
||||||
if not already_have_subtitle:
|
if not opts.writeautomaticsub and 'no-keep-subs' not in compat_opts:
|
||||||
opts.writesubtitles = True
|
opts.writesubtitles = True
|
||||||
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
|
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
|
||||||
# this was the old behaviour if only --all-sub was given.
|
# this was the old behaviour if only --all-sub was given.
|
||||||
if opts.allsubtitles and not opts.writeautomaticsub:
|
if opts.allsubtitles and not opts.writeautomaticsub:
|
||||||
opts.writesubtitles = True
|
opts.writesubtitles = True
|
||||||
|
# ModifyChapters must run before FFmpegMetadataPP
|
||||||
|
remove_chapters_patterns, remove_ranges = [], []
|
||||||
|
for regex in opts.remove_chapters:
|
||||||
|
if regex.startswith('*'):
|
||||||
|
dur = list(map(parse_duration, regex[1:].split('-')))
|
||||||
|
if len(dur) == 2 and all(t is not None for t in dur):
|
||||||
|
remove_ranges.append(tuple(dur))
|
||||||
|
continue
|
||||||
|
parser.error(f'invalid --remove-chapters time range {regex!r}. Must be of the form ?start-end')
|
||||||
|
try:
|
||||||
|
remove_chapters_patterns.append(re.compile(regex))
|
||||||
|
except re.error as err:
|
||||||
|
parser.error(f'invalid --remove-chapters regex {regex!r} - {err}')
|
||||||
|
if opts.remove_chapters or sponsorblock_query:
|
||||||
|
postprocessors.append({
|
||||||
|
'key': 'ModifyChapters',
|
||||||
|
'remove_chapters_patterns': remove_chapters_patterns,
|
||||||
|
'remove_sponsor_segments': opts.sponsorblock_remove,
|
||||||
|
'remove_ranges': remove_ranges,
|
||||||
|
'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
|
||||||
|
'force_keyframes': opts.force_keyframes_at_cuts
|
||||||
|
})
|
||||||
|
# FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
|
||||||
|
# FFmpegExtractAudioPP as containers before conversion may not support
|
||||||
|
# metadata (3gp, webm, etc.)
|
||||||
|
# By default ffmpeg preserves metadata applicable for both
|
||||||
|
# source and target containers. From this point the container won't change,
|
||||||
|
# so metadata can be added here.
|
||||||
|
if opts.addmetadata or opts.addchapters:
|
||||||
|
postprocessors.append({
|
||||||
|
'key': 'FFmpegMetadata',
|
||||||
|
'add_chapters': opts.addchapters,
|
||||||
|
'add_metadata': opts.addmetadata,
|
||||||
|
})
|
||||||
|
# Note: Deprecated
|
||||||
# This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
|
# This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
|
||||||
# but must be below EmbedSubtitle and FFmpegMetadata
|
# but must be below EmbedSubtitle and FFmpegMetadata
|
||||||
# See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
|
# See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
|
||||||
@@ -492,15 +555,19 @@ def _real_main(argv=None):
|
|||||||
})
|
})
|
||||||
if not already_have_thumbnail:
|
if not already_have_thumbnail:
|
||||||
opts.writethumbnail = True
|
opts.writethumbnail = True
|
||||||
|
opts.outtmpl['pl_thumbnail'] = ''
|
||||||
if opts.split_chapters:
|
if opts.split_chapters:
|
||||||
postprocessors.append({'key': 'FFmpegSplitChapters'})
|
postprocessors.append({
|
||||||
|
'key': 'FFmpegSplitChapters',
|
||||||
|
'force_keyframes': opts.force_keyframes_at_cuts,
|
||||||
|
})
|
||||||
# XAttrMetadataPP should be run after post-processors that may change file contents
|
# XAttrMetadataPP should be run after post-processors that may change file contents
|
||||||
if opts.xattrs:
|
if opts.xattrs:
|
||||||
postprocessors.append({'key': 'XAttrMetadata'})
|
postprocessors.append({'key': 'XAttrMetadata'})
|
||||||
# ExecAfterDownload must be the last PP
|
# Exec must be the last PP
|
||||||
if opts.exec_cmd:
|
if opts.exec_cmd:
|
||||||
postprocessors.append({
|
postprocessors.append({
|
||||||
'key': 'ExecAfterDownload',
|
'key': 'Exec',
|
||||||
'exec_cmd': opts.exec_cmd,
|
'exec_cmd': opts.exec_cmd,
|
||||||
# Run this only after the files have been moved to their final locations
|
# Run this only after the files have been moved to their final locations
|
||||||
'when': 'after_move'
|
'when': 'after_move'
|
||||||
@@ -529,6 +596,7 @@ def _real_main(argv=None):
|
|||||||
|
|
||||||
ydl_opts = {
|
ydl_opts = {
|
||||||
'usenetrc': opts.usenetrc,
|
'usenetrc': opts.usenetrc,
|
||||||
|
'netrc_location': opts.netrc_location,
|
||||||
'username': opts.username,
|
'username': opts.username,
|
||||||
'password': opts.password,
|
'password': opts.password,
|
||||||
'twofactor': opts.twofactor,
|
'twofactor': opts.twofactor,
|
||||||
@@ -550,7 +618,7 @@ def _real_main(argv=None):
|
|||||||
'forcejson': opts.dumpjson or opts.print_json,
|
'forcejson': opts.dumpjson or opts.print_json,
|
||||||
'dump_single_json': opts.dump_single_json,
|
'dump_single_json': opts.dump_single_json,
|
||||||
'force_write_download_archive': opts.force_write_download_archive,
|
'force_write_download_archive': opts.force_write_download_archive,
|
||||||
'simulate': opts.simulate or any_getting,
|
'simulate': (any_getting or None) if opts.simulate is None else opts.simulate,
|
||||||
'skip_download': opts.skip_download,
|
'skip_download': opts.skip_download,
|
||||||
'format': opts.format,
|
'format': opts.format,
|
||||||
'allow_unplayable_formats': opts.allow_unplayable_formats,
|
'allow_unplayable_formats': opts.allow_unplayable_formats,
|
||||||
@@ -584,8 +652,9 @@ def _real_main(argv=None):
|
|||||||
'noresizebuffer': opts.noresizebuffer,
|
'noresizebuffer': opts.noresizebuffer,
|
||||||
'http_chunk_size': opts.http_chunk_size,
|
'http_chunk_size': opts.http_chunk_size,
|
||||||
'continuedl': opts.continue_dl,
|
'continuedl': opts.continue_dl,
|
||||||
'noprogress': opts.noprogress,
|
'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
|
||||||
'progress_with_newline': opts.progress_with_newline,
|
'progress_with_newline': opts.progress_with_newline,
|
||||||
|
'progress_template': opts.progress_template,
|
||||||
'playliststart': opts.playliststart,
|
'playliststart': opts.playliststart,
|
||||||
'playlistend': opts.playlistend,
|
'playlistend': opts.playlistend,
|
||||||
'playlistreverse': opts.playlist_reverse,
|
'playlistreverse': opts.playlist_reverse,
|
||||||
@@ -680,12 +749,8 @@ def _real_main(argv=None):
|
|||||||
'geo_bypass': opts.geo_bypass,
|
'geo_bypass': opts.geo_bypass,
|
||||||
'geo_bypass_country': opts.geo_bypass_country,
|
'geo_bypass_country': opts.geo_bypass_country,
|
||||||
'geo_bypass_ip_block': opts.geo_bypass_ip_block,
|
'geo_bypass_ip_block': opts.geo_bypass_ip_block,
|
||||||
'warnings': warnings,
|
'_warnings': warnings,
|
||||||
'compat_opts': compat_opts,
|
'compat_opts': compat_opts,
|
||||||
# just for deprecation check
|
|
||||||
'autonumber': opts.autonumber or None,
|
|
||||||
'usetitle': opts.usetitle or None,
|
|
||||||
'useid': opts.useid or None,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
with YoutubeDL(ydl_opts) as ydl:
|
with YoutubeDL(ydl_opts) as ydl:
|
||||||
@@ -730,10 +795,15 @@ def main(argv=None):
|
|||||||
_real_main(argv)
|
_real_main(argv)
|
||||||
except DownloadError:
|
except DownloadError:
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
except SameFileError:
|
except SameFileError as e:
|
||||||
sys.exit('ERROR: fixed output name but more than one file to download')
|
sys.exit(f'ERROR: {e}')
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
sys.exit('\nERROR: Interrupted by user')
|
sys.exit('\nERROR: Interrupted by user')
|
||||||
|
except BrokenPipeError as e:
|
||||||
|
# https://docs.python.org/3/library/signal.html#note-on-sigpipe
|
||||||
|
devnull = os.open(os.devnull, os.O_WRONLY)
|
||||||
|
os.dup2(devnull, sys.stdout.fileno())
|
||||||
|
sys.exit(f'\nERROR: {e}')
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
|
__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
|
||||||
|
|||||||
269
yt_dlp/aes.py
269
yt_dlp/aes.py
@@ -2,36 +2,68 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
from math import ceil
|
from math import ceil
|
||||||
|
|
||||||
from .compat import compat_b64decode
|
from .compat import compat_b64decode, compat_pycrypto_AES
|
||||||
from .utils import bytes_to_intlist, intlist_to_bytes
|
from .utils import bytes_to_intlist, intlist_to_bytes
|
||||||
|
|
||||||
|
|
||||||
|
if compat_pycrypto_AES:
|
||||||
|
def aes_cbc_decrypt_bytes(data, key, iv):
|
||||||
|
""" Decrypt bytes with AES-CBC using pycryptodome """
|
||||||
|
return compat_pycrypto_AES.new(key, compat_pycrypto_AES.MODE_CBC, iv).decrypt(data)
|
||||||
|
|
||||||
|
def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
|
||||||
|
""" Decrypt bytes with AES-GCM using pycryptodome """
|
||||||
|
return compat_pycrypto_AES.new(key, compat_pycrypto_AES.MODE_GCM, nonce).decrypt_and_verify(data, tag)
|
||||||
|
|
||||||
|
else:
|
||||||
|
def aes_cbc_decrypt_bytes(data, key, iv):
|
||||||
|
""" Decrypt bytes with AES-CBC using native implementation since pycryptodome is unavailable """
|
||||||
|
return intlist_to_bytes(aes_cbc_decrypt(*map(bytes_to_intlist, (data, key, iv))))
|
||||||
|
|
||||||
|
def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
|
||||||
|
""" Decrypt bytes with AES-GCM using native implementation since pycryptodome is unavailable """
|
||||||
|
return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce))))
|
||||||
|
|
||||||
|
|
||||||
BLOCK_SIZE_BYTES = 16
|
BLOCK_SIZE_BYTES = 16
|
||||||
|
|
||||||
|
|
||||||
def aes_ctr_decrypt(data, key, counter):
|
def aes_ctr_decrypt(data, key, iv):
|
||||||
"""
|
"""
|
||||||
Decrypt with aes in counter mode
|
Decrypt with aes in counter mode
|
||||||
|
|
||||||
@param {int[]} data cipher
|
@param {int[]} data cipher
|
||||||
@param {int[]} key 16/24/32-Byte cipher key
|
@param {int[]} key 16/24/32-Byte cipher key
|
||||||
@param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block)
|
@param {int[]} iv 16-Byte initialization vector
|
||||||
returns the next counter block
|
|
||||||
@returns {int[]} decrypted data
|
@returns {int[]} decrypted data
|
||||||
"""
|
"""
|
||||||
|
return aes_ctr_encrypt(data, key, iv)
|
||||||
|
|
||||||
|
|
||||||
|
def aes_ctr_encrypt(data, key, iv):
|
||||||
|
"""
|
||||||
|
Encrypt with aes in counter mode
|
||||||
|
|
||||||
|
@param {int[]} data cleartext
|
||||||
|
@param {int[]} key 16/24/32-Byte cipher key
|
||||||
|
@param {int[]} iv 16-Byte initialization vector
|
||||||
|
@returns {int[]} encrypted data
|
||||||
|
"""
|
||||||
expanded_key = key_expansion(key)
|
expanded_key = key_expansion(key)
|
||||||
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
|
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
|
||||||
|
counter = iter_vector(iv)
|
||||||
|
|
||||||
decrypted_data = []
|
encrypted_data = []
|
||||||
for i in range(block_count):
|
for i in range(block_count):
|
||||||
counter_block = counter.next_value()
|
counter_block = next(counter)
|
||||||
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
|
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
|
||||||
block += [0] * (BLOCK_SIZE_BYTES - len(block))
|
block += [0] * (BLOCK_SIZE_BYTES - len(block))
|
||||||
|
|
||||||
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
|
cipher_counter_block = aes_encrypt(counter_block, expanded_key)
|
||||||
decrypted_data += xor(block, cipher_counter_block)
|
encrypted_data += xor(block, cipher_counter_block)
|
||||||
decrypted_data = decrypted_data[:len(data)]
|
encrypted_data = encrypted_data[:len(data)]
|
||||||
|
|
||||||
return decrypted_data
|
return encrypted_data
|
||||||
|
|
||||||
|
|
||||||
def aes_cbc_decrypt(data, key, iv):
|
def aes_cbc_decrypt(data, key, iv):
|
||||||
@@ -88,39 +120,47 @@ def aes_cbc_encrypt(data, key, iv):
|
|||||||
return encrypted_data
|
return encrypted_data
|
||||||
|
|
||||||
|
|
||||||
def key_expansion(data):
|
def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
|
||||||
"""
|
"""
|
||||||
Generate key schedule
|
Decrypt with aes in GBM mode and checks authenticity using tag
|
||||||
|
|
||||||
@param {int[]} data 16/24/32-Byte cipher key
|
@param {int[]} data cipher
|
||||||
@returns {int[]} 176/208/240-Byte expanded key
|
@param {int[]} key 16-Byte cipher key
|
||||||
|
@param {int[]} tag authentication tag
|
||||||
|
@param {int[]} nonce IV (recommended 12-Byte)
|
||||||
|
@returns {int[]} decrypted data
|
||||||
"""
|
"""
|
||||||
data = data[:] # copy
|
|
||||||
rcon_iteration = 1
|
|
||||||
key_size_bytes = len(data)
|
|
||||||
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
|
|
||||||
|
|
||||||
while len(data) < expanded_key_size_bytes:
|
# XXX: check aes, gcm param
|
||||||
temp = data[-4:]
|
|
||||||
temp = key_schedule_core(temp, rcon_iteration)
|
|
||||||
rcon_iteration += 1
|
|
||||||
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
|
||||||
|
|
||||||
for _ in range(3):
|
hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))
|
||||||
temp = data[-4:]
|
|
||||||
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
|
||||||
|
|
||||||
if key_size_bytes == 32:
|
if len(nonce) == 12:
|
||||||
temp = data[-4:]
|
j0 = nonce + [0, 0, 0, 1]
|
||||||
temp = sub_bytes(temp)
|
else:
|
||||||
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
|
||||||
|
ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
|
||||||
|
j0 = ghash(hash_subkey, ghash_in)
|
||||||
|
|
||||||
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
|
# TODO: add nonce support to aes_ctr_decrypt
|
||||||
temp = data[-4:]
|
|
||||||
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
|
||||||
data = data[:expanded_key_size_bytes]
|
|
||||||
|
|
||||||
return data
|
# nonce_ctr = j0[:12]
|
||||||
|
iv_ctr = inc(j0)
|
||||||
|
|
||||||
|
decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr)))
|
||||||
|
pad_len = len(data) // 16 * 16
|
||||||
|
s_tag = ghash(
|
||||||
|
hash_subkey,
|
||||||
|
data
|
||||||
|
+ [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len) # pad
|
||||||
|
+ bytes_to_intlist((0 * 8).to_bytes(8, 'big') # length of associated data
|
||||||
|
+ ((len(data) * 8).to_bytes(8, 'big'))) # length of data
|
||||||
|
)
|
||||||
|
|
||||||
|
if tag != aes_ctr_encrypt(s_tag, key, j0):
|
||||||
|
raise ValueError("Mismatching authentication tag")
|
||||||
|
|
||||||
|
return decrypted_data
|
||||||
|
|
||||||
|
|
||||||
def aes_encrypt(data, expanded_key):
|
def aes_encrypt(data, expanded_key):
|
||||||
@@ -138,7 +178,7 @@ def aes_encrypt(data, expanded_key):
|
|||||||
data = sub_bytes(data)
|
data = sub_bytes(data)
|
||||||
data = shift_rows(data)
|
data = shift_rows(data)
|
||||||
if i != rounds:
|
if i != rounds:
|
||||||
data = mix_columns(data)
|
data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX))
|
||||||
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
|
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
|
||||||
|
|
||||||
return data
|
return data
|
||||||
@@ -157,7 +197,7 @@ def aes_decrypt(data, expanded_key):
|
|||||||
for i in range(rounds, 0, -1):
|
for i in range(rounds, 0, -1):
|
||||||
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
|
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
|
||||||
if i != rounds:
|
if i != rounds:
|
||||||
data = mix_columns_inv(data)
|
data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
|
||||||
data = shift_rows_inv(data)
|
data = shift_rows_inv(data)
|
||||||
data = sub_bytes_inv(data)
|
data = sub_bytes_inv(data)
|
||||||
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
|
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
|
||||||
@@ -189,15 +229,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
|
|||||||
nonce = data[:NONCE_LENGTH_BYTES]
|
nonce = data[:NONCE_LENGTH_BYTES]
|
||||||
cipher = data[NONCE_LENGTH_BYTES:]
|
cipher = data[NONCE_LENGTH_BYTES:]
|
||||||
|
|
||||||
class Counter(object):
|
decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
|
||||||
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
|
|
||||||
|
|
||||||
def next_value(self):
|
|
||||||
temp = self.__value
|
|
||||||
self.__value = inc(self.__value)
|
|
||||||
return temp
|
|
||||||
|
|
||||||
decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
|
|
||||||
plaintext = intlist_to_bytes(decrypted_data)
|
plaintext = intlist_to_bytes(decrypted_data)
|
||||||
|
|
||||||
return plaintext
|
return plaintext
|
||||||
@@ -278,6 +310,47 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
|
|||||||
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
|
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
|
||||||
|
|
||||||
|
|
||||||
|
def key_expansion(data):
|
||||||
|
"""
|
||||||
|
Generate key schedule
|
||||||
|
|
||||||
|
@param {int[]} data 16/24/32-Byte cipher key
|
||||||
|
@returns {int[]} 176/208/240-Byte expanded key
|
||||||
|
"""
|
||||||
|
data = data[:] # copy
|
||||||
|
rcon_iteration = 1
|
||||||
|
key_size_bytes = len(data)
|
||||||
|
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
|
||||||
|
|
||||||
|
while len(data) < expanded_key_size_bytes:
|
||||||
|
temp = data[-4:]
|
||||||
|
temp = key_schedule_core(temp, rcon_iteration)
|
||||||
|
rcon_iteration += 1
|
||||||
|
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
||||||
|
|
||||||
|
for _ in range(3):
|
||||||
|
temp = data[-4:]
|
||||||
|
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
||||||
|
|
||||||
|
if key_size_bytes == 32:
|
||||||
|
temp = data[-4:]
|
||||||
|
temp = sub_bytes(temp)
|
||||||
|
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
||||||
|
|
||||||
|
for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
|
||||||
|
temp = data[-4:]
|
||||||
|
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
|
||||||
|
data = data[:expanded_key_size_bytes]
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
def iter_vector(iv):
|
||||||
|
while True:
|
||||||
|
yield iv
|
||||||
|
iv = inc(iv)
|
||||||
|
|
||||||
|
|
||||||
def sub_bytes(data):
|
def sub_bytes(data):
|
||||||
return [SBOX[x] for x in data]
|
return [SBOX[x] for x in data]
|
||||||
|
|
||||||
@@ -302,48 +375,36 @@ def xor(data1, data2):
|
|||||||
return [x ^ y for x, y in zip(data1, data2)]
|
return [x ^ y for x, y in zip(data1, data2)]
|
||||||
|
|
||||||
|
|
||||||
def rijndael_mul(a, b):
|
def iter_mix_columns(data, matrix):
|
||||||
if(a == 0 or b == 0):
|
for i in (0, 4, 8, 12):
|
||||||
return 0
|
for row in matrix:
|
||||||
return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
|
mixed = 0
|
||||||
|
for j in range(4):
|
||||||
|
# xor is (+) and (-)
|
||||||
def mix_column(data, matrix):
|
mixed ^= (0 if data[i:i + 4][j] == 0 or row[j] == 0 else
|
||||||
data_mixed = []
|
RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[data[i + j]] + RIJNDAEL_LOG_TABLE[row[j]]) % 0xFF])
|
||||||
for row in range(4):
|
yield mixed
|
||||||
mixed = 0
|
|
||||||
for column in range(4):
|
|
||||||
# xor is (+) and (-)
|
|
||||||
mixed ^= rijndael_mul(data[column], matrix[row][column])
|
|
||||||
data_mixed.append(mixed)
|
|
||||||
return data_mixed
|
|
||||||
|
|
||||||
|
|
||||||
def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
|
|
||||||
data_mixed = []
|
|
||||||
for i in range(4):
|
|
||||||
column = data[i * 4: (i + 1) * 4]
|
|
||||||
data_mixed += mix_column(column, matrix)
|
|
||||||
return data_mixed
|
|
||||||
|
|
||||||
|
|
||||||
def mix_columns_inv(data):
|
|
||||||
return mix_columns(data, MIX_COLUMN_MATRIX_INV)
|
|
||||||
|
|
||||||
|
|
||||||
def shift_rows(data):
|
def shift_rows(data):
|
||||||
data_shifted = []
|
return [data[((column + row) & 0b11) * 4 + row] for column in range(4) for row in range(4)]
|
||||||
for column in range(4):
|
|
||||||
for row in range(4):
|
|
||||||
data_shifted.append(data[((column + row) & 0b11) * 4 + row])
|
|
||||||
return data_shifted
|
|
||||||
|
|
||||||
|
|
||||||
def shift_rows_inv(data):
|
def shift_rows_inv(data):
|
||||||
|
return [data[((column - row) & 0b11) * 4 + row] for column in range(4) for row in range(4)]
|
||||||
|
|
||||||
|
|
||||||
|
def shift_block(data):
|
||||||
data_shifted = []
|
data_shifted = []
|
||||||
for column in range(4):
|
|
||||||
for row in range(4):
|
bit = 0
|
||||||
data_shifted.append(data[((column - row) & 0b11) * 4 + row])
|
for n in data:
|
||||||
|
if bit:
|
||||||
|
n |= 0x100
|
||||||
|
bit = n & 1
|
||||||
|
n >>= 1
|
||||||
|
data_shifted.append(n)
|
||||||
|
|
||||||
return data_shifted
|
return data_shifted
|
||||||
|
|
||||||
|
|
||||||
@@ -358,4 +419,50 @@ def inc(data):
|
|||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
|
def block_product(block_x, block_y):
|
||||||
|
# NIST SP 800-38D, Algorithm 1
|
||||||
|
|
||||||
|
if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
|
||||||
|
raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES)
|
||||||
|
|
||||||
|
block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
|
||||||
|
block_v = block_y[:]
|
||||||
|
block_z = [0] * BLOCK_SIZE_BYTES
|
||||||
|
|
||||||
|
for i in block_x:
|
||||||
|
for bit in range(7, -1, -1):
|
||||||
|
if i & (1 << bit):
|
||||||
|
block_z = xor(block_z, block_v)
|
||||||
|
|
||||||
|
do_xor = block_v[-1] & 1
|
||||||
|
block_v = shift_block(block_v)
|
||||||
|
if do_xor:
|
||||||
|
block_v = xor(block_v, block_r)
|
||||||
|
|
||||||
|
return block_z
|
||||||
|
|
||||||
|
|
||||||
|
def ghash(subkey, data):
|
||||||
|
# NIST SP 800-38D, Algorithm 2
|
||||||
|
|
||||||
|
if len(data) % BLOCK_SIZE_BYTES:
|
||||||
|
raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES)
|
||||||
|
|
||||||
|
last_y = [0] * BLOCK_SIZE_BYTES
|
||||||
|
for i in range(0, len(data), BLOCK_SIZE_BYTES):
|
||||||
|
block = data[i : i + BLOCK_SIZE_BYTES] # noqa: E203
|
||||||
|
last_y = block_product(xor(last_y, block), subkey)
|
||||||
|
|
||||||
|
return last_y
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'aes_ctr_decrypt',
|
||||||
|
'aes_cbc_decrypt',
|
||||||
|
'aes_cbc_decrypt_bytes',
|
||||||
|
'aes_decrypt_text',
|
||||||
|
'aes_encrypt',
|
||||||
|
'aes_gcm_decrypt_and_verify',
|
||||||
|
'aes_gcm_decrypt_and_verify_bytes',
|
||||||
|
'key_expansion'
|
||||||
|
]
|
||||||
|
|||||||
@@ -50,6 +50,7 @@ class Cache(object):
|
|||||||
except OSError as ose:
|
except OSError as ose:
|
||||||
if ose.errno != errno.EEXIST:
|
if ose.errno != errno.EEXIST:
|
||||||
raise
|
raise
|
||||||
|
self._ydl.write_debug(f'Saving {section}.{key} to cache')
|
||||||
write_json_file(data, fn)
|
write_json_file(data, fn)
|
||||||
except Exception:
|
except Exception:
|
||||||
tb = traceback.format_exc()
|
tb = traceback.format_exc()
|
||||||
@@ -66,6 +67,7 @@ class Cache(object):
|
|||||||
try:
|
try:
|
||||||
try:
|
try:
|
||||||
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
|
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
|
||||||
|
self._ydl.write_debug(f'Loading {section}.{key} from cache')
|
||||||
return json.load(cachef)
|
return json.load(cachef)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ import shlex
|
|||||||
import shutil
|
import shutil
|
||||||
import socket
|
import socket
|
||||||
import struct
|
import struct
|
||||||
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import tokenize
|
import tokenize
|
||||||
import urllib
|
import urllib
|
||||||
@@ -33,6 +34,8 @@ class compat_HTMLParseError(Exception):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
|
||||||
|
# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
|
||||||
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
|
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
|
||||||
return ctypes.WINFUNCTYPE(*args, **kwargs)
|
return ctypes.WINFUNCTYPE(*args, **kwargs)
|
||||||
|
|
||||||
@@ -130,6 +133,41 @@ except AttributeError:
|
|||||||
asyncio.run = compat_asyncio_run
|
asyncio.run = compat_asyncio_run
|
||||||
|
|
||||||
|
|
||||||
|
# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
|
||||||
|
# See https://github.com/yt-dlp/yt-dlp/issues/792
|
||||||
|
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
|
||||||
|
if compat_os_name in ('nt', 'ce') and 'HOME' in os.environ:
|
||||||
|
_userhome = os.environ['HOME']
|
||||||
|
|
||||||
|
def compat_expanduser(path):
|
||||||
|
if not path.startswith('~'):
|
||||||
|
return path
|
||||||
|
i = path.replace('\\', '/', 1).find('/') # ~user
|
||||||
|
if i < 0:
|
||||||
|
i = len(path)
|
||||||
|
userhome = os.path.join(os.path.dirname(_userhome), path[1:i]) if i > 1 else _userhome
|
||||||
|
return userhome + path[i:]
|
||||||
|
else:
|
||||||
|
compat_expanduser = os.path.expanduser
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from Cryptodome.Cipher import AES as compat_pycrypto_AES
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
from Crypto.Cipher import AES as compat_pycrypto_AES
|
||||||
|
except ImportError:
|
||||||
|
compat_pycrypto_AES = None
|
||||||
|
|
||||||
|
|
||||||
|
def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
|
||||||
|
if compat_os_name != 'nt':
|
||||||
|
return
|
||||||
|
startupinfo = subprocess.STARTUPINFO()
|
||||||
|
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
|
||||||
|
subprocess.Popen('', shell=True, startupinfo=startupinfo)
|
||||||
|
|
||||||
|
|
||||||
# Deprecated
|
# Deprecated
|
||||||
|
|
||||||
compat_basestring = str
|
compat_basestring = str
|
||||||
@@ -152,7 +190,6 @@ compat_cookies = http.cookies
|
|||||||
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
|
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
|
||||||
compat_etree_Element = etree.Element
|
compat_etree_Element = etree.Element
|
||||||
compat_etree_register_namespace = etree.register_namespace
|
compat_etree_register_namespace = etree.register_namespace
|
||||||
compat_expanduser = os.path.expanduser
|
|
||||||
compat_get_terminal_size = shutil.get_terminal_size
|
compat_get_terminal_size = shutil.get_terminal_size
|
||||||
compat_getenv = os.getenv
|
compat_getenv = os.getenv
|
||||||
compat_getpass = getpass.getpass
|
compat_getpass = getpass.getpass
|
||||||
@@ -224,6 +261,7 @@ __all__ = [
|
|||||||
'compat_os_name',
|
'compat_os_name',
|
||||||
'compat_parse_qs',
|
'compat_parse_qs',
|
||||||
'compat_print',
|
'compat_print',
|
||||||
|
'compat_pycrypto_AES',
|
||||||
'compat_realpath',
|
'compat_realpath',
|
||||||
'compat_setenv',
|
'compat_setenv',
|
||||||
'compat_shlex_quote',
|
'compat_shlex_quote',
|
||||||
@@ -252,5 +290,6 @@ __all__ = [
|
|||||||
'compat_xml_parse_error',
|
'compat_xml_parse_error',
|
||||||
'compat_xpath',
|
'compat_xpath',
|
||||||
'compat_zip',
|
'compat_zip',
|
||||||
|
'windows_enable_vt_mode',
|
||||||
'workaround_optparse_bug9161',
|
'workaround_optparse_bug9161',
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -9,17 +9,15 @@ import tempfile
|
|||||||
from datetime import datetime, timedelta, timezone
|
from datetime import datetime, timedelta, timezone
|
||||||
from hashlib import pbkdf2_hmac
|
from hashlib import pbkdf2_hmac
|
||||||
|
|
||||||
from yt_dlp.aes import aes_cbc_decrypt
|
from .aes import aes_cbc_decrypt_bytes, aes_gcm_decrypt_and_verify_bytes
|
||||||
from yt_dlp.compat import (
|
from .compat import (
|
||||||
compat_b64decode,
|
compat_b64decode,
|
||||||
compat_cookiejar_Cookie,
|
compat_cookiejar_Cookie,
|
||||||
)
|
)
|
||||||
from yt_dlp.utils import (
|
from .utils import (
|
||||||
bug_reports_message,
|
bug_reports_message,
|
||||||
bytes_to_intlist,
|
|
||||||
expand_path,
|
expand_path,
|
||||||
intlist_to_bytes,
|
Popen,
|
||||||
process_communicate_or_kill,
|
|
||||||
YoutubeDLCookieJar,
|
YoutubeDLCookieJar,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -32,12 +30,6 @@ except ImportError:
|
|||||||
SQLITE_AVAILABLE = False
|
SQLITE_AVAILABLE = False
|
||||||
|
|
||||||
|
|
||||||
try:
|
|
||||||
from Crypto.Cipher import AES
|
|
||||||
CRYPTO_AVAILABLE = True
|
|
||||||
except ImportError:
|
|
||||||
CRYPTO_AVAILABLE = False
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import keyring
|
import keyring
|
||||||
KEYRING_AVAILABLE = True
|
KEYRING_AVAILABLE = True
|
||||||
@@ -123,9 +115,9 @@ def _extract_firefox_cookies(profile, logger):
|
|||||||
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite')
|
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite')
|
||||||
if cookie_database_path is None:
|
if cookie_database_path is None:
|
||||||
raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
|
raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root))
|
||||||
logger.debug('extracting from: "{}"'.format(cookie_database_path))
|
logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
|
||||||
|
|
||||||
with tempfile.TemporaryDirectory(prefix='youtube_dl') as tmpdir:
|
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
|
||||||
cursor = None
|
cursor = None
|
||||||
try:
|
try:
|
||||||
cursor = _open_database_copy(cookie_database_path, tmpdir)
|
cursor = _open_database_copy(cookie_database_path, tmpdir)
|
||||||
@@ -240,11 +232,11 @@ def _extract_chrome_cookies(browser_name, profile, logger):
|
|||||||
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies')
|
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies')
|
||||||
if cookie_database_path is None:
|
if cookie_database_path is None:
|
||||||
raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
|
raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
|
||||||
logger.debug('extracting from: "{}"'.format(cookie_database_path))
|
logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
|
||||||
|
|
||||||
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger)
|
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger)
|
||||||
|
|
||||||
with tempfile.TemporaryDirectory(prefix='youtube_dl') as tmpdir:
|
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
|
||||||
cursor = None
|
cursor = None
|
||||||
try:
|
try:
|
||||||
cursor = _open_database_copy(cookie_database_path, tmpdir)
|
cursor = _open_database_copy(cookie_database_path, tmpdir)
|
||||||
@@ -361,7 +353,7 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
|
|||||||
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
|
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
|
||||||
def __init__(self, browser_keyring_name, logger):
|
def __init__(self, browser_keyring_name, logger):
|
||||||
self._logger = logger
|
self._logger = logger
|
||||||
password = _get_mac_keyring_password(browser_keyring_name)
|
password = _get_mac_keyring_password(browser_keyring_name, logger)
|
||||||
self._v10_key = None if password is None else self.derive_key(password)
|
self._v10_key = None if password is None else self.derive_key(password)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@@ -400,11 +392,6 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
|
|||||||
if self._v10_key is None:
|
if self._v10_key is None:
|
||||||
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
|
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
|
||||||
return None
|
return None
|
||||||
elif not CRYPTO_AVAILABLE:
|
|
||||||
self._logger.warning('cannot decrypt cookie as the `pycryptodome` module is not installed. '
|
|
||||||
'Please install by running `python3 -m pip install pycryptodome`',
|
|
||||||
only_once=True)
|
|
||||||
return None
|
|
||||||
|
|
||||||
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
|
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
|
||||||
# kNonceLength
|
# kNonceLength
|
||||||
@@ -559,7 +546,7 @@ def _parse_safari_cookies_record(data, jar, logger):
|
|||||||
p.skip_to(value_offset)
|
p.skip_to(value_offset)
|
||||||
value = p.read_cstring()
|
value = p.read_cstring()
|
||||||
except UnicodeDecodeError:
|
except UnicodeDecodeError:
|
||||||
logger.warning('failed to parse cookie because UTF-8 decoding failed')
|
logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
|
||||||
return record_size
|
return record_size
|
||||||
|
|
||||||
p.skip_to(record_size, 'space at the end of the record')
|
p.skip_to(record_size, 'space at the end of the record')
|
||||||
@@ -605,21 +592,26 @@ def _get_linux_keyring_password(browser_keyring_name):
|
|||||||
return password.encode('utf-8')
|
return password.encode('utf-8')
|
||||||
|
|
||||||
|
|
||||||
def _get_mac_keyring_password(browser_keyring_name):
|
def _get_mac_keyring_password(browser_keyring_name, logger):
|
||||||
if KEYRING_AVAILABLE:
|
if KEYRING_AVAILABLE:
|
||||||
|
logger.debug('using keyring to obtain password')
|
||||||
password = keyring.get_password('{} Safe Storage'.format(browser_keyring_name), browser_keyring_name)
|
password = keyring.get_password('{} Safe Storage'.format(browser_keyring_name), browser_keyring_name)
|
||||||
return password.encode('utf-8')
|
return password.encode('utf-8')
|
||||||
else:
|
else:
|
||||||
proc = subprocess.Popen(['security', 'find-generic-password',
|
logger.debug('using find-generic-password to obtain password')
|
||||||
'-w', # write password to stdout
|
proc = Popen(
|
||||||
'-a', browser_keyring_name, # match 'account'
|
['security', 'find-generic-password',
|
||||||
'-s', '{} Safe Storage'.format(browser_keyring_name)], # match 'service'
|
'-w', # write password to stdout
|
||||||
stdout=subprocess.PIPE,
|
'-a', browser_keyring_name, # match 'account'
|
||||||
stderr=subprocess.DEVNULL)
|
'-s', '{} Safe Storage'.format(browser_keyring_name)], # match 'service'
|
||||||
|
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||||
try:
|
try:
|
||||||
stdout, stderr = process_communicate_or_kill(proc)
|
stdout, stderr = proc.communicate_or_kill()
|
||||||
|
if stdout[-1:] == b'\n':
|
||||||
|
stdout = stdout[:-1]
|
||||||
return stdout
|
return stdout
|
||||||
except BaseException:
|
except BaseException as e:
|
||||||
|
logger.warning(f'exception running find-generic-password: {type(e).__name__}({e})')
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
@@ -628,7 +620,7 @@ def _get_windows_v10_key(browser_root, logger):
|
|||||||
if path is None:
|
if path is None:
|
||||||
logger.error('could not find local state file')
|
logger.error('could not find local state file')
|
||||||
return None
|
return None
|
||||||
with open(path, 'r') as f:
|
with open(path, 'r', encoding='utf8') as f:
|
||||||
data = json.load(f)
|
data = json.load(f)
|
||||||
try:
|
try:
|
||||||
base64_key = data['os_crypt']['encrypted_key']
|
base64_key = data['os_crypt']['encrypted_key']
|
||||||
@@ -648,29 +640,26 @@ def pbkdf2_sha1(password, salt, iterations, key_length):
|
|||||||
|
|
||||||
|
|
||||||
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
|
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
|
||||||
plaintext = aes_cbc_decrypt(bytes_to_intlist(ciphertext),
|
plaintext = aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector)
|
||||||
bytes_to_intlist(key),
|
|
||||||
bytes_to_intlist(initialization_vector))
|
|
||||||
padding_length = plaintext[-1]
|
padding_length = plaintext[-1]
|
||||||
try:
|
try:
|
||||||
return intlist_to_bytes(plaintext[:-padding_length]).decode('utf-8')
|
return plaintext[:-padding_length].decode('utf-8')
|
||||||
except UnicodeDecodeError:
|
except UnicodeDecodeError:
|
||||||
logger.warning('failed to decrypt cookie because UTF-8 decoding failed. Possibly the key is wrong?')
|
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
|
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
|
||||||
cipher = AES.new(key, AES.MODE_GCM, nonce)
|
|
||||||
try:
|
try:
|
||||||
plaintext = cipher.decrypt_and_verify(ciphertext, authentication_tag)
|
plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
logger.warning('failed to decrypt cookie because the MAC check failed. Possibly the key is wrong?')
|
logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
return plaintext.decode('utf-8')
|
return plaintext.decode('utf-8')
|
||||||
except UnicodeDecodeError:
|
except UnicodeDecodeError:
|
||||||
logger.warning('failed to decrypt cookie because UTF-8 decoding failed. Possibly the key is wrong?')
|
logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
@@ -698,7 +687,7 @@ def _decrypt_windows_dpapi(ciphertext, logger):
|
|||||||
ctypes.byref(blob_out) # pDataOut
|
ctypes.byref(blob_out) # pDataOut
|
||||||
)
|
)
|
||||||
if not ret:
|
if not ret:
|
||||||
logger.warning('failed to decrypt with DPAPI')
|
logger.warning('failed to decrypt with DPAPI', only_once=True)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
|
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
|
||||||
@@ -748,6 +737,7 @@ def _is_path(value):
|
|||||||
|
|
||||||
|
|
||||||
def _parse_browser_specification(browser_name, profile=None):
|
def _parse_browser_specification(browser_name, profile=None):
|
||||||
|
browser_name = browser_name.lower()
|
||||||
if browser_name not in SUPPORTED_BROWSERS:
|
if browser_name not in SUPPORTED_BROWSERS:
|
||||||
raise ValueError(f'unsupported browser: "{browser_name}"')
|
raise ValueError(f'unsupported browser: "{browser_name}"')
|
||||||
if profile is not None and _is_path(profile):
|
if profile is not None and _is_path(profile):
|
||||||
|
|||||||
@@ -10,10 +10,15 @@ from ..utils import (
|
|||||||
def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=None, to_stdout=False):
|
def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=None, to_stdout=False):
|
||||||
info_dict['protocol'] = determine_protocol(info_dict)
|
info_dict['protocol'] = determine_protocol(info_dict)
|
||||||
info_copy = info_dict.copy()
|
info_copy = info_dict.copy()
|
||||||
if protocol:
|
|
||||||
info_copy['protocol'] = protocol
|
|
||||||
info_copy['to_stdout'] = to_stdout
|
info_copy['to_stdout'] = to_stdout
|
||||||
return _get_suitable_downloader(info_copy, params, default)
|
|
||||||
|
downloaders = [_get_suitable_downloader(info_copy, proto, params, default)
|
||||||
|
for proto in (protocol or info_copy['protocol']).split('+')]
|
||||||
|
if set(downloaders) == {FFmpegFD} and FFmpegFD.can_merge_formats(info_copy, params):
|
||||||
|
return FFmpegFD
|
||||||
|
elif len(downloaders) == 1:
|
||||||
|
return downloaders[0]
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
# Some of these require get_suitable_downloader
|
# Some of these require get_suitable_downloader
|
||||||
@@ -72,7 +77,7 @@ def shorten_protocol_name(proto, simplify=False):
|
|||||||
return short_protocol_names.get(proto, proto)
|
return short_protocol_names.get(proto, proto)
|
||||||
|
|
||||||
|
|
||||||
def _get_suitable_downloader(info_dict, params, default):
|
def _get_suitable_downloader(info_dict, protocol, params, default):
|
||||||
"""Get the downloader class that can handle the info dict."""
|
"""Get the downloader class that can handle the info dict."""
|
||||||
if default is NO_DEFAULT:
|
if default is NO_DEFAULT:
|
||||||
default = HttpFD
|
default = HttpFD
|
||||||
@@ -80,7 +85,7 @@ def _get_suitable_downloader(info_dict, params, default):
|
|||||||
# if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
|
# if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
|
||||||
# return FFmpegFD
|
# return FFmpegFD
|
||||||
|
|
||||||
protocol = info_dict['protocol']
|
info_dict['protocol'] = protocol
|
||||||
downloaders = params.get('external_downloader')
|
downloaders = params.get('external_downloader')
|
||||||
external_downloader = (
|
external_downloader = (
|
||||||
downloaders if isinstance(downloaders, compat_str) or downloaders is None
|
downloaders if isinstance(downloaders, compat_str) or downloaders is None
|
||||||
@@ -94,6 +99,10 @@ def _get_suitable_downloader(info_dict, params, default):
|
|||||||
if ed.can_download(info_dict, external_downloader):
|
if ed.can_download(info_dict, external_downloader):
|
||||||
return ed
|
return ed
|
||||||
|
|
||||||
|
if protocol == 'http_dash_segments':
|
||||||
|
if info_dict.get('is_live') and (external_downloader or '').lower() != 'native':
|
||||||
|
return FFmpegFD
|
||||||
|
|
||||||
if protocol in ('m3u8', 'm3u8_native'):
|
if protocol in ('m3u8', 'm3u8_native'):
|
||||||
if info_dict.get('is_live'):
|
if info_dict.get('is_live'):
|
||||||
return FFmpegFD
|
return FFmpegFD
|
||||||
|
|||||||
@@ -1,13 +1,10 @@
|
|||||||
from __future__ import division, unicode_literals
|
from __future__ import division, unicode_literals
|
||||||
|
|
||||||
import copy
|
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import sys
|
|
||||||
import time
|
import time
|
||||||
import random
|
import random
|
||||||
|
|
||||||
from ..compat import compat_os_name
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
decodeArgument,
|
decodeArgument,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
@@ -15,6 +12,13 @@ from ..utils import (
|
|||||||
format_bytes,
|
format_bytes,
|
||||||
shell_quote,
|
shell_quote,
|
||||||
timeconvert,
|
timeconvert,
|
||||||
|
timetuple_from_msec,
|
||||||
|
)
|
||||||
|
from ..minicurses import (
|
||||||
|
MultilineLogger,
|
||||||
|
MultilinePrinter,
|
||||||
|
QuietMultilinePrinter,
|
||||||
|
BreaklineStatusPrinter
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -39,20 +43,22 @@ class FileDownloader(object):
|
|||||||
noresizebuffer: Do not automatically resize the download buffer.
|
noresizebuffer: Do not automatically resize the download buffer.
|
||||||
continuedl: Try to continue downloads if possible.
|
continuedl: Try to continue downloads if possible.
|
||||||
noprogress: Do not print the progress bar.
|
noprogress: Do not print the progress bar.
|
||||||
logtostderr: Log messages to stderr instead of stdout.
|
|
||||||
consoletitle: Display progress in console window's titlebar.
|
|
||||||
nopart: Do not use temporary .part files.
|
nopart: Do not use temporary .part files.
|
||||||
updatetime: Use the Last-modified header to set output file timestamps.
|
updatetime: Use the Last-modified header to set output file timestamps.
|
||||||
test: Download only first bytes to test the downloader.
|
test: Download only first bytes to test the downloader.
|
||||||
min_filesize: Skip files smaller than this size
|
min_filesize: Skip files smaller than this size
|
||||||
max_filesize: Skip files larger than this size
|
max_filesize: Skip files larger than this size
|
||||||
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
|
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
|
||||||
external_downloader_args: A list of additional command-line arguments for the
|
external_downloader_args: A dictionary of downloader keys (in lower case)
|
||||||
external downloader.
|
and a list of additional command-line arguments for the
|
||||||
|
executable. Use 'default' as the name for arguments to be
|
||||||
|
passed to all downloaders. For compatibility with youtube-dl,
|
||||||
|
a single list of args can also be used
|
||||||
hls_use_mpegts: Use the mpegts container for HLS videos.
|
hls_use_mpegts: Use the mpegts container for HLS videos.
|
||||||
http_chunk_size: Size of a chunk for chunk-based HTTP downloading. May be
|
http_chunk_size: Size of a chunk for chunk-based HTTP downloading. May be
|
||||||
useful for bypassing bandwidth throttling imposed by
|
useful for bypassing bandwidth throttling imposed by
|
||||||
a webserver (experimental)
|
a webserver (experimental)
|
||||||
|
progress_template: See YoutubeDL.py
|
||||||
|
|
||||||
Subclasses of this one must re-define the real_download method.
|
Subclasses of this one must re-define the real_download method.
|
||||||
"""
|
"""
|
||||||
@@ -65,18 +71,17 @@ class FileDownloader(object):
|
|||||||
self.ydl = ydl
|
self.ydl = ydl
|
||||||
self._progress_hooks = []
|
self._progress_hooks = []
|
||||||
self.params = params
|
self.params = params
|
||||||
|
self._prepare_multiline_status()
|
||||||
self.add_progress_hook(self.report_progress)
|
self.add_progress_hook(self.report_progress)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def format_seconds(seconds):
|
def format_seconds(seconds):
|
||||||
(mins, secs) = divmod(seconds, 60)
|
time = timetuple_from_msec(seconds * 1000)
|
||||||
(hours, mins) = divmod(mins, 60)
|
if time.hours > 99:
|
||||||
if hours > 99:
|
|
||||||
return '--:--:--'
|
return '--:--:--'
|
||||||
if hours == 0:
|
if not time.hours:
|
||||||
return '%02d:%02d' % (mins, secs)
|
return '%02d:%02d' % time[1:-1]
|
||||||
else:
|
return '%02d:%02d:%02d' % time[:-1]
|
||||||
return '%02d:%02d:%02d' % (hours, mins, secs)
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def calc_percent(byte_counter, data_len):
|
def calc_percent(byte_counter, data_len):
|
||||||
@@ -201,12 +206,12 @@ class FileDownloader(object):
|
|||||||
return filename + '.ytdl'
|
return filename + '.ytdl'
|
||||||
|
|
||||||
def try_rename(self, old_filename, new_filename):
|
def try_rename(self, old_filename, new_filename):
|
||||||
|
if old_filename == new_filename:
|
||||||
|
return
|
||||||
try:
|
try:
|
||||||
if old_filename == new_filename:
|
os.replace(old_filename, new_filename)
|
||||||
return
|
|
||||||
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
|
|
||||||
except (IOError, OSError) as err:
|
except (IOError, OSError) as err:
|
||||||
self.report_error('unable to rename file: %s' % error_to_compat_str(err))
|
self.report_error(f'unable to rename file: {err}')
|
||||||
|
|
||||||
def try_utime(self, filename, last_modified_hdr):
|
def try_utime(self, filename, last_modified_hdr):
|
||||||
"""Try to set the last-modified time of the given file."""
|
"""Try to set the last-modified time of the given file."""
|
||||||
@@ -233,39 +238,46 @@ class FileDownloader(object):
|
|||||||
"""Report destination filename."""
|
"""Report destination filename."""
|
||||||
self.to_screen('[download] Destination: ' + filename)
|
self.to_screen('[download] Destination: ' + filename)
|
||||||
|
|
||||||
def _report_progress_status(self, msg, is_last_line=False):
|
def _prepare_multiline_status(self, lines=1):
|
||||||
fullmsg = '[download] ' + msg
|
if self.params.get('noprogress'):
|
||||||
if self.params.get('progress_with_newline', False):
|
self._multiline = QuietMultilinePrinter()
|
||||||
self.to_screen(fullmsg)
|
elif self.ydl.params.get('logger'):
|
||||||
|
self._multiline = MultilineLogger(self.ydl.params['logger'], lines)
|
||||||
|
elif self.params.get('progress_with_newline'):
|
||||||
|
self._multiline = BreaklineStatusPrinter(self.ydl._screen_file, lines)
|
||||||
else:
|
else:
|
||||||
if compat_os_name == 'nt':
|
self._multiline = MultilinePrinter(self.ydl._screen_file, lines, not self.params.get('quiet'))
|
||||||
prev_len = getattr(self, '_report_progress_prev_line_length',
|
|
||||||
0)
|
def _finish_multiline_status(self):
|
||||||
if prev_len > len(fullmsg):
|
self._multiline.end()
|
||||||
fullmsg += ' ' * (prev_len - len(fullmsg))
|
|
||||||
self._report_progress_prev_line_length = len(fullmsg)
|
def _report_progress_status(self, s):
|
||||||
clear_line = '\r'
|
progress_dict = s.copy()
|
||||||
else:
|
progress_dict.pop('info_dict')
|
||||||
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
|
progress_dict = {'info': s['info_dict'], 'progress': progress_dict}
|
||||||
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
|
|
||||||
self.to_console_title('yt-dlp ' + msg)
|
progress_template = self.params.get('progress_template', {})
|
||||||
|
self._multiline.print_at_line(self.ydl.evaluate_outtmpl(
|
||||||
|
progress_template.get('download') or '[download] %(progress._default_template)s',
|
||||||
|
progress_dict), s.get('progress_idx') or 0)
|
||||||
|
self.to_console_title(self.ydl.evaluate_outtmpl(
|
||||||
|
progress_template.get('download-title') or 'yt-dlp %(progress._default_template)s',
|
||||||
|
progress_dict))
|
||||||
|
|
||||||
def report_progress(self, s):
|
def report_progress(self, s):
|
||||||
if s['status'] == 'finished':
|
if s['status'] == 'finished':
|
||||||
if self.params.get('noprogress', False):
|
if self.params.get('noprogress'):
|
||||||
self.to_screen('[download] Download completed')
|
self.to_screen('[download] Download completed')
|
||||||
else:
|
msg_template = '100%%'
|
||||||
msg_template = '100%%'
|
if s.get('total_bytes') is not None:
|
||||||
if s.get('total_bytes') is not None:
|
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
|
||||||
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
|
msg_template += ' of %(_total_bytes_str)s'
|
||||||
msg_template += ' of %(_total_bytes_str)s'
|
if s.get('elapsed') is not None:
|
||||||
if s.get('elapsed') is not None:
|
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
||||||
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
msg_template += ' in %(_elapsed_str)s'
|
||||||
msg_template += ' in %(_elapsed_str)s'
|
s['_percent_str'] = self.format_percent(100)
|
||||||
self._report_progress_status(
|
s['_default_template'] = msg_template % s
|
||||||
msg_template % s, is_last_line=True)
|
self._report_progress_status(s)
|
||||||
|
|
||||||
if self.params.get('noprogress'):
|
|
||||||
return
|
return
|
||||||
|
|
||||||
if s['status'] != 'downloading':
|
if s['status'] != 'downloading':
|
||||||
@@ -307,8 +319,12 @@ class FileDownloader(object):
|
|||||||
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
|
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
|
||||||
else:
|
else:
|
||||||
msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
|
msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
|
||||||
|
if s.get('fragment_index') and s.get('fragment_count'):
|
||||||
self._report_progress_status(msg_template % s)
|
msg_template += ' (frag %(fragment_index)s/%(fragment_count)s)'
|
||||||
|
elif s.get('fragment_index'):
|
||||||
|
msg_template += ' (frag %(fragment_index)s)'
|
||||||
|
s['_default_template'] = msg_template % s
|
||||||
|
self._report_progress_status(s)
|
||||||
|
|
||||||
def report_resuming_byte(self, resume_len):
|
def report_resuming_byte(self, resume_len):
|
||||||
"""Report attempt to resume at given byte."""
|
"""Report attempt to resume at given byte."""
|
||||||
@@ -320,12 +336,9 @@ class FileDownloader(object):
|
|||||||
'[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
|
'[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
|
||||||
% (error_to_compat_str(err), count, self.format_retries(retries)))
|
% (error_to_compat_str(err), count, self.format_retries(retries)))
|
||||||
|
|
||||||
def report_file_already_downloaded(self, file_name):
|
def report_file_already_downloaded(self, *args, **kwargs):
|
||||||
"""Report file has already been fully downloaded."""
|
"""Report file has already been fully downloaded."""
|
||||||
try:
|
return self.ydl.report_file_already_downloaded(*args, **kwargs)
|
||||||
self.to_screen('[download] %s has already been downloaded' % file_name)
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
self.to_screen('[download] The file has already been downloaded')
|
|
||||||
|
|
||||||
def report_unable_to_resume(self):
|
def report_unable_to_resume(self):
|
||||||
"""Report it was impossible to resume download."""
|
"""Report it was impossible to resume download."""
|
||||||
@@ -343,7 +356,7 @@ class FileDownloader(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
nooverwrites_and_exists = (
|
nooverwrites_and_exists = (
|
||||||
not self.params.get('overwrites', subtitle)
|
not self.params.get('overwrites', True)
|
||||||
and os.path.exists(encodeFilename(filename))
|
and os.path.exists(encodeFilename(filename))
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -383,7 +396,9 @@ class FileDownloader(object):
|
|||||||
'[download] Sleeping %s seconds ...' % (
|
'[download] Sleeping %s seconds ...' % (
|
||||||
sleep_interval_sub))
|
sleep_interval_sub))
|
||||||
time.sleep(sleep_interval_sub)
|
time.sleep(sleep_interval_sub)
|
||||||
return self.real_download(filename, info_dict), True
|
ret = self.real_download(filename, info_dict)
|
||||||
|
self._finish_multiline_status()
|
||||||
|
return ret, True
|
||||||
|
|
||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
"""Real download process. Redefine in subclasses."""
|
"""Real download process. Redefine in subclasses."""
|
||||||
@@ -392,13 +407,10 @@ class FileDownloader(object):
|
|||||||
def _hook_progress(self, status, info_dict):
|
def _hook_progress(self, status, info_dict):
|
||||||
if not self._progress_hooks:
|
if not self._progress_hooks:
|
||||||
return
|
return
|
||||||
info_dict = dict(info_dict)
|
status['info_dict'] = info_dict
|
||||||
for key in ('__original_infodict', '__postprocessors'):
|
|
||||||
info_dict.pop(key, None)
|
|
||||||
# youtube-dl passes the same status object to all the hooks.
|
# youtube-dl passes the same status object to all the hooks.
|
||||||
# Some third party scripts seems to be relying on this.
|
# Some third party scripts seems to be relying on this.
|
||||||
# So keep this behavior if possible
|
# So keep this behavior if possible
|
||||||
status['info_dict'] = copy.deepcopy(info_dict)
|
|
||||||
for ph in self._progress_hooks:
|
for ph in self._progress_hooks:
|
||||||
ph(status)
|
ph(status)
|
||||||
|
|
||||||
|
|||||||
@@ -55,9 +55,8 @@ class DashSegmentsFD(FragmentFD):
|
|||||||
if real_downloader:
|
if real_downloader:
|
||||||
self.to_screen(
|
self.to_screen(
|
||||||
'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
|
'[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
|
||||||
info_copy = info_dict.copy()
|
info_dict['fragments'] = fragments_to_download
|
||||||
info_copy['fragments'] = fragments_to_download
|
|
||||||
fd = real_downloader(self.ydl, self.params)
|
fd = real_downloader(self.ydl, self.params)
|
||||||
return fd.real_download(filename, info_copy)
|
return fd.real_download(filename, info_dict)
|
||||||
|
|
||||||
return self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
|
return self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
|
||||||
|
|||||||
@@ -6,13 +6,7 @@ import subprocess
|
|||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
|
|
||||||
try:
|
from .fragment import FragmentFD
|
||||||
from Crypto.Cipher import AES
|
|
||||||
can_decrypt_frag = True
|
|
||||||
except ImportError:
|
|
||||||
can_decrypt_frag = False
|
|
||||||
|
|
||||||
from .common import FileDownloader
|
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_setenv,
|
compat_setenv,
|
||||||
compat_str,
|
compat_str,
|
||||||
@@ -22,19 +16,17 @@ from ..utils import (
|
|||||||
cli_option,
|
cli_option,
|
||||||
cli_valueless_option,
|
cli_valueless_option,
|
||||||
cli_bool_option,
|
cli_bool_option,
|
||||||
cli_configuration_args,
|
_configuration_args,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
encodeArgument,
|
encodeArgument,
|
||||||
handle_youtubedl_headers,
|
handle_youtubedl_headers,
|
||||||
check_executable,
|
check_executable,
|
||||||
is_outdated_version,
|
Popen,
|
||||||
process_communicate_or_kill,
|
|
||||||
sanitized_Request,
|
|
||||||
sanitize_open,
|
sanitize_open,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class ExternalFD(FileDownloader):
|
class ExternalFD(FragmentFD):
|
||||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps')
|
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps')
|
||||||
can_download_to_stdout = False
|
can_download_to_stdout = False
|
||||||
|
|
||||||
@@ -111,11 +103,10 @@ class ExternalFD(FileDownloader):
|
|||||||
def _valueless_option(self, command_option, param, expected_value=True):
|
def _valueless_option(self, command_option, param, expected_value=True):
|
||||||
return cli_valueless_option(self.params, command_option, param, expected_value)
|
return cli_valueless_option(self.params, command_option, param, expected_value)
|
||||||
|
|
||||||
def _configuration_args(self, *args, **kwargs):
|
def _configuration_args(self, keys=None, *args, **kwargs):
|
||||||
return cli_configuration_args(
|
return _configuration_args(
|
||||||
self.params.get('external_downloader_args'),
|
self.get_basename(), self.params.get('external_downloader_args'), self.get_basename(),
|
||||||
[self.get_basename(), 'default'],
|
keys, *args, **kwargs)
|
||||||
*args, **kwargs)
|
|
||||||
|
|
||||||
def _call_downloader(self, tmpfilename, info_dict):
|
def _call_downloader(self, tmpfilename, info_dict):
|
||||||
""" Either overwrite this or implement _make_cmd """
|
""" Either overwrite this or implement _make_cmd """
|
||||||
@@ -123,73 +114,54 @@ class ExternalFD(FileDownloader):
|
|||||||
|
|
||||||
self._debug_cmd(cmd)
|
self._debug_cmd(cmd)
|
||||||
|
|
||||||
if 'fragments' in info_dict:
|
if 'fragments' not in info_dict:
|
||||||
fragment_retries = self.params.get('fragment_retries', 0)
|
p = Popen(cmd, stderr=subprocess.PIPE)
|
||||||
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
_, stderr = p.communicate_or_kill()
|
||||||
|
|
||||||
count = 0
|
|
||||||
while count <= fragment_retries:
|
|
||||||
p = subprocess.Popen(
|
|
||||||
cmd, stderr=subprocess.PIPE)
|
|
||||||
_, stderr = process_communicate_or_kill(p)
|
|
||||||
if p.returncode == 0:
|
|
||||||
break
|
|
||||||
# TODO: Decide whether to retry based on error code
|
|
||||||
# https://aria2.github.io/manual/en/html/aria2c.html#exit-status
|
|
||||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
|
||||||
count += 1
|
|
||||||
if count <= fragment_retries:
|
|
||||||
self.to_screen(
|
|
||||||
'[%s] Got error. Retrying fragments (attempt %d of %s)...'
|
|
||||||
% (self.get_basename(), count, self.format_retries(fragment_retries)))
|
|
||||||
if count > fragment_retries:
|
|
||||||
if not skip_unavailable_fragments:
|
|
||||||
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
|
||||||
return -1
|
|
||||||
|
|
||||||
dest, _ = sanitize_open(tmpfilename, 'wb')
|
|
||||||
for frag_index, fragment in enumerate(info_dict['fragments']):
|
|
||||||
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
|
|
||||||
try:
|
|
||||||
src, _ = sanitize_open(fragment_filename, 'rb')
|
|
||||||
except IOError:
|
|
||||||
if skip_unavailable_fragments and frag_index > 1:
|
|
||||||
self.to_screen('[%s] Skipping fragment %d ...' % (self.get_basename(), frag_index))
|
|
||||||
continue
|
|
||||||
self.report_error('Unable to open fragment %d' % frag_index)
|
|
||||||
return -1
|
|
||||||
decrypt_info = fragment.get('decrypt_info')
|
|
||||||
if decrypt_info:
|
|
||||||
if decrypt_info['METHOD'] == 'AES-128':
|
|
||||||
iv = decrypt_info.get('IV')
|
|
||||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
|
||||||
self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
|
|
||||||
encrypted_data = src.read()
|
|
||||||
decrypted_data = AES.new(
|
|
||||||
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(encrypted_data)
|
|
||||||
dest.write(decrypted_data)
|
|
||||||
else:
|
|
||||||
fragment_data = src.read()
|
|
||||||
dest.write(fragment_data)
|
|
||||||
else:
|
|
||||||
fragment_data = src.read()
|
|
||||||
dest.write(fragment_data)
|
|
||||||
src.close()
|
|
||||||
if not self.params.get('keep_fragments', False):
|
|
||||||
os.remove(encodeFilename(fragment_filename))
|
|
||||||
dest.close()
|
|
||||||
os.remove(encodeFilename('%s.frag.urls' % tmpfilename))
|
|
||||||
else:
|
|
||||||
p = subprocess.Popen(
|
|
||||||
cmd, stderr=subprocess.PIPE)
|
|
||||||
_, stderr = process_communicate_or_kill(p)
|
|
||||||
if p.returncode != 0:
|
if p.returncode != 0:
|
||||||
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||||
return p.returncode
|
return p.returncode
|
||||||
|
|
||||||
def _prepare_url(self, info_dict, url):
|
fragment_retries = self.params.get('fragment_retries', 0)
|
||||||
headers = info_dict.get('http_headers')
|
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||||
return sanitized_Request(url, None, headers) if headers else url
|
|
||||||
|
count = 0
|
||||||
|
while count <= fragment_retries:
|
||||||
|
p = Popen(cmd, stderr=subprocess.PIPE)
|
||||||
|
_, stderr = p.communicate_or_kill()
|
||||||
|
if p.returncode == 0:
|
||||||
|
break
|
||||||
|
# TODO: Decide whether to retry based on error code
|
||||||
|
# https://aria2.github.io/manual/en/html/aria2c.html#exit-status
|
||||||
|
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||||
|
count += 1
|
||||||
|
if count <= fragment_retries:
|
||||||
|
self.to_screen(
|
||||||
|
'[%s] Got error. Retrying fragments (attempt %d of %s)...'
|
||||||
|
% (self.get_basename(), count, self.format_retries(fragment_retries)))
|
||||||
|
if count > fragment_retries:
|
||||||
|
if not skip_unavailable_fragments:
|
||||||
|
self.report_error('Giving up after %s fragment retries' % fragment_retries)
|
||||||
|
return -1
|
||||||
|
|
||||||
|
decrypt_fragment = self.decrypter(info_dict)
|
||||||
|
dest, _ = sanitize_open(tmpfilename, 'wb')
|
||||||
|
for frag_index, fragment in enumerate(info_dict['fragments']):
|
||||||
|
fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
|
||||||
|
try:
|
||||||
|
src, _ = sanitize_open(fragment_filename, 'rb')
|
||||||
|
except IOError as err:
|
||||||
|
if skip_unavailable_fragments and frag_index > 1:
|
||||||
|
self.report_skip_fragment(frag_index, err)
|
||||||
|
continue
|
||||||
|
self.report_error(f'Unable to open fragment {frag_index}; {err}')
|
||||||
|
return -1
|
||||||
|
dest.write(decrypt_fragment(fragment, src.read()))
|
||||||
|
src.close()
|
||||||
|
if not self.params.get('keep_fragments', False):
|
||||||
|
os.remove(encodeFilename(fragment_filename))
|
||||||
|
dest.close()
|
||||||
|
os.remove(encodeFilename('%s.frag.urls' % tmpfilename))
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
class CurlFD(ExternalFD):
|
class CurlFD(ExternalFD):
|
||||||
@@ -224,8 +196,8 @@ class CurlFD(ExternalFD):
|
|||||||
self._debug_cmd(cmd)
|
self._debug_cmd(cmd)
|
||||||
|
|
||||||
# curl writes the progress to stderr so don't capture it.
|
# curl writes the progress to stderr so don't capture it.
|
||||||
p = subprocess.Popen(cmd)
|
p = Popen(cmd)
|
||||||
process_communicate_or_kill(p)
|
p.communicate_or_kill()
|
||||||
return p.returncode
|
return p.returncode
|
||||||
|
|
||||||
|
|
||||||
@@ -289,6 +261,7 @@ class Aria2cFD(ExternalFD):
|
|||||||
if info_dict.get('http_headers') is not None:
|
if info_dict.get('http_headers') is not None:
|
||||||
for key, val in info_dict['http_headers'].items():
|
for key, val in info_dict['http_headers'].items():
|
||||||
cmd += ['--header', '%s: %s' % (key, val)]
|
cmd += ['--header', '%s: %s' % (key, val)]
|
||||||
|
cmd += self._option('--max-overall-download-limit', 'ratelimit')
|
||||||
cmd += self._option('--interface', 'source_address')
|
cmd += self._option('--interface', 'source_address')
|
||||||
cmd += self._option('--all-proxy', 'proxy')
|
cmd += self._option('--all-proxy', 'proxy')
|
||||||
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
|
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
|
||||||
@@ -343,7 +316,7 @@ class HttpieFD(ExternalFD):
|
|||||||
|
|
||||||
|
|
||||||
class FFmpegFD(ExternalFD):
|
class FFmpegFD(ExternalFD):
|
||||||
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms')
|
SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms', 'http_dash_segments')
|
||||||
can_download_to_stdout = True
|
can_download_to_stdout = True
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -352,12 +325,16 @@ class FFmpegFD(ExternalFD):
|
|||||||
# Fixme: This may be wrong when --ffmpeg-location is used
|
# Fixme: This may be wrong when --ffmpeg-location is used
|
||||||
return FFmpegPostProcessor().available
|
return FFmpegPostProcessor().available
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def supports(cls, info_dict):
|
||||||
|
return all(proto in cls.SUPPORTED_PROTOCOLS for proto in info_dict['protocol'].split('+'))
|
||||||
|
|
||||||
def on_process_started(self, proc, stdin):
|
def on_process_started(self, proc, stdin):
|
||||||
""" Override this in subclasses """
|
""" Override this in subclasses """
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def can_merge_formats(cls, info_dict, params={}):
|
def can_merge_formats(cls, info_dict, params):
|
||||||
return (
|
return (
|
||||||
info_dict.get('requested_formats')
|
info_dict.get('requested_formats')
|
||||||
and info_dict.get('protocol')
|
and info_dict.get('protocol')
|
||||||
@@ -382,6 +359,9 @@ class FFmpegFD(ExternalFD):
|
|||||||
if not self.params.get('verbose'):
|
if not self.params.get('verbose'):
|
||||||
args += ['-hide_banner']
|
args += ['-hide_banner']
|
||||||
|
|
||||||
|
args += info_dict.get('_ffmpeg_args', [])
|
||||||
|
|
||||||
|
# This option exists only for compatibility. Extractors should use `_ffmpeg_args` instead
|
||||||
seekable = info_dict.get('_seekable')
|
seekable = info_dict.get('_seekable')
|
||||||
if seekable is not None:
|
if seekable is not None:
|
||||||
# setting -seekable prevents ffmpeg from guessing if the server
|
# setting -seekable prevents ffmpeg from guessing if the server
|
||||||
@@ -456,20 +436,20 @@ class FFmpegFD(ExternalFD):
|
|||||||
elif isinstance(conn, compat_str):
|
elif isinstance(conn, compat_str):
|
||||||
args += ['-rtmp_conn', conn]
|
args += ['-rtmp_conn', conn]
|
||||||
|
|
||||||
for url in urls:
|
for i, url in enumerate(urls):
|
||||||
args += ['-i', url]
|
args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', url]
|
||||||
|
|
||||||
args += self._configuration_args() + ['-c', 'copy']
|
args += ['-c', 'copy']
|
||||||
if info_dict.get('requested_formats'):
|
if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
|
||||||
for (i, fmt) in enumerate(info_dict['requested_formats']):
|
for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
|
||||||
if fmt.get('acodec') != 'none':
|
stream_number = fmt.get('manifest_stream_number', 0)
|
||||||
args.extend(['-map', '%d:a:0' % i])
|
a_or_v = 'a' if fmt.get('acodec') != 'none' else 'v'
|
||||||
if fmt.get('vcodec') != 'none':
|
args.extend(['-map', f'{i}:{a_or_v}:{stream_number}'])
|
||||||
args.extend(['-map', '%d:v:0' % i])
|
|
||||||
|
|
||||||
if self.params.get('test', False):
|
if self.params.get('test', False):
|
||||||
args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
|
args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
|
||||||
|
|
||||||
|
ext = info_dict['ext']
|
||||||
if protocol in ('m3u8', 'm3u8_native'):
|
if protocol in ('m3u8', 'm3u8_native'):
|
||||||
use_mpegts = (tmpfilename == '-') or self.params.get('hls_use_mpegts')
|
use_mpegts = (tmpfilename == '-') or self.params.get('hls_use_mpegts')
|
||||||
if use_mpegts is None:
|
if use_mpegts is None:
|
||||||
@@ -478,19 +458,22 @@ class FFmpegFD(ExternalFD):
|
|||||||
args += ['-f', 'mpegts']
|
args += ['-f', 'mpegts']
|
||||||
else:
|
else:
|
||||||
args += ['-f', 'mp4']
|
args += ['-f', 'mp4']
|
||||||
if (ffpp.basename == 'ffmpeg' and is_outdated_version(ffpp._versions['ffmpeg'], '3.2', False)) and (not info_dict.get('acodec') or info_dict['acodec'].split('.')[0] in ('aac', 'mp4a')):
|
if (ffpp.basename == 'ffmpeg' and ffpp._features.get('needs_adtstoasc')) and (not info_dict.get('acodec') or info_dict['acodec'].split('.')[0] in ('aac', 'mp4a')):
|
||||||
args += ['-bsf:a', 'aac_adtstoasc']
|
args += ['-bsf:a', 'aac_adtstoasc']
|
||||||
elif protocol == 'rtmp':
|
elif protocol == 'rtmp':
|
||||||
args += ['-f', 'flv']
|
args += ['-f', 'flv']
|
||||||
|
elif ext == 'mp4' and tmpfilename == '-':
|
||||||
|
args += ['-f', 'mpegts']
|
||||||
else:
|
else:
|
||||||
args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
|
args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]
|
||||||
|
|
||||||
|
args += self._configuration_args(('_o1', '_o', ''))
|
||||||
|
|
||||||
args = [encodeArgument(opt) for opt in args]
|
args = [encodeArgument(opt) for opt in args]
|
||||||
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
|
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
|
||||||
|
|
||||||
self._debug_cmd(args)
|
self._debug_cmd(args)
|
||||||
|
|
||||||
proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
|
proc = Popen(args, stdin=subprocess.PIPE, env=env)
|
||||||
if url in ('-', 'pipe:'):
|
if url in ('-', 'pipe:'):
|
||||||
self.on_process_started(proc, proc.stdin)
|
self.on_process_started(proc, proc.stdin)
|
||||||
try:
|
try:
|
||||||
@@ -502,7 +485,7 @@ class FFmpegFD(ExternalFD):
|
|||||||
# streams). Note that Windows is not affected and produces playable
|
# streams). Note that Windows is not affected and produces playable
|
||||||
# files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
|
# files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
|
||||||
if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and url not in ('-', 'pipe:'):
|
if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and url not in ('-', 'pipe:'):
|
||||||
process_communicate_or_kill(proc, b'q')
|
proc.communicate_or_kill(b'q')
|
||||||
else:
|
else:
|
||||||
proc.kill()
|
proc.kill()
|
||||||
proc.wait()
|
proc.wait()
|
||||||
@@ -517,7 +500,7 @@ class AVconvFD(FFmpegFD):
|
|||||||
_BY_NAME = dict(
|
_BY_NAME = dict(
|
||||||
(klass.get_basename(), klass)
|
(klass.get_basename(), klass)
|
||||||
for name, klass in globals().items()
|
for name, klass in globals().items()
|
||||||
if name.endswith('FD') and name != 'ExternalFD'
|
if name.endswith('FD') and name not in ('ExternalFD', 'FragmentFD')
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -3,12 +3,7 @@ from __future__ import division, unicode_literals
|
|||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
import json
|
import json
|
||||||
|
from math import ceil
|
||||||
try:
|
|
||||||
from Crypto.Cipher import AES
|
|
||||||
can_decrypt_frag = True
|
|
||||||
except ImportError:
|
|
||||||
can_decrypt_frag = False
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import concurrent.futures
|
import concurrent.futures
|
||||||
@@ -18,6 +13,7 @@ except ImportError:
|
|||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from .http import HttpFD
|
from .http import HttpFD
|
||||||
|
from ..aes import aes_cbc_decrypt_bytes
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_urllib_error,
|
compat_urllib_error,
|
||||||
compat_struct_pack,
|
compat_struct_pack,
|
||||||
@@ -35,6 +31,10 @@ class HttpQuietDownloader(HttpFD):
|
|||||||
def to_screen(self, *args, **kargs):
|
def to_screen(self, *args, **kargs):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def report_retry(self, err, count, retries):
|
||||||
|
super().to_screen(
|
||||||
|
f'[download] Got server HTTP error: {err}. Retrying (attempt {count} of {self.format_retries(retries)}) ...')
|
||||||
|
|
||||||
|
|
||||||
class FragmentFD(FileDownloader):
|
class FragmentFD(FileDownloader):
|
||||||
"""
|
"""
|
||||||
@@ -48,6 +48,7 @@ class FragmentFD(FileDownloader):
|
|||||||
Skip unavailable fragments (DASH and hlsnative only)
|
Skip unavailable fragments (DASH and hlsnative only)
|
||||||
keep_fragments: Keep downloaded fragments on disk after downloading is
|
keep_fragments: Keep downloaded fragments on disk after downloading is
|
||||||
finished
|
finished
|
||||||
|
concurrent_fragment_downloads: The number of threads to use for native hls and dash downloads
|
||||||
_no_ytdl_file: Don't use .ytdl file
|
_no_ytdl_file: Don't use .ytdl file
|
||||||
|
|
||||||
For each incomplete fragment download yt-dlp keeps on disk a special
|
For each incomplete fragment download yt-dlp keeps on disk a special
|
||||||
@@ -76,8 +77,9 @@ class FragmentFD(FileDownloader):
|
|||||||
'\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
|
'\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
|
||||||
% (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
|
% (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
|
||||||
|
|
||||||
def report_skip_fragment(self, frag_index):
|
def report_skip_fragment(self, frag_index, err=None):
|
||||||
self.to_screen('[download] Skipping fragment %d ...' % frag_index)
|
err = f' {err};' if err else ''
|
||||||
|
self.to_screen(f'[download]{err} Skipping fragment {frag_index:d} ...')
|
||||||
|
|
||||||
def _prepare_url(self, info_dict, url):
|
def _prepare_url(self, info_dict, url):
|
||||||
headers = info_dict.get('http_headers')
|
headers = info_dict.get('http_headers')
|
||||||
@@ -105,17 +107,19 @@ class FragmentFD(FileDownloader):
|
|||||||
|
|
||||||
def _write_ytdl_file(self, ctx):
|
def _write_ytdl_file(self, ctx):
|
||||||
frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
|
frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
|
||||||
downloader = {
|
try:
|
||||||
'current_fragment': {
|
downloader = {
|
||||||
'index': ctx['fragment_index'],
|
'current_fragment': {
|
||||||
},
|
'index': ctx['fragment_index'],
|
||||||
}
|
},
|
||||||
if 'extra_state' in ctx:
|
}
|
||||||
downloader['extra_state'] = ctx['extra_state']
|
if 'extra_state' in ctx:
|
||||||
if ctx.get('fragment_count') is not None:
|
downloader['extra_state'] = ctx['extra_state']
|
||||||
downloader['fragment_count'] = ctx['fragment_count']
|
if ctx.get('fragment_count') is not None:
|
||||||
frag_index_stream.write(json.dumps({'downloader': downloader}))
|
downloader['fragment_count'] = ctx['fragment_count']
|
||||||
frag_index_stream.close()
|
frag_index_stream.write(json.dumps({'downloader': downloader}))
|
||||||
|
finally:
|
||||||
|
frag_index_stream.close()
|
||||||
|
|
||||||
def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
|
def _download_fragment(self, ctx, frag_url, info_dict, headers=None, request_data=None):
|
||||||
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
|
fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
|
||||||
@@ -123,6 +127,7 @@ class FragmentFD(FileDownloader):
|
|||||||
'url': frag_url,
|
'url': frag_url,
|
||||||
'http_headers': headers or info_dict.get('http_headers'),
|
'http_headers': headers or info_dict.get('http_headers'),
|
||||||
'request_data': request_data,
|
'request_data': request_data,
|
||||||
|
'ctx_id': ctx.get('ctx_id'),
|
||||||
}
|
}
|
||||||
success = ctx['dl'].download(fragment_filename, fragment_info_dict)
|
success = ctx['dl'].download(fragment_filename, fragment_info_dict)
|
||||||
if not success:
|
if not success:
|
||||||
@@ -167,7 +172,7 @@ class FragmentFD(FileDownloader):
|
|||||||
self.ydl,
|
self.ydl,
|
||||||
{
|
{
|
||||||
'continuedl': True,
|
'continuedl': True,
|
||||||
'quiet': True,
|
'quiet': self.params.get('quiet'),
|
||||||
'noprogress': True,
|
'noprogress': True,
|
||||||
'ratelimit': self.params.get('ratelimit'),
|
'ratelimit': self.params.get('ratelimit'),
|
||||||
'retries': self.params.get('retries', 0),
|
'retries': self.params.get('retries', 0),
|
||||||
@@ -222,6 +227,7 @@ class FragmentFD(FileDownloader):
|
|||||||
def _start_frag_download(self, ctx, info_dict):
|
def _start_frag_download(self, ctx, info_dict):
|
||||||
resume_len = ctx['complete_frags_downloaded_bytes']
|
resume_len = ctx['complete_frags_downloaded_bytes']
|
||||||
total_frags = ctx['total_frags']
|
total_frags = ctx['total_frags']
|
||||||
|
ctx_id = ctx.get('ctx_id')
|
||||||
# This dict stores the download progress, it's updated by the progress
|
# This dict stores the download progress, it's updated by the progress
|
||||||
# hook
|
# hook
|
||||||
state = {
|
state = {
|
||||||
@@ -236,6 +242,7 @@ class FragmentFD(FileDownloader):
|
|||||||
start = time.time()
|
start = time.time()
|
||||||
ctx.update({
|
ctx.update({
|
||||||
'started': start,
|
'started': start,
|
||||||
|
'fragment_started': start,
|
||||||
# Amount of fragment's bytes downloaded by the time of the previous
|
# Amount of fragment's bytes downloaded by the time of the previous
|
||||||
# frag progress hook invocation
|
# frag progress hook invocation
|
||||||
'prev_frag_downloaded_bytes': 0,
|
'prev_frag_downloaded_bytes': 0,
|
||||||
@@ -245,6 +252,12 @@ class FragmentFD(FileDownloader):
|
|||||||
if s['status'] not in ('downloading', 'finished'):
|
if s['status'] not in ('downloading', 'finished'):
|
||||||
return
|
return
|
||||||
|
|
||||||
|
if ctx_id is not None and s.get('ctx_id') != ctx_id:
|
||||||
|
return
|
||||||
|
|
||||||
|
state['max_progress'] = ctx.get('max_progress')
|
||||||
|
state['progress_idx'] = ctx.get('progress_idx')
|
||||||
|
|
||||||
time_now = time.time()
|
time_now = time.time()
|
||||||
state['elapsed'] = time_now - start
|
state['elapsed'] = time_now - start
|
||||||
frag_total_bytes = s.get('total_bytes') or 0
|
frag_total_bytes = s.get('total_bytes') or 0
|
||||||
@@ -260,6 +273,9 @@ class FragmentFD(FileDownloader):
|
|||||||
ctx['fragment_index'] = state['fragment_index']
|
ctx['fragment_index'] = state['fragment_index']
|
||||||
state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
|
state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
|
||||||
ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
|
ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
|
||||||
|
ctx['speed'] = state['speed'] = self.calc_speed(
|
||||||
|
ctx['fragment_started'], time_now, frag_total_bytes)
|
||||||
|
ctx['fragment_started'] = time.time()
|
||||||
ctx['prev_frag_downloaded_bytes'] = 0
|
ctx['prev_frag_downloaded_bytes'] = 0
|
||||||
else:
|
else:
|
||||||
frag_downloaded_bytes = s['downloaded_bytes']
|
frag_downloaded_bytes = s['downloaded_bytes']
|
||||||
@@ -268,8 +284,8 @@ class FragmentFD(FileDownloader):
|
|||||||
state['eta'] = self.calc_eta(
|
state['eta'] = self.calc_eta(
|
||||||
start, time_now, estimated_size - resume_len,
|
start, time_now, estimated_size - resume_len,
|
||||||
state['downloaded_bytes'] - resume_len)
|
state['downloaded_bytes'] - resume_len)
|
||||||
state['speed'] = s.get('speed') or ctx.get('speed')
|
ctx['speed'] = state['speed'] = self.calc_speed(
|
||||||
ctx['speed'] = state['speed']
|
ctx['fragment_started'], time_now, frag_downloaded_bytes)
|
||||||
ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
|
ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
|
||||||
self._hook_progress(state, info_dict)
|
self._hook_progress(state, info_dict)
|
||||||
|
|
||||||
@@ -304,6 +320,9 @@ class FragmentFD(FileDownloader):
|
|||||||
'filename': ctx['filename'],
|
'filename': ctx['filename'],
|
||||||
'status': 'finished',
|
'status': 'finished',
|
||||||
'elapsed': elapsed,
|
'elapsed': elapsed,
|
||||||
|
'ctx_id': ctx.get('ctx_id'),
|
||||||
|
'max_progress': ctx.get('max_progress'),
|
||||||
|
'progress_idx': ctx.get('progress_idx'),
|
||||||
}, info_dict)
|
}, info_dict)
|
||||||
|
|
||||||
def _prepare_external_frag_download(self, ctx):
|
def _prepare_external_frag_download(self, ctx):
|
||||||
@@ -327,7 +346,67 @@ class FragmentFD(FileDownloader):
|
|||||||
'fragment_index': 0,
|
'fragment_index': 0,
|
||||||
})
|
})
|
||||||
|
|
||||||
def download_and_append_fragments(self, ctx, fragments, info_dict, pack_func=None):
|
def decrypter(self, info_dict):
|
||||||
|
_key_cache = {}
|
||||||
|
|
||||||
|
def _get_key(url):
|
||||||
|
if url not in _key_cache:
|
||||||
|
_key_cache[url] = self.ydl.urlopen(self._prepare_url(info_dict, url)).read()
|
||||||
|
return _key_cache[url]
|
||||||
|
|
||||||
|
def decrypt_fragment(fragment, frag_content):
|
||||||
|
decrypt_info = fragment.get('decrypt_info')
|
||||||
|
if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
|
||||||
|
return frag_content
|
||||||
|
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
|
||||||
|
decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
|
||||||
|
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
|
||||||
|
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
|
||||||
|
# not what it decrypts to.
|
||||||
|
if self.params.get('test', False):
|
||||||
|
return frag_content
|
||||||
|
decrypted_data = aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv)
|
||||||
|
return decrypted_data[:-decrypted_data[-1]]
|
||||||
|
|
||||||
|
return decrypt_fragment
|
||||||
|
|
||||||
|
def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None):
|
||||||
|
'''
|
||||||
|
@params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
|
||||||
|
all args must be either tuple or list
|
||||||
|
'''
|
||||||
|
max_progress = len(args)
|
||||||
|
if max_progress == 1:
|
||||||
|
return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
|
||||||
|
max_workers = self.params.get('concurrent_fragment_downloads', max_progress)
|
||||||
|
if max_progress > 1:
|
||||||
|
self._prepare_multiline_status(max_progress)
|
||||||
|
|
||||||
|
def thread_func(idx, ctx, fragments, info_dict, tpe):
|
||||||
|
ctx['max_progress'] = max_progress
|
||||||
|
ctx['progress_idx'] = idx
|
||||||
|
return self.download_and_append_fragments(ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe)
|
||||||
|
|
||||||
|
class FTPE(concurrent.futures.ThreadPoolExecutor):
|
||||||
|
# has to stop this or it's going to wait on the worker thread itself
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
pass
|
||||||
|
|
||||||
|
spins = []
|
||||||
|
for idx, (ctx, fragments, info_dict) in enumerate(args):
|
||||||
|
tpe = FTPE(ceil(max_workers / max_progress))
|
||||||
|
job = tpe.submit(thread_func, idx, ctx, fragments, info_dict, tpe)
|
||||||
|
spins.append((tpe, job))
|
||||||
|
|
||||||
|
result = True
|
||||||
|
for tpe, job in spins:
|
||||||
|
try:
|
||||||
|
result = result and job.result()
|
||||||
|
finally:
|
||||||
|
tpe.shutdown(wait=True)
|
||||||
|
return result
|
||||||
|
|
||||||
|
def download_and_append_fragments(self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None, tpe=None):
|
||||||
fragment_retries = self.params.get('fragment_retries', 0)
|
fragment_retries = self.params.get('fragment_retries', 0)
|
||||||
is_fatal = (lambda idx: idx == 0) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)
|
is_fatal = (lambda idx: idx == 0) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)
|
||||||
if not pack_func:
|
if not pack_func:
|
||||||
@@ -335,7 +414,7 @@ class FragmentFD(FileDownloader):
|
|||||||
|
|
||||||
def download_fragment(fragment, ctx):
|
def download_fragment(fragment, ctx):
|
||||||
frag_index = ctx['fragment_index'] = fragment['frag_index']
|
frag_index = ctx['fragment_index'] = fragment['frag_index']
|
||||||
headers = info_dict.get('http_headers', {})
|
headers = info_dict.get('http_headers', {}).copy()
|
||||||
byte_range = fragment.get('byte_range')
|
byte_range = fragment.get('byte_range')
|
||||||
if byte_range:
|
if byte_range:
|
||||||
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
|
||||||
@@ -372,24 +451,10 @@ class FragmentFD(FileDownloader):
|
|||||||
return False, frag_index
|
return False, frag_index
|
||||||
return frag_content, frag_index
|
return frag_content, frag_index
|
||||||
|
|
||||||
def decrypt_fragment(fragment, frag_content):
|
|
||||||
decrypt_info = fragment.get('decrypt_info')
|
|
||||||
if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
|
|
||||||
return frag_content
|
|
||||||
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
|
|
||||||
decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
|
|
||||||
self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
|
|
||||||
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
|
|
||||||
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
|
|
||||||
# not what it decrypts to.
|
|
||||||
if self.params.get('test', False):
|
|
||||||
return frag_content
|
|
||||||
return AES.new(decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
|
||||||
|
|
||||||
def append_fragment(frag_content, frag_index, ctx):
|
def append_fragment(frag_content, frag_index, ctx):
|
||||||
if not frag_content:
|
if not frag_content:
|
||||||
if not is_fatal(frag_index - 1):
|
if not is_fatal(frag_index - 1):
|
||||||
self.report_skip_fragment(frag_index)
|
self.report_skip_fragment(frag_index, 'fragment not found')
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
ctx['dest_stream'].close()
|
ctx['dest_stream'].close()
|
||||||
@@ -399,6 +464,8 @@ class FragmentFD(FileDownloader):
|
|||||||
self._append_fragment(ctx, pack_func(frag_content, frag_index))
|
self._append_fragment(ctx, pack_func(frag_content, frag_index))
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
decrypt_fragment = self.decrypter(info_dict)
|
||||||
|
|
||||||
max_workers = self.params.get('concurrent_fragment_downloads', 1)
|
max_workers = self.params.get('concurrent_fragment_downloads', 1)
|
||||||
if can_threaded_download and max_workers > 1:
|
if can_threaded_download and max_workers > 1:
|
||||||
|
|
||||||
@@ -408,7 +475,7 @@ class FragmentFD(FileDownloader):
|
|||||||
return fragment, frag_content, frag_index, ctx_copy.get('fragment_filename_sanitized')
|
return fragment, frag_content, frag_index, ctx_copy.get('fragment_filename_sanitized')
|
||||||
|
|
||||||
self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
|
self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
|
||||||
with concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
|
with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
|
||||||
for fragment, frag_content, frag_index, frag_filename in pool.map(_download_fragment, fragments):
|
for fragment, frag_content, frag_index, frag_filename in pool.map(_download_fragment, fragments):
|
||||||
ctx['fragment_filename_sanitized'] = frag_filename
|
ctx['fragment_filename_sanitized'] = frag_filename
|
||||||
ctx['fragment_index'] = frag_index
|
ctx['fragment_index'] = frag_index
|
||||||
@@ -422,5 +489,8 @@ class FragmentFD(FileDownloader):
|
|||||||
if not result:
|
if not result:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
if finish_func is not None:
|
||||||
|
ctx['dest_stream'].write(finish_func())
|
||||||
|
ctx['dest_stream'].flush()
|
||||||
self._finish_frag_download(ctx, info_dict)
|
self._finish_frag_download(ctx, info_dict)
|
||||||
return True
|
return True
|
||||||
|
|||||||
@@ -5,10 +5,11 @@ import io
|
|||||||
import binascii
|
import binascii
|
||||||
|
|
||||||
from ..downloader import get_suitable_downloader
|
from ..downloader import get_suitable_downloader
|
||||||
from .fragment import FragmentFD, can_decrypt_frag
|
from .fragment import FragmentFD
|
||||||
from .external import FFmpegFD
|
from .external import FFmpegFD
|
||||||
|
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
|
compat_pycrypto_AES,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -29,7 +30,7 @@ class HlsFD(FragmentFD):
|
|||||||
FD_NAME = 'hlsnative'
|
FD_NAME = 'hlsnative'
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def can_download(manifest, info_dict, allow_unplayable_formats=False, with_crypto=can_decrypt_frag):
|
def can_download(manifest, info_dict, allow_unplayable_formats=False):
|
||||||
UNSUPPORTED_FEATURES = [
|
UNSUPPORTED_FEATURES = [
|
||||||
# r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
# r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
||||||
|
|
||||||
@@ -56,9 +57,6 @@ class HlsFD(FragmentFD):
|
|||||||
|
|
||||||
def check_results():
|
def check_results():
|
||||||
yield not info_dict.get('is_live')
|
yield not info_dict.get('is_live')
|
||||||
is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
|
|
||||||
yield with_crypto or not is_aes128_enc
|
|
||||||
yield not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest)
|
|
||||||
for feature in UNSUPPORTED_FEATURES:
|
for feature in UNSUPPORTED_FEATURES:
|
||||||
yield not re.search(feature, manifest)
|
yield not re.search(feature, manifest)
|
||||||
return all(check_results())
|
return all(check_results())
|
||||||
@@ -71,16 +69,20 @@ class HlsFD(FragmentFD):
|
|||||||
man_url = urlh.geturl()
|
man_url = urlh.geturl()
|
||||||
s = urlh.read().decode('utf-8', 'ignore')
|
s = urlh.read().decode('utf-8', 'ignore')
|
||||||
|
|
||||||
if not self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')):
|
can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None
|
||||||
if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):
|
if can_download and not compat_pycrypto_AES and '#EXT-X-KEY:METHOD=AES-128' in s:
|
||||||
self.report_error('pycryptodome not found. Please install')
|
if FFmpegFD.available():
|
||||||
return False
|
can_download, message = False, 'The stream has AES-128 encryption and pycryptodomex is not available'
|
||||||
if self.can_download(s, info_dict, with_crypto=True):
|
else:
|
||||||
self.report_warning('pycryptodome is needed to download this file natively')
|
message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
|
||||||
|
'Decryption will be performed natively, but will be extremely slow')
|
||||||
|
if not can_download:
|
||||||
|
message = message or 'Unsupported features have been detected'
|
||||||
fd = FFmpegFD(self.ydl, self.params)
|
fd = FFmpegFD(self.ydl, self.params)
|
||||||
self.report_warning(
|
self.report_warning(f'{message}; extraction will be delegated to {fd.get_basename()}')
|
||||||
'%s detected unsupported features; extraction will be delegated to %s' % (self.FD_NAME, fd.get_basename()))
|
|
||||||
return fd.real_download(filename, info_dict)
|
return fd.real_download(filename, info_dict)
|
||||||
|
elif message:
|
||||||
|
self.report_warning(message)
|
||||||
|
|
||||||
is_webvtt = info_dict['ext'] == 'vtt'
|
is_webvtt = info_dict['ext'] == 'vtt'
|
||||||
if is_webvtt:
|
if is_webvtt:
|
||||||
@@ -172,6 +174,7 @@ class HlsFD(FragmentFD):
|
|||||||
'byte_range': byte_range,
|
'byte_range': byte_range,
|
||||||
'media_sequence': media_sequence,
|
'media_sequence': media_sequence,
|
||||||
})
|
})
|
||||||
|
media_sequence += 1
|
||||||
|
|
||||||
elif line.startswith('#EXT-X-MAP'):
|
elif line.startswith('#EXT-X-MAP'):
|
||||||
if format_index and discontinuity_count != format_index:
|
if format_index and discontinuity_count != format_index:
|
||||||
@@ -196,6 +199,7 @@ class HlsFD(FragmentFD):
|
|||||||
'byte_range': byte_range,
|
'byte_range': byte_range,
|
||||||
'media_sequence': media_sequence
|
'media_sequence': media_sequence
|
||||||
})
|
})
|
||||||
|
media_sequence += 1
|
||||||
|
|
||||||
if map_info.get('BYTERANGE'):
|
if map_info.get('BYTERANGE'):
|
||||||
splitted_byte_range = map_info.get('BYTERANGE').split('@')
|
splitted_byte_range = map_info.get('BYTERANGE').split('@')
|
||||||
@@ -235,54 +239,64 @@ class HlsFD(FragmentFD):
|
|||||||
elif line.startswith('#EXT-X-DISCONTINUITY'):
|
elif line.startswith('#EXT-X-DISCONTINUITY'):
|
||||||
discontinuity_count += 1
|
discontinuity_count += 1
|
||||||
i += 1
|
i += 1
|
||||||
media_sequence += 1
|
|
||||||
|
|
||||||
# We only download the first fragment during the test
|
# We only download the first fragment during the test
|
||||||
if self.params.get('test', False):
|
if self.params.get('test', False):
|
||||||
fragments = [fragments[0] if fragments else None]
|
fragments = [fragments[0] if fragments else None]
|
||||||
|
|
||||||
if real_downloader:
|
if real_downloader:
|
||||||
info_copy = info_dict.copy()
|
info_dict['fragments'] = fragments
|
||||||
info_copy['fragments'] = fragments
|
|
||||||
fd = real_downloader(self.ydl, self.params)
|
fd = real_downloader(self.ydl, self.params)
|
||||||
# TODO: Make progress updates work without hooking twice
|
# TODO: Make progress updates work without hooking twice
|
||||||
# for ph in self._progress_hooks:
|
# for ph in self._progress_hooks:
|
||||||
# fd.add_progress_hook(ph)
|
# fd.add_progress_hook(ph)
|
||||||
return fd.real_download(filename, info_copy)
|
return fd.real_download(filename, info_dict)
|
||||||
|
|
||||||
if is_webvtt:
|
if is_webvtt:
|
||||||
def pack_fragment(frag_content, frag_index):
|
def pack_fragment(frag_content, frag_index):
|
||||||
output = io.StringIO()
|
output = io.StringIO()
|
||||||
adjust = 0
|
adjust = 0
|
||||||
|
overflow = False
|
||||||
|
mpegts_last = None
|
||||||
for block in webvtt.parse_fragment(frag_content):
|
for block in webvtt.parse_fragment(frag_content):
|
||||||
if isinstance(block, webvtt.CueBlock):
|
if isinstance(block, webvtt.CueBlock):
|
||||||
|
extra_state['webvtt_mpegts_last'] = mpegts_last
|
||||||
|
if overflow:
|
||||||
|
extra_state['webvtt_mpegts_adjust'] += 1
|
||||||
|
overflow = False
|
||||||
block.start += adjust
|
block.start += adjust
|
||||||
block.end += adjust
|
block.end += adjust
|
||||||
|
|
||||||
dedup_window = extra_state.setdefault('webvtt_dedup_window', [])
|
dedup_window = extra_state.setdefault('webvtt_dedup_window', [])
|
||||||
cue = block.as_json
|
|
||||||
|
|
||||||
# skip the cue if an identical one appears
|
ready = []
|
||||||
# in the window of potential duplicates
|
|
||||||
# and prune the window of unviable candidates
|
|
||||||
i = 0
|
i = 0
|
||||||
skip = True
|
is_new = True
|
||||||
while i < len(dedup_window):
|
while i < len(dedup_window):
|
||||||
window_cue = dedup_window[i]
|
wcue = dedup_window[i]
|
||||||
if window_cue == cue:
|
wblock = webvtt.CueBlock.from_json(wcue)
|
||||||
break
|
i += 1
|
||||||
if window_cue['end'] >= cue['start']:
|
if wblock.hinges(block):
|
||||||
i += 1
|
wcue['end'] = block.end
|
||||||
|
is_new = False
|
||||||
continue
|
continue
|
||||||
|
if wblock == block:
|
||||||
|
is_new = False
|
||||||
|
continue
|
||||||
|
if wblock.end > block.start:
|
||||||
|
continue
|
||||||
|
ready.append(wblock)
|
||||||
|
i -= 1
|
||||||
del dedup_window[i]
|
del dedup_window[i]
|
||||||
else:
|
|
||||||
skip = False
|
|
||||||
|
|
||||||
if skip:
|
if is_new:
|
||||||
continue
|
dedup_window.append(block.as_json)
|
||||||
|
for block in ready:
|
||||||
|
block.write_into(output)
|
||||||
|
|
||||||
# add the cue to the window
|
# we only emit cues once they fall out of the duplicate window
|
||||||
dedup_window.append(cue)
|
continue
|
||||||
elif isinstance(block, webvtt.Magic):
|
elif isinstance(block, webvtt.Magic):
|
||||||
# take care of MPEG PES timestamp overflow
|
# take care of MPEG PES timestamp overflow
|
||||||
if block.mpegts is None:
|
if block.mpegts is None:
|
||||||
@@ -290,9 +304,9 @@ class HlsFD(FragmentFD):
|
|||||||
extra_state.setdefault('webvtt_mpegts_adjust', 0)
|
extra_state.setdefault('webvtt_mpegts_adjust', 0)
|
||||||
block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
|
block.mpegts += extra_state['webvtt_mpegts_adjust'] << 33
|
||||||
if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
|
if block.mpegts < extra_state.get('webvtt_mpegts_last', 0):
|
||||||
extra_state['webvtt_mpegts_adjust'] += 1
|
overflow = True
|
||||||
block.mpegts += 1 << 33
|
block.mpegts += 1 << 33
|
||||||
extra_state['webvtt_mpegts_last'] = block.mpegts
|
mpegts_last = block.mpegts
|
||||||
|
|
||||||
if frag_index == 1:
|
if frag_index == 1:
|
||||||
extra_state['webvtt_mpegts'] = block.mpegts or 0
|
extra_state['webvtt_mpegts'] = block.mpegts or 0
|
||||||
@@ -317,6 +331,19 @@ class HlsFD(FragmentFD):
|
|||||||
block.write_into(output)
|
block.write_into(output)
|
||||||
|
|
||||||
return output.getvalue().encode('utf-8')
|
return output.getvalue().encode('utf-8')
|
||||||
|
|
||||||
|
def fin_fragments():
|
||||||
|
dedup_window = extra_state.get('webvtt_dedup_window')
|
||||||
|
if not dedup_window:
|
||||||
|
return b''
|
||||||
|
|
||||||
|
output = io.StringIO()
|
||||||
|
for cue in dedup_window:
|
||||||
|
webvtt.CueBlock.from_json(cue).write_into(output)
|
||||||
|
|
||||||
|
return output.getvalue().encode('utf-8')
|
||||||
|
|
||||||
|
self.download_and_append_fragments(
|
||||||
|
ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments)
|
||||||
else:
|
else:
|
||||||
pack_fragment = None
|
return self.download_and_append_fragments(ctx, fragments, info_dict)
|
||||||
return self.download_and_append_fragments(ctx, fragments, info_dict, pack_fragment)
|
|
||||||
|
|||||||
@@ -48,8 +48,9 @@ class HttpFD(FileDownloader):
|
|||||||
|
|
||||||
is_test = self.params.get('test', False)
|
is_test = self.params.get('test', False)
|
||||||
chunk_size = self._TEST_FILE_SIZE if is_test else (
|
chunk_size = self._TEST_FILE_SIZE if is_test else (
|
||||||
info_dict.get('downloader_options', {}).get('http_chunk_size')
|
self.params.get('http_chunk_size')
|
||||||
or self.params.get('http_chunk_size') or 0)
|
or info_dict.get('downloader_options', {}).get('http_chunk_size')
|
||||||
|
or 0)
|
||||||
|
|
||||||
ctx.open_mode = 'wb'
|
ctx.open_mode = 'wb'
|
||||||
ctx.resume_len = 0
|
ctx.resume_len = 0
|
||||||
@@ -57,6 +58,7 @@ class HttpFD(FileDownloader):
|
|||||||
ctx.block_size = self.params.get('buffersize', 1024)
|
ctx.block_size = self.params.get('buffersize', 1024)
|
||||||
ctx.start_time = time.time()
|
ctx.start_time = time.time()
|
||||||
ctx.chunk_size = None
|
ctx.chunk_size = None
|
||||||
|
throttle_start = None
|
||||||
|
|
||||||
if self.params.get('continuedl', True):
|
if self.params.get('continuedl', True):
|
||||||
# Establish possible resume length
|
# Establish possible resume length
|
||||||
@@ -189,13 +191,16 @@ class HttpFD(FileDownloader):
|
|||||||
# Unexpected HTTP error
|
# Unexpected HTTP error
|
||||||
raise
|
raise
|
||||||
raise RetryDownload(err)
|
raise RetryDownload(err)
|
||||||
except socket.error as err:
|
except socket.timeout as err:
|
||||||
if err.errno != errno.ECONNRESET:
|
|
||||||
# Connection reset is no problem, just retry
|
|
||||||
raise
|
|
||||||
raise RetryDownload(err)
|
raise RetryDownload(err)
|
||||||
|
except socket.error as err:
|
||||||
|
if err.errno in (errno.ECONNRESET, errno.ETIMEDOUT):
|
||||||
|
# Connection reset is no problem, just retry
|
||||||
|
raise RetryDownload(err)
|
||||||
|
raise
|
||||||
|
|
||||||
def download():
|
def download():
|
||||||
|
nonlocal throttle_start
|
||||||
data_len = ctx.data.info().get('Content-length', None)
|
data_len = ctx.data.info().get('Content-length', None)
|
||||||
|
|
||||||
# Range HTTP header may be ignored/unsupported by a webserver
|
# Range HTTP header may be ignored/unsupported by a webserver
|
||||||
@@ -224,7 +229,6 @@ class HttpFD(FileDownloader):
|
|||||||
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
|
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
|
||||||
now = None # needed for slow_down() in the first loop run
|
now = None # needed for slow_down() in the first loop run
|
||||||
before = start # start measuring
|
before = start # start measuring
|
||||||
throttle_start = None
|
|
||||||
|
|
||||||
def retry(e):
|
def retry(e):
|
||||||
to_stdout = ctx.tmpfilename == '-'
|
to_stdout = ctx.tmpfilename == '-'
|
||||||
@@ -238,7 +242,7 @@ class HttpFD(FileDownloader):
|
|||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
# Download and write
|
# Download and write
|
||||||
data_block = ctx.data.read(block_size if data_len is None else min(block_size, data_len - byte_counter))
|
data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
|
||||||
# socket.timeout is a subclass of socket.error but may not have
|
# socket.timeout is a subclass of socket.error but may not have
|
||||||
# errno set
|
# errno set
|
||||||
except socket.timeout as e:
|
except socket.timeout as e:
|
||||||
@@ -310,6 +314,7 @@ class HttpFD(FileDownloader):
|
|||||||
'eta': eta,
|
'eta': eta,
|
||||||
'speed': speed,
|
'speed': speed,
|
||||||
'elapsed': now - ctx.start_time,
|
'elapsed': now - ctx.start_time,
|
||||||
|
'ctx_id': info_dict.get('ctx_id'),
|
||||||
}, info_dict)
|
}, info_dict)
|
||||||
|
|
||||||
if data_len is not None and byte_counter == data_len:
|
if data_len is not None and byte_counter == data_len:
|
||||||
@@ -324,7 +329,7 @@ class HttpFD(FileDownloader):
|
|||||||
if ctx.stream is not None and ctx.tmpfilename != '-':
|
if ctx.stream is not None and ctx.tmpfilename != '-':
|
||||||
ctx.stream.close()
|
ctx.stream.close()
|
||||||
raise ThrottledDownload()
|
raise ThrottledDownload()
|
||||||
else:
|
elif speed:
|
||||||
throttle_start = None
|
throttle_start = None
|
||||||
|
|
||||||
if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
|
if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
|
||||||
@@ -357,6 +362,7 @@ class HttpFD(FileDownloader):
|
|||||||
'filename': ctx.filename,
|
'filename': ctx.filename,
|
||||||
'status': 'finished',
|
'status': 'finished',
|
||||||
'elapsed': time.time() - ctx.start_time,
|
'elapsed': time.time() - ctx.start_time,
|
||||||
|
'ctx_id': info_dict.get('ctx_id'),
|
||||||
}, info_dict)
|
}, info_dict)
|
||||||
|
|
||||||
return True
|
return True
|
||||||
@@ -369,6 +375,8 @@ class HttpFD(FileDownloader):
|
|||||||
count += 1
|
count += 1
|
||||||
if count <= retries:
|
if count <= retries:
|
||||||
self.report_retry(e.source_error, count, retries)
|
self.report_retry(e.source_error, count, retries)
|
||||||
|
else:
|
||||||
|
self.to_screen(f'[download] Got server HTTP error: {e.source_error}')
|
||||||
continue
|
continue
|
||||||
except NextFragment:
|
except NextFragment:
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import threading
|
|||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from ..downloader import get_suitable_downloader
|
from ..downloader import get_suitable_downloader
|
||||||
from ..extractor.niconico import NiconicoIE
|
from ..extractor.niconico import NiconicoIE
|
||||||
from ..compat import compat_urllib_request
|
from ..utils import sanitized_Request
|
||||||
|
|
||||||
|
|
||||||
class NiconicoDmcFD(FileDownloader):
|
class NiconicoDmcFD(FileDownloader):
|
||||||
@@ -29,9 +29,11 @@ class NiconicoDmcFD(FileDownloader):
|
|||||||
heartbeat_data = heartbeat_info_dict['data'].encode()
|
heartbeat_data = heartbeat_info_dict['data'].encode()
|
||||||
heartbeat_interval = heartbeat_info_dict.get('interval', 30)
|
heartbeat_interval = heartbeat_info_dict.get('interval', 30)
|
||||||
|
|
||||||
|
request = sanitized_Request(heartbeat_url, heartbeat_data)
|
||||||
|
|
||||||
def heartbeat():
|
def heartbeat():
|
||||||
try:
|
try:
|
||||||
compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
|
self.ydl.urlopen(request).read()
|
||||||
except Exception:
|
except Exception:
|
||||||
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
|
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
|
||||||
|
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ from ..utils import (
|
|||||||
encodeFilename,
|
encodeFilename,
|
||||||
encodeArgument,
|
encodeArgument,
|
||||||
get_exe_version,
|
get_exe_version,
|
||||||
|
Popen,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@@ -26,7 +27,7 @@ class RtmpFD(FileDownloader):
|
|||||||
start = time.time()
|
start = time.time()
|
||||||
resume_percent = None
|
resume_percent = None
|
||||||
resume_downloaded_data_len = None
|
resume_downloaded_data_len = None
|
||||||
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
|
proc = Popen(args, stderr=subprocess.PIPE)
|
||||||
cursor_in_new_line = True
|
cursor_in_new_line = True
|
||||||
proc_stderr_closed = False
|
proc_stderr_closed = False
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -183,7 +183,7 @@ class YoutubeLiveChatFD(FragmentFD):
|
|||||||
request_data['currentPlayerState'] = {'playerOffsetMs': str(max(offset - 5000, 0))}
|
request_data['currentPlayerState'] = {'playerOffsetMs': str(max(offset - 5000, 0))}
|
||||||
if click_tracking_params:
|
if click_tracking_params:
|
||||||
request_data['context']['clickTracking'] = {'clickTrackingParams': click_tracking_params}
|
request_data['context']['clickTracking'] = {'clickTrackingParams': click_tracking_params}
|
||||||
headers = ie.generate_api_headers(ytcfg, visitor_data=visitor_data)
|
headers = ie.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data)
|
||||||
headers.update({'content-type': 'application/json'})
|
headers.update({'content-type': 'application/json'})
|
||||||
fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode('utf-8') + b'\n'
|
fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode('utf-8') + b'\n'
|
||||||
success, continuation_id, offset, click_tracking_params = download_and_parse_fragment(
|
success, continuation_id, offset, click_tracking_params = download_and_parse_fragment(
|
||||||
|
|||||||
@@ -1,14 +1,15 @@
|
|||||||
from __future__ import unicode_literals
|
import os
|
||||||
|
|
||||||
from ..utils import load_plugins
|
from ..utils import load_plugins
|
||||||
|
|
||||||
try:
|
_LAZY_LOADER = False
|
||||||
from .lazy_extractors import *
|
if not os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
|
||||||
from .lazy_extractors import _ALL_CLASSES
|
try:
|
||||||
_LAZY_LOADER = True
|
from .lazy_extractors import *
|
||||||
_PLUGIN_CLASSES = []
|
from .lazy_extractors import _ALL_CLASSES
|
||||||
except ImportError:
|
_LAZY_LOADER = True
|
||||||
_LAZY_LOADER = False
|
except ImportError:
|
||||||
|
pass
|
||||||
|
|
||||||
if not _LAZY_LOADER:
|
if not _LAZY_LOADER:
|
||||||
from .extractors import *
|
from .extractors import *
|
||||||
@@ -19,8 +20,8 @@ if not _LAZY_LOADER:
|
|||||||
]
|
]
|
||||||
_ALL_CLASSES.append(GenericIE)
|
_ALL_CLASSES.append(GenericIE)
|
||||||
|
|
||||||
_PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals())
|
_PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals())
|
||||||
_ALL_CLASSES = _PLUGIN_CLASSES + _ALL_CLASSES
|
_ALL_CLASSES = list(_PLUGIN_CLASSES.values()) + _ALL_CLASSES
|
||||||
|
|
||||||
|
|
||||||
def gen_extractor_classes():
|
def gen_extractor_classes():
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .amp import AMPIE
|
from .amp import AMPIE
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@@ -59,7 +58,7 @@ class AbcNewsVideoIE(AMPIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
display_id = mobj.group('display_id')
|
display_id = mobj.group('display_id')
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
info_dict = self._extract_feed_info(
|
info_dict = self._extract_feed_info(
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_str
|
from ..compat import compat_str
|
||||||
@@ -55,7 +54,7 @@ class ABCOTVSIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
site, display_id, video_id = re.match(self._VALID_URL, url).groups()
|
site, display_id, video_id = self._match_valid_url(url).groups()
|
||||||
display_id = display_id or video_id
|
display_id = display_id or video_id
|
||||||
station = self._SITE_MAP[site]
|
station = self._SITE_MAP[site]
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -80,7 +79,7 @@ class ACastIE(ACastBaseIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
channel, display_id = re.match(self._VALID_URL, url).groups()
|
channel, display_id = self._match_valid_url(url).groups()
|
||||||
episode = self._call_api(
|
episode = self._call_api(
|
||||||
'%s/episodes/%s' % (channel, display_id),
|
'%s/episodes/%s' % (channel, display_id),
|
||||||
display_id, {'showInfo': 'true'})
|
display_id, {'showInfo': 'true'})
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ from ..compat import (
|
|||||||
compat_ord,
|
compat_ord,
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
ass_subtitles_timecode,
|
||||||
bytes_to_intlist,
|
bytes_to_intlist,
|
||||||
bytes_to_long,
|
bytes_to_long,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
@@ -68,10 +69,6 @@ class ADNIE(InfoExtractor):
|
|||||||
'end': 4,
|
'end': 4,
|
||||||
}
|
}
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _ass_subtitles_timecode(seconds):
|
|
||||||
return '%01d:%02d:%02d.%02d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 100)
|
|
||||||
|
|
||||||
def _get_subtitles(self, sub_url, video_id):
|
def _get_subtitles(self, sub_url, video_id):
|
||||||
if not sub_url:
|
if not sub_url:
|
||||||
return None
|
return None
|
||||||
@@ -117,8 +114,8 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
|
|||||||
continue
|
continue
|
||||||
alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
|
alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
|
||||||
ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
|
ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
|
||||||
self._ass_subtitles_timecode(start),
|
ass_subtitles_timecode(start),
|
||||||
self._ass_subtitles_timecode(end),
|
ass_subtitles_timecode(end),
|
||||||
'{\\a%d}' % alignment if alignment != 2 else '',
|
'{\\a%d}' % alignment if alignment != 2 else '',
|
||||||
text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}'))
|
text.replace('\n', '\\N').replace('<i>', '{\\i1}').replace('</i>', '{\\i0}'))
|
||||||
|
|
||||||
|
|||||||
@@ -37,6 +37,11 @@ MSO_INFO = {
|
|||||||
'username_field': 'email',
|
'username_field': 'email',
|
||||||
'password_field': 'loginpassword',
|
'password_field': 'loginpassword',
|
||||||
},
|
},
|
||||||
|
'RCN': {
|
||||||
|
'name': 'RCN',
|
||||||
|
'username_field': 'username',
|
||||||
|
'password_field': 'password',
|
||||||
|
},
|
||||||
'Rogers': {
|
'Rogers': {
|
||||||
'name': 'Rogers',
|
'name': 'Rogers',
|
||||||
'username_field': 'UserName',
|
'username_field': 'UserName',
|
||||||
@@ -76,6 +81,11 @@ MSO_INFO = {
|
|||||||
'username_field': 'IDToken1',
|
'username_field': 'IDToken1',
|
||||||
'password_field': 'IDToken2',
|
'password_field': 'IDToken2',
|
||||||
},
|
},
|
||||||
|
'Cablevision': {
|
||||||
|
'name': 'Optimum/Cablevision',
|
||||||
|
'username_field': 'j_username',
|
||||||
|
'password_field': 'j_password',
|
||||||
|
},
|
||||||
'thr030': {
|
'thr030': {
|
||||||
'name': '3 Rivers Communications'
|
'name': '3 Rivers Communications'
|
||||||
},
|
},
|
||||||
@@ -1503,7 +1513,8 @@ class AdobePassIE(InfoExtractor):
|
|||||||
# In general, if you're connecting from a Verizon-assigned IP,
|
# In general, if you're connecting from a Verizon-assigned IP,
|
||||||
# you will not actually pass your credentials.
|
# you will not actually pass your credentials.
|
||||||
provider_redirect_page, urlh = provider_redirect_page_res
|
provider_redirect_page, urlh = provider_redirect_page_res
|
||||||
if 'Please wait ...' in provider_redirect_page:
|
# From non-Verizon IP, still gave 'Please wait', but noticed N==Y; will need to try on Verizon IP
|
||||||
|
if 'Please wait ...' in provider_redirect_page and '\'N\'== "Y"' not in provider_redirect_page:
|
||||||
saml_redirect_url = self._html_search_regex(
|
saml_redirect_url = self._html_search_regex(
|
||||||
r'self\.parent\.location=(["\'])(?P<url>.+?)\1',
|
r'self\.parent\.location=(["\'])(?P<url>.+?)\1',
|
||||||
provider_redirect_page,
|
provider_redirect_page,
|
||||||
@@ -1511,7 +1522,8 @@ class AdobePassIE(InfoExtractor):
|
|||||||
saml_login_page = self._download_webpage(
|
saml_login_page = self._download_webpage(
|
||||||
saml_redirect_url, video_id,
|
saml_redirect_url, video_id,
|
||||||
'Downloading SAML Login Page')
|
'Downloading SAML Login Page')
|
||||||
else:
|
elif 'Verizon FiOS - sign in' in provider_redirect_page:
|
||||||
|
# FXNetworks from non-Verizon IP
|
||||||
saml_login_page_res = post_form(
|
saml_login_page_res = post_form(
|
||||||
provider_redirect_page_res, 'Logging in', {
|
provider_redirect_page_res, 'Logging in', {
|
||||||
mso_info['username_field']: username,
|
mso_info['username_field']: username,
|
||||||
@@ -1521,6 +1533,26 @@ class AdobePassIE(InfoExtractor):
|
|||||||
if 'Please try again.' in saml_login_page:
|
if 'Please try again.' in saml_login_page:
|
||||||
raise ExtractorError(
|
raise ExtractorError(
|
||||||
'We\'re sorry, but either the User ID or Password entered is not correct.')
|
'We\'re sorry, but either the User ID or Password entered is not correct.')
|
||||||
|
else:
|
||||||
|
# ABC from non-Verizon IP
|
||||||
|
saml_redirect_url = self._html_search_regex(
|
||||||
|
r'var\surl\s*=\s*(["\'])(?P<url>.+?)\1',
|
||||||
|
provider_redirect_page,
|
||||||
|
'SAML Redirect URL', group='url')
|
||||||
|
saml_redirect_url = saml_redirect_url.replace(r'\/', '/')
|
||||||
|
saml_redirect_url = saml_redirect_url.replace(r'\-', '-')
|
||||||
|
saml_redirect_url = saml_redirect_url.replace(r'\x26', '&')
|
||||||
|
saml_login_page = self._download_webpage(
|
||||||
|
saml_redirect_url, video_id,
|
||||||
|
'Downloading SAML Login Page')
|
||||||
|
saml_login_page, urlh = post_form(
|
||||||
|
[saml_login_page, saml_redirect_url], 'Logging in', {
|
||||||
|
mso_info['username_field']: username,
|
||||||
|
mso_info['password_field']: password,
|
||||||
|
})
|
||||||
|
if 'Please try again.' in saml_login_page:
|
||||||
|
raise ExtractorError(
|
||||||
|
'Failed to login, incorrect User ID or Password.')
|
||||||
saml_login_url = self._search_regex(
|
saml_login_url = self._search_regex(
|
||||||
r'xmlHttp\.open\("POST"\s*,\s*(["\'])(?P<url>.+?)\1',
|
r'xmlHttp\.open\("POST"\s*,\s*(["\'])(?P<url>.+?)\1',
|
||||||
saml_login_page, 'SAML Login URL', group='url')
|
saml_login_page, 'SAML Login URL', group='url')
|
||||||
@@ -1581,7 +1613,7 @@ class AdobePassIE(InfoExtractor):
|
|||||||
hidden_data['history'] = 1
|
hidden_data['history'] = 1
|
||||||
|
|
||||||
provider_login_page_res = self._download_webpage_handle(
|
provider_login_page_res = self._download_webpage_handle(
|
||||||
urlh.geturl(), video_id, 'Sending first bookend.',
|
urlh.geturl(), video_id, 'Sending first bookend',
|
||||||
query=hidden_data)
|
query=hidden_data)
|
||||||
|
|
||||||
provider_association_redirect, urlh = post_form(
|
provider_association_redirect, urlh = post_form(
|
||||||
@@ -1600,7 +1632,7 @@ class AdobePassIE(InfoExtractor):
|
|||||||
hidden_data['history'] = 3
|
hidden_data['history'] = 3
|
||||||
|
|
||||||
mvpd_confirm_page_res = self._download_webpage_handle(
|
mvpd_confirm_page_res = self._download_webpage_handle(
|
||||||
urlh.geturl(), video_id, 'Sending final bookend.',
|
urlh.geturl(), video_id, 'Sending final bookend',
|
||||||
query=hidden_data)
|
query=hidden_data)
|
||||||
|
|
||||||
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
||||||
@@ -1616,10 +1648,13 @@ class AdobePassIE(InfoExtractor):
|
|||||||
'Downloading Provider Redirect Page (meta refresh)')
|
'Downloading Provider Redirect Page (meta refresh)')
|
||||||
provider_login_page_res = post_form(
|
provider_login_page_res = post_form(
|
||||||
provider_redirect_page_res, self._DOWNLOADING_LOGIN_PAGE)
|
provider_redirect_page_res, self._DOWNLOADING_LOGIN_PAGE)
|
||||||
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
|
form_data = {
|
||||||
mso_info.get('username_field', 'username'): username,
|
mso_info.get('username_field', 'username'): username,
|
||||||
mso_info.get('password_field', 'password'): password,
|
mso_info.get('password_field', 'password'): password
|
||||||
})
|
}
|
||||||
|
if mso_id == 'Cablevision':
|
||||||
|
form_data['_eventId_proceed'] = ''
|
||||||
|
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', form_data)
|
||||||
if mso_id != 'Rogers':
|
if mso_id != 'Rogers':
|
||||||
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
||||||
|
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ from ..utils import (
|
|||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
ISO639Utils,
|
ISO639Utils,
|
||||||
|
join_nonempty,
|
||||||
OnDemandPagedList,
|
OnDemandPagedList,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
str_or_none,
|
str_or_none,
|
||||||
@@ -132,7 +133,7 @@ class AdobeTVIE(AdobeTVBaseIE):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
language, show_urlname, urlname = re.match(self._VALID_URL, url).groups()
|
language, show_urlname, urlname = self._match_valid_url(url).groups()
|
||||||
if not language:
|
if not language:
|
||||||
language = 'en'
|
language = 'en'
|
||||||
|
|
||||||
@@ -178,7 +179,7 @@ class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
|
|||||||
_process_data = AdobeTVBaseIE._parse_video_data
|
_process_data = AdobeTVBaseIE._parse_video_data
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
language, show_urlname = re.match(self._VALID_URL, url).groups()
|
language, show_urlname = self._match_valid_url(url).groups()
|
||||||
if not language:
|
if not language:
|
||||||
language = 'en'
|
language = 'en'
|
||||||
query = {
|
query = {
|
||||||
@@ -215,7 +216,7 @@ class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
|
|||||||
show_data['url'], 'AdobeTVShow', str_or_none(show_data.get('id')))
|
show_data['url'], 'AdobeTVShow', str_or_none(show_data.get('id')))
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups()
|
language, channel_urlname, category_urlname = self._match_valid_url(url).groups()
|
||||||
if not language:
|
if not language:
|
||||||
language = 'en'
|
language = 'en'
|
||||||
query = {
|
query = {
|
||||||
@@ -263,7 +264,7 @@ class AdobeTVVideoIE(AdobeTVBaseIE):
|
|||||||
continue
|
continue
|
||||||
formats.append({
|
formats.append({
|
||||||
'filesize': int_or_none(source.get('kilobytes') or None, invscale=1000),
|
'filesize': int_or_none(source.get('kilobytes') or None, invscale=1000),
|
||||||
'format_id': '-'.join(filter(None, [source.get('format'), source.get('label')])),
|
'format_id': join_nonempty(source.get('format'), source.get('label')),
|
||||||
'height': int_or_none(source.get('height') or None),
|
'height': int_or_none(source.get('height') or None),
|
||||||
'tbr': int_or_none(source.get('bitrate') or None),
|
'tbr': int_or_none(source.get('bitrate') or None),
|
||||||
'width': int_or_none(source.get('width') or None),
|
'width': int_or_none(source.get('width') or None),
|
||||||
|
|||||||
@@ -2,7 +2,6 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import re
|
|
||||||
|
|
||||||
from .turner import TurnerBaseIE
|
from .turner import TurnerBaseIE
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -89,7 +88,7 @@ class AdultSwimIE(TurnerBaseIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
show_path, episode_path = re.match(self._VALID_URL, url).groups()
|
show_path, episode_path = self._match_valid_url(url).groups()
|
||||||
display_id = episode_path or show_path
|
display_id = episode_path or show_path
|
||||||
query = '''query {
|
query = '''query {
|
||||||
getShowBySlug(slug:"%s") {
|
getShowBySlug(slug:"%s") {
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .theplatform import ThePlatformIE
|
from .theplatform import ThePlatformIE
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -20,8 +19,8 @@ class AENetworksBaseIE(ThePlatformIE):
|
|||||||
(?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
|
(?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
|
||||||
fyi\.tv
|
fyi\.tv
|
||||||
)/'''
|
)/'''
|
||||||
_THEPLATFORM_KEY = 'crazyjava'
|
_THEPLATFORM_KEY = '43jXaGRQud'
|
||||||
_THEPLATFORM_SECRET = 's3cr3t'
|
_THEPLATFORM_SECRET = 'S10BPXHMlb'
|
||||||
_DOMAIN_MAP = {
|
_DOMAIN_MAP = {
|
||||||
'history.com': ('HISTORY', 'history'),
|
'history.com': ('HISTORY', 'history'),
|
||||||
'aetv.com': ('AETV', 'aetv'),
|
'aetv.com': ('AETV', 'aetv'),
|
||||||
@@ -170,7 +169,7 @@ class AENetworksIE(AENetworksBaseIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
domain, canonical = re.match(self._VALID_URL, url).groups()
|
domain, canonical = self._match_valid_url(url).groups()
|
||||||
return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)
|
return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)
|
||||||
|
|
||||||
|
|
||||||
@@ -187,7 +186,7 @@ class AENetworksListBaseIE(AENetworksBaseIE):
|
|||||||
}))['data'][resource]
|
}))['data'][resource]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
domain, slug = re.match(self._VALID_URL, url).groups()
|
domain, slug = self._match_valid_url(url).groups()
|
||||||
_, brand = self._DOMAIN_MAP[domain]
|
_, brand = self._DOMAIN_MAP[domain]
|
||||||
playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
|
playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
|
||||||
base_url = 'http://watch.%s' % domain
|
base_url = 'http://watch.%s' % domain
|
||||||
@@ -309,7 +308,7 @@ class HistoryPlayerIE(AENetworksBaseIE):
|
|||||||
_TESTS = []
|
_TESTS = []
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
domain, video_id = re.match(self._VALID_URL, url).groups()
|
domain, video_id = self._match_valid_url(url).groups()
|
||||||
return self._extract_aetn_info(domain, 'id', video_id, url)
|
return self._extract_aetn_info(domain, 'id', video_id, url)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -6,9 +6,11 @@ import re
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_xpath
|
from ..compat import compat_xpath
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
date_from_str,
|
||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
unified_strdate,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
@@ -237,6 +239,7 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id)
|
r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id)
|
||||||
|
|
||||||
partial_view = False
|
partial_view = False
|
||||||
|
adult_view = False
|
||||||
for _ in range(2):
|
for _ in range(2):
|
||||||
query = {
|
query = {
|
||||||
'nTitleNo': video_id,
|
'nTitleNo': video_id,
|
||||||
@@ -245,6 +248,8 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
if partial_view:
|
if partial_view:
|
||||||
query['partialView'] = 'SKIP_ADULT'
|
query['partialView'] = 'SKIP_ADULT'
|
||||||
|
if adult_view:
|
||||||
|
query['adultView'] = 'ADULT_VIEW'
|
||||||
video_xml = self._download_xml(
|
video_xml = self._download_xml(
|
||||||
'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
|
'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
|
||||||
video_id, 'Downloading video info XML%s'
|
video_id, 'Downloading video info XML%s'
|
||||||
@@ -264,6 +269,9 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
partial_view = True
|
partial_view = True
|
||||||
continue
|
continue
|
||||||
elif flag == 'ADULT':
|
elif flag == 'ADULT':
|
||||||
|
if not adult_view:
|
||||||
|
adult_view = True
|
||||||
|
continue
|
||||||
error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
|
error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
|
||||||
else:
|
else:
|
||||||
error = flag
|
error = flag
|
||||||
@@ -309,8 +317,15 @@ class AfreecaTVIE(InfoExtractor):
|
|||||||
if not file_url:
|
if not file_url:
|
||||||
continue
|
continue
|
||||||
key = file_element.get('key', '')
|
key = file_element.get('key', '')
|
||||||
upload_date = self._search_regex(
|
upload_date = unified_strdate(self._search_regex(
|
||||||
r'^(\d{8})_', key, 'upload date', default=None)
|
r'^(\d{8})_', key, 'upload date', default=None))
|
||||||
|
if upload_date is not None:
|
||||||
|
# sometimes the upload date isn't included in the file name
|
||||||
|
# instead, another random ID is, which may parse as a valid
|
||||||
|
# date but be wildly out of a reasonable range
|
||||||
|
parsed_date = date_from_str(upload_date)
|
||||||
|
if parsed_date.year < 2000 or parsed_date.year >= 2100:
|
||||||
|
upload_date = None
|
||||||
file_duration = int_or_none(file_element.get('duration'))
|
file_duration = int_or_none(file_element.get('duration'))
|
||||||
format_id = key if key else '%s_%s' % (video_id, file_num)
|
format_id = key if key else '%s_%s' % (video_id, file_num)
|
||||||
if determine_ext(file_url) == 'm3u8':
|
if determine_ext(file_url) == 'm3u8':
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
|
||||||
@@ -32,7 +31,7 @@ class AlJazeeraIE(InfoExtractor):
|
|||||||
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
|
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
post_type, name = re.match(self._VALID_URL, url).groups()
|
post_type, name = self._match_valid_url(url).groups()
|
||||||
post_type = {
|
post_type = {
|
||||||
'features': 'post',
|
'features': 'post',
|
||||||
'program': 'episode',
|
'program': 'episode',
|
||||||
@@ -40,7 +39,7 @@ class AlJazeeraIE(InfoExtractor):
|
|||||||
}[post_type.split('/')[0]]
|
}[post_type.split('/')[0]]
|
||||||
video = self._download_json(
|
video = self._download_json(
|
||||||
'https://www.aljazeera.com/graphql', name, query={
|
'https://www.aljazeera.com/graphql', name, query={
|
||||||
'operationName': 'SingleArticleQuery',
|
'operationName': 'ArchipelagoSingleArticleQuery',
|
||||||
'variables': json.dumps({
|
'variables': json.dumps({
|
||||||
'name': name,
|
'name': name,
|
||||||
'postType': post_type,
|
'postType': post_type,
|
||||||
|
|||||||
@@ -42,8 +42,7 @@ class AluraIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|
||||||
video_id = self._match_id(url)
|
course, video_id = self._match_valid_url(url)
|
||||||
course = self._search_regex(self._VALID_URL, url, 'post url', group='course_name')
|
|
||||||
video_url = self._VIDEO_URL % (course, video_id)
|
video_url = self._VIDEO_URL % (course, video_id)
|
||||||
|
|
||||||
video_dict = self._download_json(video_url, video_id, 'Searching for videos')
|
video_dict = self._download_json(video_url, video_id, 'Searching for videos')
|
||||||
|
|||||||
53
yt_dlp/extractor/amazon.py
Normal file
53
yt_dlp/extractor/amazon.py
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import int_or_none
|
||||||
|
|
||||||
|
|
||||||
|
class AmazonStoreIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'(?:https?://)(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/[^/]*/?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.amazon.co.uk/dp/B098XNCHLD/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'B098XNCHLD',
|
||||||
|
'title': 'md5:5f3194dbf75a8dcfc83079bd63a2abed',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 1,
|
||||||
|
'playlist': [{
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'A1F83G8C2ARO7P',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'mcdodo usb c cable 100W 5a',
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.amazon.in/Sony-WH-1000XM4-Cancelling-Headphones-Bluetooth/dp/B0863TXGM3',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'B0863TXGM3',
|
||||||
|
'title': 'md5:b0bde4881d3cfd40d63af19f7898b8ff',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 4,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.amazon.com/dp/B0845NXCXF/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'B0845NXCXF',
|
||||||
|
'title': 'md5:2145cd4e3c7782f1ee73649a3cff1171',
|
||||||
|
},
|
||||||
|
'playlist-mincount': 1,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, id)
|
||||||
|
data_json = self._parse_json(self._html_search_regex(r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'(.*)\'\)', webpage, 'data'), id)
|
||||||
|
entries = [{
|
||||||
|
'id': video['marketPlaceID'],
|
||||||
|
'url': video['url'],
|
||||||
|
'title': video.get('title'),
|
||||||
|
'thumbnail': video.get('thumbUrl') or video.get('thumb'),
|
||||||
|
'duration': video.get('durationSeconds'),
|
||||||
|
'height': int_or_none(video.get('videoHeight')),
|
||||||
|
'width': int_or_none(video.get('videoWidth')),
|
||||||
|
} for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')]
|
||||||
|
return self.playlist_result(entries, playlist_id=id, playlist_title=data_json['title'])
|
||||||
@@ -63,7 +63,7 @@ class AMCNetworksIE(ThePlatformIE):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
site, display_id = re.match(self._VALID_URL, url).groups()
|
site, display_id = self._match_valid_url(url).groups()
|
||||||
requestor_id = self._REQUESTOR_ID_MAP[site]
|
requestor_id = self._REQUESTOR_ID_MAP[site]
|
||||||
page_data = self._download_json(
|
page_data = self._download_json(
|
||||||
'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'
|
'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'
|
||||||
|
|||||||
@@ -2,7 +2,6 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -69,7 +68,7 @@ class AmericasTestKitchenIE(InfoExtractor):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
resource_type, video_id = re.match(self._VALID_URL, url).groups()
|
resource_type, video_id = self._match_valid_url(url).groups()
|
||||||
is_episode = resource_type == 'episode'
|
is_episode = resource_type == 'episode'
|
||||||
if is_episode:
|
if is_episode:
|
||||||
resource_type = 'episodes'
|
resource_type = 'episodes'
|
||||||
@@ -114,7 +113,7 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
show_name, season_number = re.match(self._VALID_URL, url).groups()
|
show_name, season_number = self._match_valid_url(url).groups()
|
||||||
season_number = int(season_number)
|
season_number = int(season_number)
|
||||||
|
|
||||||
slug = 'atk' if show_name == 'americastestkitchen' else 'cco'
|
slug = 'atk' if show_name == 'americastestkitchen' else 'cco'
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ from ..utils import (
|
|||||||
determine_ext,
|
determine_ext,
|
||||||
extract_attributes,
|
extract_attributes,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
join_nonempty,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
urljoin,
|
urljoin,
|
||||||
@@ -140,15 +141,8 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
kind = self._search_regex(
|
kind = self._search_regex(
|
||||||
r'videomaterialurl/\d+/([^/]+)/',
|
r'videomaterialurl/\d+/([^/]+)/',
|
||||||
playlist_url, 'media kind', default=None)
|
playlist_url, 'media kind', default=None)
|
||||||
format_id_list = []
|
format_id = join_nonempty(lang, kind) if lang or kind else str(num)
|
||||||
if lang:
|
format_note = join_nonempty(kind, lang_note, delim=', ')
|
||||||
format_id_list.append(lang)
|
|
||||||
if kind:
|
|
||||||
format_id_list.append(kind)
|
|
||||||
if not format_id_list and num is not None:
|
|
||||||
format_id_list.append(compat_str(num))
|
|
||||||
format_id = '-'.join(format_id_list)
|
|
||||||
format_note = ', '.join(filter(None, (kind, lang_note)))
|
|
||||||
item_id_list = []
|
item_id_list = []
|
||||||
if format_id:
|
if format_id:
|
||||||
item_id_list.append(format_id)
|
item_id_list.append(format_id)
|
||||||
@@ -195,12 +189,10 @@ class AnimeOnDemandIE(InfoExtractor):
|
|||||||
if not file_:
|
if not file_:
|
||||||
continue
|
continue
|
||||||
ext = determine_ext(file_)
|
ext = determine_ext(file_)
|
||||||
format_id_list = [lang, kind]
|
format_id = join_nonempty(
|
||||||
if ext == 'm3u8':
|
lang, kind,
|
||||||
format_id_list.append('hls')
|
'hls' if ext == 'm3u8' else None,
|
||||||
elif source.get('type') == 'video/dash' or ext == 'mpd':
|
'dash' if source.get('type') == 'video/dash' or ext == 'mpd' else None)
|
||||||
format_id_list.append('dash')
|
|
||||||
format_id = '-'.join(filter(None, format_id_list))
|
|
||||||
if ext == 'm3u8':
|
if ext == 'm3u8':
|
||||||
file_formats = self._extract_m3u8_formats(
|
file_formats = self._extract_m3u8_formats(
|
||||||
file_, video_id, 'mp4',
|
file_, video_id, 'mp4',
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ from ..utils import (
|
|||||||
determine_ext,
|
determine_ext,
|
||||||
intlist_to_bytes,
|
intlist_to_bytes,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
join_nonempty,
|
||||||
strip_jsonp,
|
strip_jsonp,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
unsmuggle_url,
|
unsmuggle_url,
|
||||||
@@ -303,13 +304,13 @@ class AnvatoIE(InfoExtractor):
|
|||||||
tbr = int_or_none(published_url.get('kbps'))
|
tbr = int_or_none(published_url.get('kbps'))
|
||||||
a_format = {
|
a_format = {
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'format_id': ('-'.join(filter(None, ['http', published_url.get('cdn_name')]))).lower(),
|
'format_id': join_nonempty('http', published_url.get('cdn_name')).lower(),
|
||||||
'tbr': tbr if tbr != 0 else None,
|
'tbr': tbr or None,
|
||||||
}
|
}
|
||||||
|
|
||||||
if media_format == 'm3u8' and tbr is not None:
|
if media_format == 'm3u8' and tbr is not None:
|
||||||
a_format.update({
|
a_format.update({
|
||||||
'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])),
|
'format_id': join_nonempty('hls', tbr),
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
})
|
})
|
||||||
elif media_format == 'm3u8-variant' or ext == 'm3u8':
|
elif media_format == 'm3u8-variant' or ext == 'm3u8':
|
||||||
@@ -390,7 +391,7 @@ class AnvatoIE(InfoExtractor):
|
|||||||
'countries': smuggled_data.get('geo_countries'),
|
'countries': smuggled_data.get('geo_countries'),
|
||||||
})
|
})
|
||||||
|
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
access_key, video_id = mobj.group('access_key_or_mcp', 'id')
|
access_key, video_id = mobj.group('access_key_or_mcp', 'id')
|
||||||
if access_key not in self._ANVACK_TABLE:
|
if access_key not in self._ANVACK_TABLE:
|
||||||
access_key = self._MCP_TO_ACCESS_KEY_TABLE.get(
|
access_key = self._MCP_TO_ACCESS_KEY_TABLE.get(
|
||||||
|
|||||||
@@ -4,13 +4,10 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .yahoo import YahooIE
|
from .yahoo import YahooIE
|
||||||
from ..compat import (
|
|
||||||
compat_parse_qs,
|
|
||||||
compat_urllib_parse_urlparse,
|
|
||||||
)
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
parse_qs,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -119,7 +116,7 @@ class AolIE(YahooIE):
|
|||||||
'height': int(mobj.group(2)),
|
'height': int(mobj.group(2)),
|
||||||
})
|
})
|
||||||
else:
|
else:
|
||||||
qs = compat_parse_qs(compat_urllib_parse_urlparse(video_url).query)
|
qs = parse_qs(video_url)
|
||||||
f.update({
|
f.update({
|
||||||
'width': int_or_none(qs.get('w', [None])[0]),
|
'width': int_or_none(qs.get('w', [None])[0]),
|
||||||
'height': int_or_none(qs.get('h', [None])[0]),
|
'height': int_or_none(qs.get('h', [None])[0]),
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ class APAIE(InfoExtractor):
|
|||||||
webpage)]
|
webpage)]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
video_id, base_url = mobj.group('id', 'base_url')
|
video_id, base_url = mobj.group('id', 'base_url')
|
||||||
|
|
||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(
|
||||||
|
|||||||
@@ -94,7 +94,7 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
_JSON_RE = r'iTunes.playURL\((.*?)\);'
|
_JSON_RE = r'iTunes.playURL\((.*?)\);'
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
movie = mobj.group('movie')
|
movie = mobj.group('movie')
|
||||||
uploader_id = mobj.group('company')
|
uploader_id = mobj.group('company')
|
||||||
|
|
||||||
|
|||||||
@@ -9,8 +9,6 @@ from .youtube import YoutubeIE
|
|||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_urllib_parse_unquote,
|
compat_urllib_parse_unquote,
|
||||||
compat_urllib_parse_unquote_plus,
|
compat_urllib_parse_unquote_plus,
|
||||||
compat_urlparse,
|
|
||||||
compat_parse_qs,
|
|
||||||
compat_HTTPError
|
compat_HTTPError
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@@ -25,6 +23,7 @@ from ..utils import (
|
|||||||
merge_dicts,
|
merge_dicts,
|
||||||
mimetype2ext,
|
mimetype2ext,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
|
parse_qs,
|
||||||
RegexNotFoundError,
|
RegexNotFoundError,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
str_or_none,
|
str_or_none,
|
||||||
@@ -399,7 +398,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
|
|||||||
expected=True)
|
expected=True)
|
||||||
raise
|
raise
|
||||||
video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
|
video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
|
||||||
video_file_url_qs = compat_parse_qs(compat_urlparse.urlparse(video_file_url).query)
|
video_file_url_qs = parse_qs(video_file_url)
|
||||||
|
|
||||||
# Attempt to recover any ext & format info from playback url
|
# Attempt to recover any ext & format info from playback url
|
||||||
format = {'url': video_file_url}
|
format = {'url': video_file_url}
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ class ArcPublishingIE(InfoExtractor):
|
|||||||
return entries
|
return entries
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
org, uuid = re.match(self._VALID_URL, url).groups()
|
org, uuid = self._match_valid_url(url).groups()
|
||||||
for orgs, tmpl in self._POWA_DEFAULTS:
|
for orgs, tmpl in self._POWA_DEFAULTS:
|
||||||
if org in orgs:
|
if org in orgs:
|
||||||
base_api_tmpl = tmpl
|
base_api_tmpl = tmpl
|
||||||
|
|||||||
@@ -199,7 +199,7 @@ class ARDMediathekIE(ARDMediathekBaseIE):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
# determine video id from url
|
# determine video id from url
|
||||||
m = re.match(self._VALID_URL, url)
|
m = self._match_valid_url(url)
|
||||||
|
|
||||||
document_id = None
|
document_id = None
|
||||||
|
|
||||||
@@ -325,7 +325,7 @@ class ARDIE(InfoExtractor):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
display_id = mobj.group('id')
|
display_id = mobj.group('id')
|
||||||
|
|
||||||
player_url = mobj.group('mainurl') + '~playerXml.xml'
|
player_url = mobj.group('mainurl') + '~playerXml.xml'
|
||||||
@@ -525,7 +525,7 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
|||||||
return self.playlist_result(entries, playlist_title=display_id)
|
return self.playlist_result(entries, playlist_title=display_id)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
video_id = mobj.group('video_id')
|
video_id = mobj.group('video_id')
|
||||||
display_id = mobj.group('display_id')
|
display_id = mobj.group('display_id')
|
||||||
if display_id:
|
if display_id:
|
||||||
|
|||||||
@@ -4,12 +4,12 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urlparse
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
|
parse_qs,
|
||||||
try_get,
|
try_get,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -63,13 +63,13 @@ class ArkenaIE(InfoExtractor):
|
|||||||
return mobj.group('url')
|
return mobj.group('url')
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
account_id = mobj.group('account_id')
|
account_id = mobj.group('account_id')
|
||||||
|
|
||||||
# Handle http://video.arkena.com/play2/embed/player URL
|
# Handle http://video.arkena.com/play2/embed/player URL
|
||||||
if not video_id:
|
if not video_id:
|
||||||
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
|
qs = parse_qs(url)
|
||||||
video_id = qs.get('mediaId', [None])[0]
|
video_id = qs.get('mediaId', [None])[0]
|
||||||
account_id = qs.get('accountId', [None])[0]
|
account_id = qs.get('accountId', [None])[0]
|
||||||
if not video_id or not account_id:
|
if not video_id or not account_id:
|
||||||
|
|||||||
@@ -6,11 +6,11 @@ import re
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_str,
|
compat_str,
|
||||||
compat_urlparse,
|
|
||||||
)
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
parse_qs,
|
||||||
qualities,
|
qualities,
|
||||||
try_get,
|
try_get,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
@@ -49,7 +49,7 @@ class ArteTVIE(ArteTVBaseIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = self._match_valid_url(url)
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
lang = mobj.group('lang') or mobj.group('lang_2')
|
lang = mobj.group('lang') or mobj.group('lang_2')
|
||||||
|
|
||||||
@@ -174,7 +174,7 @@ class ArteTVIE(ArteTVBaseIE):
|
|||||||
return {
|
return {
|
||||||
'id': player_info.get('VID') or video_id,
|
'id': player_info.get('VID') or video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': player_info.get('VDE'),
|
'description': player_info.get('VDE') or player_info.get('V7T'),
|
||||||
'upload_date': unified_strdate(upload_date_str),
|
'upload_date': unified_strdate(upload_date_str),
|
||||||
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
|
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
@@ -204,7 +204,7 @@ class ArteTVEmbedIE(InfoExtractor):
|
|||||||
webpage)]
|
webpage)]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
|
qs = parse_qs(url)
|
||||||
json_url = qs['json_url'][0]
|
json_url = qs['json_url'][0]
|
||||||
video_id = ArteTVIE._match_id(json_url)
|
video_id = ArteTVIE._match_id(json_url)
|
||||||
return self.url_result(
|
return self.url_result(
|
||||||
@@ -227,7 +227,7 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
lang, playlist_id = re.match(self._VALID_URL, url).groups()
|
lang, playlist_id = self._match_valid_url(url).groups()
|
||||||
collection = self._download_json(
|
collection = self._download_json(
|
||||||
'%s/collectionData/%s/%s?source=videos'
|
'%s/collectionData/%s/%s?source=videos'
|
||||||
% (self._API_BASE, lang, playlist_id), playlist_id)
|
% (self._API_BASE, lang, playlist_id), playlist_id)
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user