Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2025-12-08 15:12:47 +01:00)

Compare commits: 2021.11.10 ... 2022.03.08 (570 commits)
Commits in this range, abbreviated SHA1s only (the author, date and message columns of the commit table were not captured by the mirror):

a3b7dff015 c0c2c57d35 aee6ce5867 d1b5f70bc9 1eae7f94c1 535eb16a44
9461cb586a a405b38f20 08d30158ec c89bec262c 151f8f1c02 a35155be17
e66662b1e0 4390d5ec12 9e0e6adb2d b637c4e22e fb6e3f4389 409cdd1ec9
992f9a730b 497d2fab6c 2807d1709b b46ccbc6d4 1ed7953a74 d49669acad
bed30106f5 27231526ae 50e93e03a7 72e995f122 8b7539d27c e48b3875ec
2a938746f3 933dbf5a55 a10aa588b0 be8cd3cb1d 319b6059d2 4c3f8c3fb6
7265a2190c 3a4bb9f751 b90dbe6c19 97bef011ee ecca4519b7 761fba6d22
5bcccbfec3 ded9f32667 45806d44a7 747c0bd127 acea8d7cfb f1d130902b
c2ae48dbd5 a5c0c20252 f494ddada8 02fc6feb6e 7eaf7f9aba 334b1c4800
7c219ea601 93c8410d33 195c22840c f0734e1190 15dfb3929c 3e9b66d761
a539f06570 b440e1bb22 03f830040a 09b49e1f68 1108613f02 a30a6ed3e4
65d151d58f 72073451be 77cc7c6e60 971c4847d7 7a34b5d628 4d4f9a029f
f099df1463 3f4faff748 be8d623455 a7d4acc018 febff4c119 ed66a17ef0
5625e6073f 0ad92dfb18 60f3e99592 8d93e69d67 3aa915400d dcd55f766d
2e4cacd038 c15c316b21 549cb2a836 c571b3a6ab 5b804e3906 6bb608d055
ae419aa94f ac184ab742 5c10453827 ffa89477ea db74de8c54 edecb5f81f
85a0ad0117 07ea0014ae e1f7f235bd fc259cc249 9a5b012575 df635a09a4
812283199a 5c6dfc1f79 c2a8547fdc 0a19532ead 2d41e2eceb 81c5f44c0f
1f7db8533a e8969bda94 c82f051dbb 49895f062e 60f393e48b 88afe05695
57ebfca39b b1cb0525ac da42679b87 2944835080 a3eb987e0e 7bc33ad0e9
2068a60318 1ce9a3cb49 d49f8db39f ab6df717d1 0c8d9e5fec 3f047fc406
82b5176783 17b183886f cd170e8184 297e9952b6 dca4f46274 5dee3ad037
079a7cfc71 3856407a86 db2e129ca0 1209b6ca5b a3125791c7 f1657a98cb
b761428226 c1653e9efb 84bbc54599 1e5d87beee 22219f2d1f 5a13fdd225
af5c1c553e 3cea9ec2eb 28469edd7d d5a398988b 455a15e2dc 460a1c08b9
4918522735 65662dffb1 5e51f4a8ad 54bb39065c c5332d7fbb 35cd4c4d88
67fb99f193 85553414ae d16df59db5 63c3ee4f63 182bda88e8 16aa9ea41d
d6bc443bde 046cab3915 7df07a3b55 2d49720f89 48416bc4a8 6a0546e313
dbcea0585f f7d4854131 403be2eefb 63bac931c2 7c74a01584 1d3586d0d5
c533c89ce1 b8b3f4562a 1c6f480160 f8580bf02f 19afd9ea51 b72270d27e
706dfe441b c4da5ff971 e26f9cc1e5 fa8fd95118 05b23b4156 8f028b5f40
013322a95e fb62afd6f0 50600e833d fc08bdd6ab 2568d41f70 88f23a18e0
bb66c24797 2edb38e8ca af6793f804 b695e3f9bd 6a5a30f9e2 d37707bda4
f40ee5e9a0 1f13021eca e612f66c7c 87e8e8a7d0 e600a5c908 50ce204cc2
144a3588b4 ed40877833 935f5a4209 6970b6005e fc5fa964c7 e0ddbd02bd
0bfc53d05c 78ab4f447c 85fee22152 ad9158d5f4 f81c62a6a4 6c73052c0a
593e43c030 8fe514d382 b1156c1e59 311b6615d8 396a76f7bf 301d07fc4b
d14cbdd92d 19b4c74d40 135dfa2c7e e0585e6562 426764371f 64f36541c9
0ff1e0fba3 1a20d29552 f7085283e1 e25ca9b017 4259402c56 dfb7f2a25d
42c5458a02 ba1c671d2e b143e83ec9 4a77fb1d6b 66f7c6a3e0 baf599effa
8bd1c00bf3 596379e260 b6ce9bb038 eea1b0358e 32b95bb643 fdf80059d9
aa062713c1 71738b1451 0bb5ac1ac4 77b28f000a d57576b9d9 11c861702d
a4a426023d 3b603dbdf1 5df1ac92bd b2db8102dc e9a6a65a55 ed8d87f911
397235c52b 4636548463 cb3c5682ae 7d449fff53 80fa6e5327 fabb27fcea
e04938ab88 8bcd404818 0df11dafdd dc5f409cdc 99d6f9461d 8130779db6
ed5835b451 e88e1febd8 faca674510 0931ba94ab b31874334d f1150b9e1e
d6579d532b 2be56f2242 f95a7b93e6 62c955efc9 0254f16274 a70b71e85a
4c968755fc be1f331f21 3cf5429a21 bfa0e270cf f76ca2dd56 5f969a78b0
443f8de820 768145d48a 976ae3eabb f0d785d3ed 97a6b117d9 6f32a0b5b7
e8736539f3 9c634ef857 9f517bb1f3 b8eeced286 db47787024 fdeab99eab
9e907ebddf 21df2117e4 06e57990f7 b62fa6d75f be72c62480 61e9d9268c
a13e684813 f46e2f9d92 9c906919ae 6020e05d23 ebed8b3732 1e43a6f733
ca30f449a1 af3cbd8782 7141ced57d 18c7683d27 f5c2c2c9b0 8896899216
1797b073ed 4c922dd3fc b8e976a445 a9f5f5d6eb f522573787 7592749cbe
767f999b53 8efffafa53 26f2aa3db9 3464a2727b 497d77e1aa 9040e2d6e3
6134fbeb65 cfcf60ea99 4afa3ec4b6 11aa91a12f abbeeebc4c 2c539d493a
042931a507 96f13f01a6 4b9353239e dd5e60b15d e540c56f39 45d86abeb4
f02d24d8d2 ceb98323f2 7537e35b64 1e5c83b26b 6223f67a8c 6a34813a0d
f59f5ef8b6 f44afb54ef 77cee0f188 6a17677577 ee7b9bdf5d 185bf31070
0b77924a38 8126298c1b 6da22e7d4f c62ecf0d90 3774f4f427 9980d3d213
8eb4b1bb8e 332da56f52 459aea84c3 87e0499624 0f86a1cd59 d80d98e7d4
352d5da812 d43de6821c 070f6a85ea 4b4b7f746c e9efb99f66 a709d87335
774a46c53d c8b80b9643 4e260d1a56 4f3fa23e5a b28bac93ab 37893bb0c9
c25de59cf7 205a0654c0 663949f825 b69fd25c25 e0fd95737d 4ac5b94807
4273cc776d fa9f30b802 1cefca9e44 5edb8dfec2 0fcba15d57 adbc4ec4bb
c031b0414c f3aa3c3f98 ae43a4b986 ca5db158ae 5f549d4959 6839d02cb6
2aae2c91ff c2dedf12e8 e75bb0d6c3 dd0228ce1f 37e57a9fd4 940a67a3e2
e6ae51c123 75ad33572b aab41cdd33 b3a5115ff1 d76d15a669 e978789f0f
ec2e44fc57 375d9360bf d5c3254889 fed1309651 fe69f52e5c 3116be32b4
a8549f19e7 39ca3b5c7f 46383212b3 0bb322b9c0 ff9f925b63 5bfc8bee5a
19188702ef d984a98def 069c6ccf02 53dad39e30 db77c49c84 abc07b554c
86f3d52f8c 8b688881ba 13debc86e7 b5f94e4fa1 61882afdc5 aa4b054512
487c5b3389 8157a09d22 b1aaf1c07f 5f9aaac8c2 54c2521ca6 2814f12ba4
1619836cb7 e3c7d49571 ddd24c9949 443b21dc4e 66f4c04e50 93864403ea
b5475f1145 38d79fd16c acc0d6a411 146cc4114a 818faa3a86 aa5ecf082c
d2b2fca53f 63ccf4ff1a 43b2290658 99148c6a33 9bdd99cf39 2c4aaaddc9
5f7cb91ae9 3efb96a6d1 3262f8abf2 bdbafb3913 a804f6d89c 814dfb7e25
91f071af60 2aa5e2cc01 1bad50eced ac0efabf12 73f035e1fe 0cbed930c8
5118d2ec58 717216b093 5c22c63da3 ee8dd27a73 f304da8a29 06dfe0a0a2
75b725a7cc 13ab5fa586 36eaf3039a f2ebc5c7be b222c27145 5e5be0c0b2
7578d77d8c b29165267f bc104778d6 d298d33fe6 bf57cfa8b7 3c2208f82d
93e597ba28 b28cdcc0e4 a33c0d9c5d 75689fe59b 5ce1d13eba e04b003e64
909b0d66f4 dfd78699f5 639f80c1f9 896a88c5c6 4e4ba1d75f 2abf081554
359df0fc42 3938a9212c cf1f13b817 18d6dd4e01 883ecd5494 eb56d132d2
17b4540662 da27aeea5c fec41d17a5 a61fd4cf6f a6213a4925 9941a1e127
ff51ed588f 57dbe8077f e5d731f35d d52cd2f5cd bc8ab44ea0 8f122fa070
14a086058a 0e6b018a10 f7b558df4d 1ee34c76bb 234416e4bf c98d4df23b
849d699a8b 77fcc65158 545ad64988 d76991ab07 282f570918 c07a39ae8e
c5e3f84972 c45b87419f 7333296ff5 a04e005521 6b993ca765 dd2a987d3f
9222c38182 467b6b8387 8863c8f09e e16fefd869 c6118ca2cc 764f5de2f4
cfcaf64a4b 402cd603a4 22a510ff44 61be785a67 11852843e7 525d9e0c7d
9d63137eac 266a1b5d52 450bdf69bc 720c309932 d8cf8d97a8 d0d012d4e7
013b50b794 dac5df5a98 f279aaee8e d0e6121adf 9ac24e235e 7c7f7161fc
e339d25a0d 39c04074e7 92775d8a40 df03de2c02 48e9310660 c1dc0ee56e
bf5f605e76 e08a85d865 093a17107e 44bcb8d122 013ae2e503 b47d236d72
.github/ISSUE_TEMPLATE/1_broken_site.yml (vendored, 14 lines changed)

@@ -1,6 +1,6 @@
-name: Broken site support
+name: Broken site
 description: Report broken or misfunctioning site
-labels: [triage, extractor-bug]
+labels: [triage, site-bug]
 body:
   - type: checkboxes
     id: checklist
@@ -11,7 +11,7 @@ body:
       options:
        - label: I'm reporting a broken site
          required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
          required: true
        - label: I've checked that all provided URLs are alive and playable in a browser
          required: true
@@ -44,19 +44,19 @@ body:
      label: Verbose log
      description: |
        Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
-        Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        Add the `-vU` flag to your command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
        It should look similar to this:
      placeholder: |
-        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Portable config file: yt-dlp.conf
        [debug] Portable config: ['-i']
        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
-        [debug] yt-dlp version 2021.11.10.1 (exe)
+        [debug] yt-dlp version 2022.03.08.1 (exe)
        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
        [debug] Proxy map: {}
-        yt-dlp is up to date (2021.11.10.1)
+        yt-dlp is up to date (2022.03.08.1)
        <more lines>
      render: shell
    validations:
.github/ISSUE_TEMPLATE/2_site_support_request.yml (vendored)

@@ -11,7 +11,7 @@ body:
      options:
        - label: I'm reporting a new site support request
          required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
          required: true
        - label: I've checked that all provided URLs are alive and playable in a browser
          required: true
@@ -34,7 +34,7 @@ body:
      label: Example URLs
      description: |
        Provide all kinds of example URLs for which support should be added
-      value: |
+      placeholder: |
        - Single video: https://www.youtube.com/watch?v=BaW_jenozKc
        - Single video: https://youtu.be/BaW_jenozKc
        - Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
@@ -55,19 +55,19 @@ body:
      label: Verbose log
      description: |
        Provide the complete verbose output **using one of the example URLs provided above**.
-        Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        Add the `-vU` flag to your command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
        It should look similar to this:
      placeholder: |
-        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Portable config file: yt-dlp.conf
        [debug] Portable config: ['-i']
        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
-        [debug] yt-dlp version 2021.11.10.1 (exe)
+        [debug] yt-dlp version 2022.03.08.1 (exe)
        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
        [debug] Proxy map: {}
-        yt-dlp is up to date (2021.11.10.1)
+        yt-dlp is up to date (2022.03.08.1)
        <more lines>
      render: shell
    validations:
.github/ISSUE_TEMPLATE/3_site_feature_request.yml (vendored)

@@ -1,5 +1,5 @@
 name: Site feature request
-description: Request a new functionality for a site
+description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
   - type: checkboxes
@@ -11,7 +11,7 @@ body:
      options:
        - label: I'm reporting a site feature request
          required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
          required: true
        - label: I've checked that all provided URLs are alive and playable in a browser
          required: true
@@ -32,7 +32,7 @@ body:
      label: Example URLs
      description: |
        Example URLs that can be used to demonstrate the requested feature
-      value: |
+      placeholder: |
        https://www.youtube.com/watch?v=BaW_jenozKc
    validations:
      required: true
@@ -47,3 +47,26 @@ body:
      placeholder: WRITE DESCRIPTION HERE
    validations:
      required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        Provide the complete verbose output of yt-dlp that demonstrates the need for the enhancement.
+        Add the `-vU` flag to your command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2022.03.08.1 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2022.03.08.1)
+        <more lines>
+      render: shell
+    validations:
+      required: true
.github/ISSUE_TEMPLATE/4_bug_report.yml (vendored, 12 lines changed)

@@ -1,6 +1,6 @@
 name: Bug report
 description: Report a bug unrelated to any particular site or extractor
-labels: [triage,bug]
+labels: [triage, bug]
 body:
   - type: checkboxes
     id: checklist
@@ -11,7 +11,7 @@ body:
      options:
        - label: I'm reporting a bug unrelated to a specific site
          required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
          required: true
        - label: I've checked that all provided URLs are alive and playable in a browser
          required: true
@@ -38,19 +38,19 @@ body:
      label: Verbose log
      description: |
        Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
-        Add the `-Uv` flag to **your** command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
        It should look similar to this:
      placeholder: |
-        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Portable config file: yt-dlp.conf
        [debug] Portable config: ['-i']
        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
-        [debug] yt-dlp version 2021.11.10.1 (exe)
+        [debug] yt-dlp version 2022.03.08.1 (exe)
        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
        [debug] Proxy map: {}
-        yt-dlp is up to date (2021.11.10.1)
+        yt-dlp is up to date (2022.03.08.1)
        <more lines>
      render: shell
    validations:
.github/ISSUE_TEMPLATE/5_feature_request.yml (vendored, 6 lines changed)

@@ -1,4 +1,4 @@
-name: Feature request request
+name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
@@ -11,7 +11,9 @@ body:
      options:
        - label: I'm reporting a feature request
          required: true
-        - label: I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
+          required: true
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
          required: true
        - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
          required: true
.github/ISSUE_TEMPLATE/6_question.yml (vendored, 27 lines changed)

@@ -9,7 +9,7 @@ body:
      description: |
        Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
      options:
-        - label: I'm asking a question and not reporting a bug/feature request
+        - label: I'm asking a question and **not** reporting a bug/feature request
          required: true
        - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
          required: true
@@ -24,7 +24,30 @@ body:
      description: |
        Ask your question in an arbitrary form.
        Please make sure it's worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
-        Provide any additional information and as much context and examples as possible
+        Provide any additional information and as much context and examples as possible.
+        If your question contains "isn't working" or "can you add", this is most likely the wrong template.
+        If you are in doubt if this is the right template, use another template!
      placeholder: WRITE QUESTION HERE
    validations:
      required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        If your question involes a yt-dlp command, provide the complete verbose output of that command.
+        Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.01 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.01)
+        <more lines>
+      render: shell
.github/ISSUE_TEMPLATE/config.yml (vendored, 3 lines changed)

@@ -3,3 +3,6 @@ contact_links:
   - name: Get help from the community on Discord
     url: https://discord.gg/H5MNcFW63r
     about: Join the yt-dlp Discord for community-powered support!
+  - name: Matrix Bridge to the Discord server
+    url: https://matrix.to/#/#yt-dlp:matrix.org
+    about: For those who do not want to use Discord
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml (vendored)

@@ -1,6 +1,6 @@
-name: Broken site support
+name: Broken site
 description: Report broken or misfunctioning site
-labels: [triage, extractor-bug]
+labels: [triage, site-bug]
 body:
   - type: checkboxes
     id: checklist
@@ -44,10 +44,10 @@ body:
      label: Verbose log
      description: |
        Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
-        Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        Add the `-vU` flag to your command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
        It should look similar to this:
      placeholder: |
-        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Portable config file: yt-dlp.conf
        [debug] Portable config: ['-i']
        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml (vendored)

@@ -34,7 +34,7 @@ body:
      label: Example URLs
      description: |
        Provide all kinds of example URLs for which support should be added
-      value: |
+      placeholder: |
        - Single video: https://www.youtube.com/watch?v=BaW_jenozKc
        - Single video: https://youtu.be/BaW_jenozKc
        - Playlist: https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc
@@ -55,10 +55,10 @@ body:
      label: Verbose log
      description: |
        Provide the complete verbose output **using one of the example URLs provided above**.
-        Add the `-Uv` flag to your command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        Add the `-vU` flag to your command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
        It should look similar to this:
      placeholder: |
-        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Portable config file: yt-dlp.conf
        [debug] Portable config: ['-i']
        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml (vendored)

@@ -1,5 +1,5 @@
 name: Site feature request
-description: Request a new functionality for a site
+description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
   - type: checkboxes
@@ -32,7 +32,7 @@ body:
      label: Example URLs
      description: |
        Example URLs that can be used to demonstrate the requested feature
-      value: |
+      placeholder: |
        https://www.youtube.com/watch?v=BaW_jenozKc
    validations:
      required: true
@@ -47,3 +47,26 @@ body:
      placeholder: WRITE DESCRIPTION HERE
    validations:
      required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        Provide the complete verbose output of yt-dlp that demonstrates the need for the enhancement.
+        Add the `-vU` flag to your command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version %(version)s (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (%(version)s)
+        <more lines>
+      render: shell
+    validations:
+      required: true
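Note the `%(version)s` fields in the `ISSUE_TEMPLATE_tmpl` file above: the templates are expanded into the dated files under `.github/ISSUE_TEMPLATE/` by `make issuetemplates`, which is run in the build workflow below. A minimal sketch of that expansion, assuming printf-style substitution and the directory layout shown in these diffs; the version-lookup helper and file handling here are illustrative, not the actual devscript:

```python
import re
from pathlib import Path


def read_version():
    # Illustrative assumption: the released version lives in yt_dlp/version.py
    # as `__version__ = '...'` after devscripts/update-version.py has run
    match = re.search(r"__version__\s*=\s*'([^']+)'",
                      Path('yt_dlp/version.py').read_text())
    return match.group(1)


def expand_issue_templates(src='.github/ISSUE_TEMPLATE_tmpl', dst='.github/ISSUE_TEMPLATE'):
    fields = {'version': read_version()}
    for template in Path(src).glob('*.yml'):
        # `%(version)s` placeholders are filled via printf-style formatting
        (Path(dst) / template.name).write_text(template.read_text() % fields)


if __name__ == '__main__':
    expand_issue_templates()
```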
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml (vendored, 6 lines changed)

@@ -1,6 +1,6 @@
 name: Bug report
 description: Report a bug unrelated to any particular site or extractor
-labels: [triage,bug]
+labels: [triage, bug]
 body:
   - type: checkboxes
     id: checklist
@@ -38,10 +38,10 @@ body:
      label: Verbose log
      description: |
        Provide the complete verbose output of yt-dlp **that clearly demonstrates the problem**.
-        Add the `-Uv` flag to **your** command line you run yt-dlp with (`yt-dlp -Uv <your command line>`), copy the WHOLE output and insert it below.
+        Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
        It should look similar to this:
      placeholder: |
-        [debug] Command-line config: ['-Uv', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
        [debug] Portable config file: yt-dlp.conf
        [debug] Portable config: ['-i']
        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml (vendored)

@@ -1,4 +1,4 @@
-name: Feature request request
+name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
@@ -11,6 +11,8 @@ body:
      options:
        - label: I'm reporting a feature request
          required: true
+        - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
+          required: true
        - label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
          required: true
        - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
.github/ISSUE_TEMPLATE_tmpl/6_question.yml (vendored, 27 lines changed)

@@ -9,7 +9,7 @@ body:
      description: |
        Carefully read and work through this check list in order to prevent the most common mistakes and misuse of yt-dlp:
      options:
-        - label: I'm asking a question and not reporting a bug/feature request
+        - label: I'm asking a question and **not** reporting a bug/feature request
          required: true
        - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
          required: true
@@ -24,7 +24,30 @@ body:
      description: |
        Ask your question in an arbitrary form.
        Please make sure it's worded well enough to be understood, see [is-the-description-of-the-issue-itself-sufficient](https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient).
-        Provide any additional information and as much context and examples as possible
+        Provide any additional information and as much context and examples as possible.
+        If your question contains "isn't working" or "can you add", this is most likely the wrong template.
+        If you are in doubt if this is the right template, use another template!
      placeholder: WRITE QUESTION HERE
    validations:
      required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        If your question involes a yt-dlp command, provide the complete verbose output of that command.
+        Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.01 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.01)
+        <more lines>
+      render: shell
.github/workflows/build.yml (vendored, 83 lines changed)

@@ -1,14 +1,11 @@
 name: Build
 
-on:
-  push:
-    branches:
-      - release
+on: workflow_dispatch
 
 jobs:
   build_unix:
     runs-on: ubuntu-latest
     outputs:
+      version_suffix: ${{ steps.version_suffix.outputs.version_suffix }}
       ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
       upload_url: ${{ steps.create_release.outputs.upload_url }}
       sha256_bin: ${{ steps.sha256_bin.outputs.sha256_bin }}
@@ -26,23 +23,32 @@ jobs:
          python-version: '3.8'
      - name: Install packages
        run: sudo apt-get -y install zip pandoc man
+      - name: Set version suffix
+        id: version_suffix
+        env:
+          PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
+        if: "env.PUSH_VERSION_COMMIT == ''"
+        run: echo ::set-output name=version_suffix::$(date -u +"%H%M%S")
      - name: Bump version
        id: bump_version
        run: |
-          python devscripts/update-version.py
+          python devscripts/update-version.py ${{ steps.version_suffix.outputs.version_suffix }}
          make issuetemplates
      - name: Print version
        run: echo "${{ steps.bump_version.outputs.ytdlp_version }}"
-      - name: Update master
-        id: push_update
+      - name: Push to release
+        id: push_release
        run: |
-          git config --global user.email "${{ github.event.pusher.email }}"
-          git config --global user.name "${{ github.event.pusher.name }}"
+          git config --global user.name github-actions
+          git config --global user.email github-actions@example.com
          git add -u
-          git commit -m "[version] update" -m ":ci skip all"
-          git pull --rebase origin ${{ github.event.repository.master_branch }}
-          git push origin ${{ github.event.ref }}:${{ github.event.repository.master_branch }}
+          git commit -m "[version] update" -m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all"
+          git push origin --force ${{ github.event.ref }}:release
+          echo ::set-output name=head_sha::$(git rev-parse HEAD)
+      - name: Update master
+        id: push_master
+        env:
+          PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
+        if: "env.PUSH_VERSION_COMMIT != ''"
+        run: git push origin ${{ github.event.ref }}
      - name: Get Changelog
        id: get_changelog
        run: |
@@ -90,7 +96,7 @@ jobs:
        env:
          BREW_TOKEN: ${{ secrets.BREW_TOKEN }}
        if: "env.BREW_TOKEN != ''"
-        uses: webfactory/ssh-agent@v0.5.3
+        uses: yt-dlp/ssh-agent@v0.5.3
        with:
          ssh-private-key: ${{ env.BREW_TOKEN }}
      - name: Update Homebrew Formulae
@@ -113,7 +119,7 @@ jobs:
        with:
          tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
          release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
-          commitish: ${{ steps.push_update.outputs.head_sha }}
+          commitish: ${{ steps.push_release.outputs.head_sha }}
          body: |
            #### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
@@ -146,7 +152,6 @@ jobs:
   build_macos:
     runs-on: macos-11
     needs: build_unix
-    if: False
     outputs:
       sha256_macos: ${{ steps.sha256_macos.outputs.sha256_macos }}
       sha512_macos: ${{ steps.sha512_macos.outputs.sha512_macos }}
@@ -159,7 +164,7 @@ jobs:
      - name: Install Requirements
        run: |
          brew install coreutils
-          /usr/bin/python3 -m pip install -U --user pip Pyinstaller mutagen pycryptodomex websockets
+          /usr/bin/python3 -m pip install -U --user pip Pyinstaller==4.10 -r requirements.txt
      - name: Bump version
        id: bump_version
        run: /usr/bin/python3 devscripts/update-version.py
@@ -186,11 +191,9 @@ jobs:
        run: echo "::set-output name=sha512_macos::$(sha512sum dist/yt-dlp_macos | awk '{print $1}')"
 
      - name: Run PyInstaller Script with --onedir
-        run: /usr/bin/python3 pyinst.py --target-architecture universal2 --onedir
-      - uses: papeloto/action-zip@v1
-        with:
-          files: ./dist/yt-dlp_macos
-          dest: ./dist/yt-dlp_macos.zip
+        run: |
+          /usr/bin/python3 pyinst.py --target-architecture universal2 --onedir
+          zip ./dist/yt-dlp_macos.zip ./dist/yt-dlp_macos
      - name: Upload yt-dlp MacOS onedir
        id: upload-release-macos-zip
        uses: actions/upload-release-asset@v1
@@ -204,7 +207,7 @@ jobs:
      - name: Get SHA2-256SUMS for yt-dlp_macos.zip
        id: sha256_macos_zip
        run: echo "::set-output name=sha256_macos_zip::$(sha256sum dist/yt-dlp_macos.zip | awk '{print $1}')"
-      - name: Get SHA2-512SUMS for yt-dlp_macos
+      - name: Get SHA2-512SUMS for yt-dlp_macos.zip
        id: sha512_macos_zip
        run: echo "::set-output name=sha512_macos_zip::$(sha512sum dist/yt-dlp_macos.zip | awk '{print $1}')"
@@ -230,10 +233,12 @@ jobs:
        # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
        run: |
          python -m pip install --upgrade pip setuptools wheel py2exe
-          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
+          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.10-py3-none-any.whl" -r requirements.txt
      - name: Bump version
        id: bump_version
-        run: python devscripts/update-version.py
+        env:
+          version_suffix: ${{ needs.build_unix.outputs.version_suffix }}
+        run: python devscripts/update-version.py ${{ env.version_suffix }}
      - name: Build lazy extractors
        id: lazy_extractors
        run: python devscripts/make_lazy_extractors.py
@@ -257,11 +262,9 @@ jobs:
        run: echo "::set-output name=sha512_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
 
      - name: Run PyInstaller Script with --onedir
-        run: python pyinst.py --onedir
-      - uses: papeloto/action-zip@v1
-        with:
-          files: ./dist/yt-dlp
-          dest: ./dist/yt-dlp_win.zip
+        run: |
+          python pyinst.py --onedir
+          Compress-Archive -LiteralPath ./dist/yt-dlp -DestinationPath ./dist/yt-dlp_win.zip
      - name: Upload yt-dlp Windows onedir
        id: upload-release-windows-zip
        uses: actions/upload-release-asset@v1
@@ -317,10 +320,12 @@ jobs:
      - name: Install Requirements
        run: |
          python -m pip install --upgrade pip setuptools wheel
-          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
+          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.10-py3-none-any.whl" -r requirements.txt
      - name: Bump version
        id: bump_version
-        run: python devscripts/update-version.py
+        env:
+          version_suffix: ${{ needs.build_unix.outputs.version_suffix }}
+        run: python devscripts/update-version.py ${{ env.version_suffix }}
      - name: Build lazy extractors
        id: lazy_extractors
        run: python devscripts/make_lazy_extractors.py
@@ -345,7 +350,7 @@ jobs:
 
   finish:
     runs-on: ubuntu-latest
-    needs: [build_unix, build_windows, build_windows32]
+    needs: [build_unix, build_windows, build_windows32, build_macos]
 
     steps:
      - name: Make SHA2-256SUMS file
@@ -365,8 +370,8 @@ jobs:
          echo "${{ env.SHA256_PY2EXE }} yt-dlp_min.exe" >> SHA2-256SUMS
          echo "${{ env.SHA256_WIN32 }} yt-dlp_x86.exe" >> SHA2-256SUMS
          echo "${{ env.SHA256_WIN_ZIP }} yt-dlp_win.zip" >> SHA2-256SUMS
-          # echo "${{ env.SHA256_MACOS }} yt-dlp_macos" >> SHA2-256SUMS
-          # echo "${{ env.SHA256_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-256SUMS
+          echo "${{ env.SHA256_MACOS }} yt-dlp_macos" >> SHA2-256SUMS
+          echo "${{ env.SHA256_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-256SUMS
      - name: Upload 256SUMS file
        id: upload-sums
        uses: actions/upload-release-asset@v1
@@ -394,8 +399,8 @@ jobs:
          echo "${{ env.SHA512_WIN_ZIP }} yt-dlp_win.zip" >> SHA2-512SUMS
          echo "${{ env.SHA512_PY2EXE }} yt-dlp_min.exe" >> SHA2-512SUMS
          echo "${{ env.SHA512_WIN32 }} yt-dlp_x86.exe" >> SHA2-512SUMS
-          # echo "${{ env.SHA512_MACOS }} yt-dlp_macos" >> SHA2-512SUMS
-          # echo "${{ env.SHA512_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-512SUMS
+          echo "${{ env.SHA512_MACOS }} yt-dlp_macos" >> SHA2-512SUMS
+          echo "${{ env.SHA512_MACOS_ZIP }} yt-dlp_macos.zip" >> SHA2-512SUMS
      - name: Upload 512SUMS file
        id: upload-512sums
        uses: actions/upload-release-asset@v1
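Each build job above exports its artifact hashes with `::set-output`, and the `finish` job echoes them into `SHA2-256SUMS`/`SHA2-512SUMS` in the same `<hash> <filename>` layout that `sha256sum`/`sha512sum` print. For reference, a minimal Python sketch of the equivalent checksum step; the artifact names are just the ones visible in the workflow, not an exhaustive list:

```python
import hashlib
from pathlib import Path


def file_digest(path, algorithm='sha256', chunk_size=1 << 20):
    # Hash the file incrementally, like `sha256sum`/`sha512sum` do
    digest = hashlib.new(algorithm)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


# Artifact names taken from the workflow above; adjust to what was actually built
artifacts = ['yt-dlp', 'yt-dlp.exe', 'yt-dlp_min.exe', 'yt-dlp_x86.exe',
             'yt-dlp_win.zip', 'yt-dlp_macos', 'yt-dlp_macos.zip']
with open('SHA2-256SUMS', 'w') as sums:
    for name in artifacts:
        if Path(name).exists():
            sums.write(f'{file_digest(name)}  {name}\n')
```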
.gitignore (vendored, 76 lines changed)

@@ -1,48 +1,58 @@
 # Config
 *.conf
-*.spec
 cookies
 *cookies.txt
 .netrc
 
 # Downloaded
-*.srt
-*.ttml
-*.sbv
-*.vtt
-*.flv
-*.mp4
-*.m4a
-*.m4v
-*.mp3
-*.3gp
-*.webm
-*.wav
-*.ape
-*.mkv
-*.flac
-*.avi
-*.swf
-*.part
-*.part-*
-*.ytdl
 *.annotations.xml
 *.aria2
 *.description
 *.dump
 *.frag
 *.frag.aria2
 *.frag.urls
-*.aria2
-*.swp
-*.ogg
-*.opus
 *.info.json
 *.live_chat.json
-*.jpg
-*.jpeg
-*.png
-*.webp
+*.meta
+*.part*
+*.tmp
+*.temp
+*.unknown_video
+*.ytdl
+.cache/
+
+*.3gp
+*.ape
+*.ass
+*.avi
+*.desktop
+*.flac
+*.flv
+*.jpeg
+*.jpg
+*.m4a
+*.m4v
+*.mhtml
+*.mkv
+*.mov
+*.mp3
+*.mp4
+*.ogg
+*.opus
+*.png
+*.sbv
+*.srt
+*.swf
+*.swp
+*.ttml
+*.url
+*.vtt
+*.wav
+*.webloc
+*.webm
+*.webp
 
 # Allow config/media files in testdata
 !test/**
@@ -80,11 +90,10 @@ README.txt
 *.1
 *.bash-completion
 *.fish
-*.exe
 *.tar.gz
 *.zsh
+*.spec
-test/testdata/player-*.js
+test/testdata/sigs/player-*.js
 
 # Binary
 /youtube-dl
@@ -98,6 +107,7 @@ yt-dlp.zip
 *.iml
 .vscode
 *.sublime-*
+*.code-workspace
 
 # Lazy extractors
 */extractor/lazy_extractors.py
CONTRIBUTING.md (135 lines changed)

@@ -10,6 +10,8 @@
 - [Does the issue involve one problem, and one problem only?](#does-the-issue-involve-one-problem-and-one-problem-only)
 - [Is anyone going to need the feature?](#is-anyone-going-to-need-the-feature)
 - [Is your question about yt-dlp?](#is-your-question-about-yt-dlp)
+- [Are you willing to share account details if needed?](#are-you-willing-to-share-account-details-if-needed)
+- [Is the website primarily used for piracy](#is-the-website-primarily-used-for-piracy)
 - [DEVELOPER INSTRUCTIONS](#developer-instructions)
 - [Adding new feature or making overarching changes](#adding-new-feature-or-making-overarching-changes)
 - [Adding support for a new site](#adding-support-for-a-new-site)
@@ -18,10 +20,12 @@
 - [Provide fallbacks](#provide-fallbacks)
 - [Regular expressions](#regular-expressions)
 - [Long lines policy](#long-lines-policy)
+- [Quotes](#quotes)
+- [Inline values](#inline-values)
 - [Collapse fallbacks](#collapse-fallbacks)
 - [Trailing parentheses](#trailing-parentheses)
 - [Use convenience conversion and parsing functions](#use-convenience-conversion-and-parsing-functions)
 - [My pull request is labeled pending-fixes](#my-pull-request-is-labeled-pending-fixes)
 - [EMBEDDING YT-DLP](README.md#embedding-yt-dlp)
@@ -30,9 +34,9 @@
 
 Bugs and suggestions should be reported at: [yt-dlp/yt-dlp/issues](https://github.com/yt-dlp/yt-dlp/issues). Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in our [discord server](https://discord.gg/H5MNcFW63r).
 
-**Please include the full output of yt-dlp when run with `-Uv`**, i.e. **add** `-Uv` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+**Please include the full output of yt-dlp when run with `-vU`**, i.e. **add** `-vU` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
 ```
-$ yt-dlp -Uv <your command line>
+$ yt-dlp -vU <your command line>
 [debug] Command-line config: ['-v', 'demo.com']
 [debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8
 [debug] yt-dlp version 2021.09.25 (zip)
@@ -63,7 +67,7 @@ So please elaborate on what feature you are requesting, or what bug you want to
 
 If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. We often get frustrated by these issues, since the only possible way for us to move forward on them is to ask for clarification over and over.
 
-For bug reports, this means that your report should contain the **complete** output of yt-dlp when called with the `-Uv` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+For bug reports, this means that your report should contain the **complete** output of yt-dlp when called with the `-vU` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
 
 If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--write-pages` and upload the `.dump` files you get [somewhere](https://gist.github.com).
 
@@ -111,7 +115,7 @@ If the issue is with `youtube-dl` (the upstream fork of yt-dlp) and not with yt-
 
 ### Are you willing to share account details if needed?
 
-The maintainers and potential contributors of the project often do not have an account for the website you are asking support for. So any developer interested in solving your issue may ask you for account details. It is your personal discression whether you are willing to share the account in order for the developer to try and solve your issue. However, if you are unwilling or unable to provide details, they obviously cannot work on the issue and it cannot be solved unless some developer who both has an account and is willing/able to contribute decides to solve it.
+The maintainers and potential contributors of the project often do not have an account for the website you are asking support for. So any developer interested in solving your issue may ask you for account details. It is your personal discretion whether you are willing to share the account in order for the developer to try and solve your issue. However, if you are unwilling or unable to provide details, they obviously cannot work on the issue and it cannot be solved unless some developer who both has an account and is willing/able to contribute decides to solve it.
 
 By sharing an account with anyone, you agree to bear all risks associated with it. The maintainers and yt-dlp can't be held responsible for any misuse of the credentials.
 
@@ -121,6 +125,10 @@ While these steps won't necessarily ensure that no misuse of the account takes p
 - Change the password before sharing the account to something random (use [this](https://passwordsgenerator.net/) if you don't have a random password generator).
 - Change the password after receiving the account back.
 
+### Is the website primarily used for piracy?
+
+We follow [youtube-dl's policy](https://github.com/ytdl-org/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free) to not support services that is primarily used for infringing copyright. Additionally, it has been decided to not to support porn sites that specialize in deep fake. We also cannot support any service that serves only [DRM protected content](https://en.wikipedia.org/wiki/Digital_rights_management).
+
@@ -208,14 +216,14 @@ After you have ensured this site is distributing its content legally, you can fo
     }
     ```
 1. Add an import in [`yt_dlp/extractor/extractors.py`](yt_dlp/extractor/extractors.py).
-1. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, the tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in. You can also run all the tests in one go with `TestDownload.test_YourExtractor_all`
-1. Make sure you have atleast one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the purticular test is disabled from running.
+1. Run `python test/test_download.py TestDownload.test_YourExtractor` (note that `YourExtractor` doesn't end with `IE`). This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, the tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in. You can also run all the tests in one go with `TestDownload.test_YourExtractor_all`
+1. Make sure you have atleast one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
 1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L91-L426). Add tests and code for as many as you want.
 1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
 
        $ flake8 yt_dlp/extractor/yourextractor.py
 
-1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatability is not required for even older versions of Python.
+1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
 
        $ git add yt_dlp/extractor/extractors.py
@@ -227,6 +235,13 @@ After you have ensured this site is distributing its content legally, you can fo
 
 In any case, thank you very much for your contributions!
 
+**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your username and password in it:
+```json
+{
+    "username": "your user name",
+    "password": "your password"
+}
+```
 
 ## yt-dlp coding conventions
@@ -243,7 +258,11 @@ For extraction to work yt-dlp relies on metadata your extractor extracts and pro
 - `title` (media title)
 - `url` (media download URL) or `formats`
 
-The aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken. While, in fact, only `id` is technically mandatory, due to compatability reasons, yt-dlp also treats `title` as mandatory. The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - Eg: when the video is a live stream that has not started yet.
+The aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken. While all extractors must return a `title`, they must also allow it's extraction to be non-fatal.
+
+For pornographic sites, appropriate `age_limit` must also be returned.
+
+The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - Eg: when the video is a live stream that has not started yet.
 
 [Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
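To make the mandatory-field rules in this hunk concrete, here is a minimal sketch of an extractor's `_real_extract` under those conventions. The site, URL pattern, regexes and field values are invented for illustration; the helper methods (`_match_id`, `_download_webpage`, `_html_search_regex`, `_og_search_description`) are real ones from `yt_dlp/extractor/common.py`:

```python
from .common import InfoExtractor


class ExampleIE(InfoExtractor):  # hypothetical extractor for example.com
    _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,  # mandatory
            # `title` must be returned, but its extraction should be non-fatal
            'title': self._html_search_regex(
                r'<h1[^>]*>([^<]+)</h1>', webpage, 'title', fatal=False),
            'url': self._html_search_regex(
                r'data-video-url="([^"]+)"', webpage, 'video url'),
            # Everything else is optional and must tolerate absence
            'description': self._og_search_description(webpage, default=None),
        }
```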
@@ -444,10 +463,14 @@ Here the presence or absence of other attributes including `style` is irrelevent
 
 ### Long lines policy
 
-There is a soft limit to keep lines of code under 100 characters long. This means it should be respected if possible and if it does not make readability and code maintenance worse. Sometimes, it may be reasonable to go upto 120 characters and sometimes even 80 can be unreadable. Keep in mind that this is not a hard limit and is just one of many tools to make the code more readable
+There is a soft limit to keep lines of code under 100 characters long. This means it should be respected if possible and if it does not make readability and code maintenance worse. Sometimes, it may be reasonable to go upto 120 characters and sometimes even 80 can be unreadable. Keep in mind that this is not a hard limit and is just one of many tools to make the code more readable.
 
 For example, you should **never** split long string literals like URLs or some other often copied entities over multiple lines to fit this limit:
 
+Conversely, don't unecessarily split small lines further. As a rule of thumb, if removing the line split keeps the code under 80 characters, it should be a single line.
+
+##### Examples
+
 Correct:
 
 ```python
@@ -461,6 +484,47 @@ Incorrect:
|
||||
'PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4'
|
||||
```
|
||||
|
||||
Correct:
|
||||
|
||||
```python
|
||||
uploader = traverse_obj(info, ('uploader', 'name'), ('author', 'fullname'))
|
||||
```
|
||||
|
||||
Incorrect:
|
||||
|
||||
```python
|
||||
uploader = traverse_obj(
|
||||
info,
|
||||
('uploader', 'name'),
|
||||
('author', 'fullname'))
|
||||
```
|
||||
|
||||
Correct:
|
||||
|
||||
```python
|
||||
formats = self._extract_m3u8_formats(
|
||||
m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls',
|
||||
note='Downloading HD m3u8 information', errnote='Unable to download HD m3u8 information')
|
||||
```
|
||||
|
||||
Incorrect:
|
||||
|
||||
```python
|
||||
formats = self._extract_m3u8_formats(m3u8_url,
|
||||
video_id,
|
||||
'mp4',
|
||||
'm3u8_native',
|
||||
m3u8_id='hls',
|
||||
note='Downloading HD m3u8 information',
|
||||
errnote='Unable to download HD m3u8 information')
|
||||
```
|
||||
|
||||
|
||||
### Quotes
|
||||
|
||||
Always use single quotes for strings (even if the string has `'`) and double quotes for docstrings. Use `'''` only for multi-line strings. An exception can be made if a string has multiple single quotes in it and escaping makes it significantly harder to read. For f-strings, use you can use double quotes on the inside. But avoid f-strings that have too many quotes inside.
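
A short illustration of these quoting rules (hypothetical values):

```python
song = 'Don\'t Stop Me Now'  # single quotes, even when the string contains `'`
quip = "a string with 'several' 'quoted' words"  # exception: escaping them all hurts readability


def describe(info):
    """Docstrings use double quotes."""
    return f'{info["title"]} by {info["uploader"]}'  # double quotes inside an f-string


USAGE = '''
triple single quotes are reserved for
multi-line strings
'''
```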

### Inline values

Extracting variables is acceptable for reducing code duplication and improving readability of complex expressions. However, you should avoid extracting variables used only once and moving them to opposite parts of the extractor file, which makes reading the linear flow difficult.
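
For example (a hypothetical sketch; `player` and the traversal paths are illustrative, with helpers from `yt_dlp/utils.py`):

```python
from yt_dlp.utils import float_or_none, traverse_obj, url_or_none

player = {'config': {'media': {'streams': [
    {'durationMs': '123400', 'poster': 'https://example.com/p.jpg'}]}}}  # sample data

# Acceptable: the variable removes duplication between several nearby uses
stream = traverse_obj(player, ('config', 'media', 'streams', 0)) or {}
duration = float_or_none(stream.get('durationMs'), scale=1000)
thumbnail = url_or_none(stream.get('poster'))

# Avoid: a value extracted once and parked far away from its single use
# UPLOADER_PATH = ('config', 'media', 'owner', 'name')  # ...at the top of the file
# uploader = traverse_obj(player, UPLOADER_PATH)        # ...hundreds of lines later
```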

@@ -510,15 +574,22 @@ Methods supporting list of patterns are: `_search_regex`, `_html_search_regex`,

### Trailing parentheses

Always move trailing parentheses used for grouping/functions after the last argument. On the other hand, literal list/tuple/dict/set should be closed in a new line. Generators and list/dict comprehensions may use either style.

Note that this *does not* apply to braces `}` or square brackets `]`, both of which should be closed in a new line.

#### Examples

Correct:

```python
url = try_get(
    info,
    lambda x: x['ResultSet']['Result'][0]['VideoUrlSet']['VideoUrl'],
    list)
```

Correct:

```python
url = try_get(info,
              lambda x: x['ResultSet']['Result'][0]['VideoUrlSet']['VideoUrl'],
              list)
```

@@ -526,11 +597,45 @@ Correct:

Incorrect:

```python
url = try_get(
    info,
    lambda x: x['ResultSet']['Result'][0]['VideoUrlSet']['VideoUrl'],
    list,
)
```

Correct:

```python
f = {
    'url': url,
    'format_id': format_id,
}
```

Incorrect:

```python
f = {'url': url,
     'format_id': format_id}
```

Correct:

```python
formats = [process_formats(f) for f in format_data
           if f.get('type') in ('hls', 'dash', 'direct') and f.get('downloadable')]
```

Correct:

```python
formats = [
    process_formats(f) for f in format_data
    if f.get('type') in ('hls', 'dash', 'direct') and f.get('downloadable')
]
```


### Use convenience conversion and parsing functions

@@ -559,6 +664,10 @@ duration = float_or_none(video.get('durationMs'), scale=1000)
view_count = int_or_none(video.get('views'))
```
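
In the same spirit, other helpers from `yt_dlp/utils.py` cover the common parsing chores (a sketch; the `video` dict and its keys are hypothetical):

```python
from yt_dlp.utils import clean_html, parse_duration, parse_iso8601, url_or_none

video = {'descriptionHtml': '<p>An&nbsp;example</p>', 'length': '1:02:03',
         'publishedAt': '2022-03-08T12:00:00Z', 'poster': '//cdn.example.com/1.jpg'}  # sample data

description = clean_html(video.get('descriptionHtml'))  # strip tags and normalize whitespace
duration = parse_duration(video.get('length'))          # e.g. '1:02:03' -> 3723.0
timestamp = parse_iso8601(video.get('publishedAt'))     # ISO 8601 -> Unix timestamp
thumbnail = url_or_none(video.get('poster'))            # None unless it looks like a valid URL
```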

# My pull request is labeled pending-fixes

The `pending-fixes` label is added when there are changes requested to a PR. When the necessary changes are made, the label should be removed. However, despite our best efforts, it may sometimes happen that the maintainer did not see the changes or forgot to remove the label. If your PR is still marked as `pending-fixes` a few days after all requested changes have been made, feel free to ping the maintainer who labeled your PR and ask them to re-review and remove the label.

CONTRIBUTORS
@@ -2,6 +2,7 @@ pukkandan (owner)
shirt-dev (collaborator)
coletdjnz/colethedj (collaborator)
Ashish0804 (collaborator)
nao20010128nao/Lesmiscore (collaborator)
h-h-h-h
pauldubois98
nixxo
@@ -19,7 +20,6 @@ samiksome
alxnull
FelixFrog
Zocker1999NET
nao20010128nao
kurumigi
bbepis
animelover1984/horahoradev
@@ -139,3 +139,78 @@ rhendric
sdomi
selfisekai
stanoarn
0xA7404A/Aurora
4a1e2y5
aarubui
chio0hai
cntrl-s
Deer-Spangle
DEvmIb
Grabien/MaximVol
j54vc1bk
mpeter50
mrpapersonic
pabs3
staubichsauger
xenova
Yakabuff
zulaport
ehoogeveen-medweb
PilzAdam
zmousm
iw0nderhow
unit193
TwoThousandHedgehogs/KathrynElrod
Jertzukka
cypheron
Hyeeji
bwildenhain
C0D3D3V
kebianizao
Lapin0t
abdullah-if
DavidSkrundz
mkubecek
raleeper
YuenSzeHong
Sematre
jaller94
r5d
julien-hadleyjack
git-anony-mouse
mdawar
trassshhub
foghawk
k3ns1n
teridon
mozlima
timendum
ischmidt20
CreaValix
sian1468
arkamar
hyano
KiberInfinity
tejing1
Bricio
lazypete365
Aniruddh-J
blackgear
CplPwnies
cyberfox1691
FestplattenSchnitzel
hatienl0i261299
iphoting
jakeogh
lukasfink1
lyz-code
marieell
mdpauley
Mipsters
mxmehl
ofkz
P-reducible
pycabbage
regarten
Ronnnny
schn0sch

Changelog.md
@@ -5,15 +5,554 @@
* Run `make doc`
* Update Changelog.md and CONTRIBUTORS
* Change "Merged with ytdl" version in Readme.md if needed
* Add new/fixed extractors in "new features" section of Readme.md
* Commit as `Release <version>`
* Push to origin/release using `git push origin master:release`
  build task will now run
* Change "Based on ytdl" version in Readme.md if needed
* Commit as `Release <version>` and push to master
* Dispatch the workflow https://github.com/yt-dlp/yt-dlp/actions/workflows/build.yml on master
-->

### 2022.03.08.1

* [cleanup] Refactor `__init__.py`
* [build] Fix bug

### 2022.03.08

* Merge youtube-dl: Upto [commit/6508688](https://github.com/ytdl-org/youtube-dl/commit/6508688e88c83bb811653083db9351702cd39a6a) (except NDR)
* Add regex operator and quoting to format filters by [lukasfink1](https://github.com/lukasfink1)
* Add brotli content-encoding support by [coletdjnz](https://github.com/coletdjnz)
* Add pre-processor stage `after_filter`
* Better error message when no `--live-from-start` format
* Create necessary directories for `--print-to-file`
* Fill more fields for playlists by [Lesmiscore](https://github.com/Lesmiscore)
* Fix `-all` for `--sub-langs`
* Fix doubling of `video_id` in `ExtractorError`
* Fix for when stdout/stderr encoding is `None`
* Handle negative duration from extractor
* Implement `--add-header` without modifying `std_headers`
* Obey `--abort-on-error` for "ffmpeg not installed"
* Set `webpage_url_...` from `webpage_url` and not input URL
* Tolerate failure to `--write-link` due to unknown URL
* [aria2c] Add `--http-accept-gzip=true`
* [build] Update pyinstaller to 4.10 by [shirt-dev](https://github.com/shirt-dev)
* [cookies] Update MacOS12 `Cookies.binarycookies` location by [mdpauley](https://github.com/mdpauley)
* [devscripts] Improve `prepare_manpage`
* [downloader] Do not use aria2c for non-native `m3u8`
* [downloader] Obey `--file-access-retries` when deleting/renaming by [ehoogeveen-medweb](https://github.com/ehoogeveen-medweb)
* [extractor] Allow `http_headers` to be specified for `thumbnails`
* [extractor] Extract subtitles from manifests for vimeo, globo, kaltura, svt by [fstirlitz](https://github.com/fstirlitz)
* [extractor] Fix for manifests without period duration by [dirkf](https://github.com/dirkf), [pukkandan](https://github.com/pukkandan)
* [extractor] Support `--mark-watched` without `_NETRC_MACHINE` by [coletdjnz](https://github.com/coletdjnz)
* [FFmpegConcat] Abort on `--simulate`
* [FormatSort] Consider `acodec`=`ogg` as `vorbis`
* [fragment] Fix bugs around resuming with Range by [Lesmiscore](https://github.com/Lesmiscore)
* [fragment] Improve `--live-from-start` for YouTube livestreams by [Lesmiscore](https://github.com/Lesmiscore)
* [generic] Pass referer to extracted formats
* [generic] Set rss `guid` as video id by [Bricio](https://github.com/Bricio)
* [options] Better ambiguous option resolution
* [options] Rename `--clean-infojson` to `--clean-info-json`
* [SponsorBlock] Fixes for highlight and "full video labels" by [nihil-admirari](https://github.com/nihil-admirari)
* [Sponsorblock] minor fixes by [nihil-admirari](https://github.com/nihil-admirari)
* [utils] Better traceback for `ExtractorError`
* [utils] Fix file locking for AOSP by [jakeogh](https://github.com/jakeogh)
* [utils] Improve file locking
* [utils] OnDemandPagedList: Do not download pages after error
* [utils] render_table: Fix character calculation for removing extra gap by [Lesmiscore](https://github.com/Lesmiscore)
* [utils] Use `locked_file` for `sanitize_open` by [jakeogh](https://github.com/jakeogh)
* [utils] Validate `DateRange` input
* [utils] WebSockets wrapper for non-async functions by [Lesmiscore](https://github.com/Lesmiscore)
* [cleanup] Don't pass protocol to `_extract_m3u8_formats` for live videos
* [cleanup] Remove extractors for some dead websites by [marieell](https://github.com/marieell)
* [cleanup, docs] Misc cleanup
* [AbemaTV] Add extractors by [Lesmiscore](https://github.com/Lesmiscore)
* [adobepass] Add Suddenlink MSO by [CplPwnies](https://github.com/CplPwnies)
* [ant1newsgr] Add extractor by [zmousm](https://github.com/zmousm)
* [bigo] Add extractor by [Lesmiscore](https://github.com/Lesmiscore)
* [Caltrans] Add extractor by [Bricio](https://github.com/Bricio)
* [daystar] Add extractor by [hatienl0i261299](https://github.com/hatienl0i261299)
* [fc2:live] Add extractor by [Lesmiscore](https://github.com/Lesmiscore)
* [fptplay] Add extractor by [hatienl0i261299](https://github.com/hatienl0i261299)
* [murrtube] Add extractor by [cyberfox1691](https://github.com/cyberfox1691)
* [nfb] Add extractor by [ofkz](https://github.com/ofkz)
* [niconico] Add playlist extractors and refactor by [Lesmiscore](https://github.com/Lesmiscore)
* [peekvids] Add extractor by [schn0sch](https://github.com/schn0sch)
* [piapro] Add extractor by [pycabbage](https://github.com/pycabbage), [Lesmiscore](https://github.com/Lesmiscore)
* [rokfin] Add extractor by [P-reducible](https://github.com/P-reducible), [pukkandan](https://github.com/pukkandan)
* [rokfin] Add stack and channel extractors by [P-reducible](https://github.com/P-reducible), [pukkandan](https://github.com/pukkandan)
* [ruv.is] Add extractor by [iw0nderhow](https://github.com/iw0nderhow)
* [telegram] Add extractor by [hatienl0i261299](https://github.com/hatienl0i261299)
* [VideocampusSachsen] Add extractors by [FestplattenSchnitzel](https://github.com/FestplattenSchnitzel)
* [xinpianchang] Add extractor by [hatienl0i261299](https://github.com/hatienl0i261299)
* [abc] Support 1080p by [Ronnnny](https://github.com/Ronnnny)
* [afreecatv] Support password-protected livestreams by [wlritchi](https://github.com/wlritchi)
* [ard] Fix valid URL
* [ATVAt] Detect geo-restriction by [marieell](https://github.com/marieell)
* [bandcamp] Detect acodec
* [bandcamp] Fix user URLs by [lyz-code](https://github.com/lyz-code)
* [bbc] Fix extraction of news articles by [ajj8](https://github.com/ajj8)
* [beeg] Fix extractor by [Bricio](https://github.com/Bricio)
* [bigo] Fix extractor to not use `form_params`
* [Bilibili] Pass referer for all formats by [blackgear](https://github.com/blackgear)
* [Biqle] Fix extractor by [Bricio](https://github.com/Bricio)
* [ccma] Fix timestamp parsing by [nyuszika7h](https://github.com/nyuszika7h)
* [crunchyroll] Better error reporting on login failure by [tejing1](https://github.com/tejing1)
* [cspan] Support for C-Span congress videos by [Grabien](https://github.com/Grabien)
* [dropbox] fix regex by [zenerdi0de](https://github.com/zenerdi0de)
* [fc2] Fix extraction by [Lesmiscore](https://github.com/Lesmiscore)
* [fujitv] Extract resolution for free sources by [YuenSzeHong](https://github.com/YuenSzeHong)
* [Gettr] Add `GettrStreamingIE` by [i6t](https://github.com/i6t)
* [Gettr] Fix formats order by [i6t](https://github.com/i6t)
* [Gettr] Improve extractor by [i6t](https://github.com/i6t)
* [globo] Expand valid URL by [Bricio](https://github.com/Bricio)
* [lbry] Fix `--ignore-no-formats-error`
* [manyvids] Extract `uploader` by [regarten](https://github.com/regarten)
* [mildom] Fix linter
* [mildom] Rework extractors by [Lesmiscore](https://github.com/Lesmiscore)
* [mirrativ] Cleanup extractor code by [Lesmiscore](https://github.com/Lesmiscore)
* [nhk] Add support for NHK for School by [Lesmiscore](https://github.com/Lesmiscore)
* [niconico:tag] Add support for searching tags
* [nrk] Add fallback API
* [peekvids] Use JSON-LD by [schn0sch](https://github.com/schn0sch)
* [peertube] Add media.fsfe.org by [mxmehl](https://github.com/mxmehl)
* [rtvs] Fix extractor by [Bricio](https://github.com/Bricio)
* [spiegel] Fix `_VALID_URL`
* [ThumbnailsConvertor] Support `webp`
* [tiktok] Fix `vm.tiktok`/`vt.tiktok` URLs
* [tubitv] Fix/improve TV series extraction by [bbepis](https://github.com/bbepis)
* [tumblr] Fix extractor by [foghawk](https://github.com/foghawk)
* [twitcasting] Add fallback for finding running live by [Lesmiscore](https://github.com/Lesmiscore)
* [TwitCasting] Check for password protection by [Lesmiscore](https://github.com/Lesmiscore)
* [twitcasting] Fix extraction by [Lesmiscore](https://github.com/Lesmiscore)
* [twitch] Fix field name of `view_count`
* [twitter] Fix for private videos by [iphoting](https://github.com/iphoting)
* [washingtonpost] Fix extractor by [Bricio](https://github.com/Bricio)
* [youtube:tab] Add `approximate_date` extractor-arg
* [youtube:tab] Follow redirect to regional channel by [coletdjnz](https://github.com/coletdjnz)
* [youtube:tab] Reject webpage data if redirected to home page
* [youtube] De-prioritize potentially damaged formats
* [youtube] Differentiate descriptive audio by language code
* [youtube] Ensure subtitle urls are absolute by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Escape possible `$` in `_extract_n_function_name` regex by [Lesmiscore](https://github.com/Lesmiscore)
* [youtube] Fix automatic captions
* [youtube] Fix n-sig extraction for phone player JS by [MinePlayersPE](https://github.com/MinePlayersPE)
* [youtube] Further de-prioritize 3gp format
* [youtube] Label original auto-subs
* [youtube] Prefer UTC upload date for videos by [coletdjnz](https://github.com/coletdjnz)
* [zaq1] Remove dead extractor by [marieell](https://github.com/marieell)
* [zee5] Support web-series by [Aniruddh-J](https://github.com/Aniruddh-J)
* [zingmp3] Fix extractor by [hatienl0i261299](https://github.com/hatienl0i261299)
* [zoom] Add support for screen cast by [Mipsters](https://github.com/Mipsters)

### 2022.02.04

* [youtube:search] Fix extractor by [coletdjnz](https://github.com/coletdjnz)
* [youtube:search] Add tests
* [twitcasting] Enforce UTF-8 for POST payload by [Lesmiscore](https://github.com/Lesmiscore)
* [mediaset] Fix extractor by [nixxo](https://github.com/nixxo)
* [websocket] Make syntax error in `websockets` module non-fatal

### 2022.02.03

* Merge youtube-dl: Upto [commit/78ce962](https://github.com/ytdl-org/youtube-dl/commit/78ce962f4fe020994c216dd2671546fbe58a5c67)
* Add option `--print-to-file`
* Make nested --config-locations relative to parent file
* Ensure `_type` is present in `info.json`
* Fix `--compat-options list-formats`
* Fix/improve `InAdvancePagedList`
* [downloader/ffmpeg] Handle unknown formats better
* [outtmpl] Handle `-o ""` better
* [outtmpl] Handle hard-coded file extension better
* [extractor] Add convenience function `_yes_playlist`
* [extractor] Allow non-fatal `title` extraction
* [extractor] Extract video inside `Article` json_ld
* [generic] Allow further processing of json_ld URL
* [cookies] Fix keyring selection for unsupported desktops
* [utils] Strip double spaces in `clean_html` by [dirkf](https://github.com/dirkf)
* [aes] Add `unpad_pkcs7`
* [test] Fix `test_youtube_playlist_noplaylist`
* [docs,cleanup] Misc cleanup
* [dplay] Add extractors for site changes by [Sipherdrakon](https://github.com/Sipherdrakon)
* [ertgr] Add extractors by [zmousm](https://github.com/zmousm), [dirkf](https://github.com/dirkf)
* [Musicdex] Add extractors by [Ashish0804](https://github.com/Ashish0804)
* [YandexVideoPreview] Add extractor by [KiberInfinity](https://github.com/KiberInfinity)
* [youtube] Add extractor `YoutubeMusicSearchURLIE`
* [archive.org] Ignore unnecessary files
* [Bilibili] Add 8k support by [u-spec-png](https://github.com/u-spec-png)
* [bilibili] Fix extractor, make anthology title non-fatal
* [CAM4] Add thumbnail extraction by [alerikaisattera](https://github.com/alerikaisattera)
* [cctv] De-prioritize sample format
* [crunchyroll:beta] Add cookies support by [tejing1](https://github.com/tejing1)
* [crunchyroll] Fix login by [tejing1](https://github.com/tejing1)
* [doodstream] Fix extractor
* [fc2] Fix extraction by [Lesmiscore](https://github.com/Lesmiscore)
* [FFmpegConcat] Abort on --skip-download and download errors
* [Fujitv] Extract metadata and support premium by [YuenSzeHong](https://github.com/YuenSzeHong)
* [globo] Fix extractor by [Bricio](https://github.com/Bricio)
* [glomex] Simplify embed detection
* [GoogleSearch] Fix extractor
* [Instagram] Fix extraction when logged in by [MinePlayersPE](https://github.com/MinePlayersPE)
* [iq.com] Add VIP support by [MinePlayersPE](https://github.com/MinePlayersPE)
* [mildom] Fix extractor by [lazypete365](https://github.com/lazypete365)
* [MySpass] Fix video url processing by [trassshhub](https://github.com/trassshhub)
* [Odnoklassniki] Improve embedded players extraction by [KiberInfinity](https://github.com/KiberInfinity)
* [orf:tvthek] Lazy playlist extraction and obey --no-playlist
* [Pladform] Fix redirection to external player by [KiberInfinity](https://github.com/KiberInfinity)
* [ThisOldHouse] Improve Premium URL check by [Ashish0804](https://github.com/Ashish0804)
* [TikTok] Iterate through app versions by [MinePlayersPE](https://github.com/MinePlayersPE)
* [tumblr] Fix 403 errors and handle vimeo embeds by [foghawk](https://github.com/foghawk)
* [viki] Fix "Bad request" for manifest by [nyuszika7h](https://github.com/nyuszika7h)
* [Vimm] add recording extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [web.archive:youtube] Add `ytarchive:` prefix and misc cleanup
* [youtube:api] Do not use seek when reading HTTPError response by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Fix n-sig for player e06dea74
* [youtube, cleanup] Misc fixes and cleanup

### 2022.01.21

* Add option `--concat-playlist` to **concat videos in a playlist**
* Allow **multiple and nested configuration files**
* Add more post-processing stages (`after_video`, `playlist`)
* Allow `--exec` to be run at any post-processing stage (Deprecates `--exec-before-download`)
* Allow `--print` to be run at any post-processing stage
* Allow listing formats, thumbnails, subtitles using `--print` by [pukkandan](https://github.com/pukkandan), [Zirro](https://github.com/Zirro)
* Add fields `video_autonumber`, `modified_date`, `modified_timestamp`, `playlist_count`, `channel_follower_count`
* Add key `requested_downloads` in the root `info_dict`
* Write `download_archive` only after all formats are downloaded
* [FfmpegMetadata] Allow setting metadata of individual streams using `meta<n>_` prefix
* Add option `--legacy-server-connect` by [xtkoba](https://github.com/xtkoba)
* Allow escaped `,` in `--extractor-args`
* Allow unicode characters in `info.json`
* Check for existing thumbnail/subtitle in final directory
* Don't treat empty containers as `None` in `sanitize_info`
* Fix `-s --ignore-no-formats --force-write-archive`
* Fix live title for multiple formats
* List playlist thumbnails in `--list-thumbnails`
* Raise error if subtitle download fails
* [cookies] Fix bug when keyring is unspecified
* [ffmpeg] Ignore unknown streams, standardize use of `-map 0`
* [outtmpl] Alternate form for `D` and fix suffix's case
* [utils] Add `Sec-Fetch-Mode` to `std_headers`
* [utils] Fix `format_bytes` output for Bytes by [pukkandan](https://github.com/pukkandan), [mdawar](https://github.com/mdawar)
* [utils] Handle `ss:xxx` in `parse_duration`
* [utils] Improve parsing for nested HTML elements by [zmousm](https://github.com/zmousm), [pukkandan](https://github.com/pukkandan)
* [utils] Use key `None` in `traverse_obj` to return as-is
* [extractor] Detect more subtitle codecs in MPD manifests by [fstirlitz](https://github.com/fstirlitz)
* [extractor] Extract chapters from JSON-LD by [iw0nderhow](https://github.com/iw0nderhow), [pukkandan](https://github.com/pukkandan)
* [extractor] Extract thumbnails from JSON-LD by [nixxo](https://github.com/nixxo)
* [extractor] Improve `url_result` and related
* [generic] Improve KVS player extraction by [trassshhub](https://github.com/trassshhub)
* [build] Reduce dependency on third party workflows
* [extractor,cleanup] Use `_search_nextjs_data`, `format_field`
* [cleanup] Minor fixes and cleanup
* [docs] Improvements
* [test] Fix TestVerboseOutput
* [afreecatv] Add livestreams extractor by [wlritchi](https://github.com/wlritchi)
* [callin] Add extractor by [foghawk](https://github.com/foghawk)
* [CrowdBunker] Add extractors by [Ashish0804](https://github.com/Ashish0804)
* [daftsex] Add extractors by [k3ns1n](https://github.com/k3ns1n)
* [digitalconcerthall] Add extractor by [teridon](https://github.com/teridon)
* [Drooble] Add extractor by [u-spec-png](https://github.com/u-spec-png)
* [EuropeanTour] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [iq.com] Add extractors by [MinePlayersPE](https://github.com/MinePlayersPE)
* [KelbyOne] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [LnkIE] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [MainStreaming] Add extractor by [coletdjnz](https://github.com/coletdjnz)
* [megatvcom] Add extractors by [zmousm](https://github.com/zmousm)
* [Newsy] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [noodlemagazine] Add extractor by [trassshhub](https://github.com/trassshhub)
* [PokerGo] Add extractors by [Ashish0804](https://github.com/Ashish0804)
* [Pornez] Add extractor by [mozlima](https://github.com/mozlima)
* [PRX] Add Extractors by [coletdjnz](https://github.com/coletdjnz)
* [RTNews] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [Rule34video] Add extractor by [trassshhub](https://github.com/trassshhub)
* [tvopengr] Add extractors by [zmousm](https://github.com/zmousm)
* [Vimm] Add extractor by [alerikaisattera](https://github.com/alerikaisattera)
* [glomex] Add extractors by [zmousm](https://github.com/zmousm)
* [instagram] Add story/highlight extractor by [u-spec-png](https://github.com/u-spec-png)
* [openrec] Add movie extractor by [Lesmiscore](https://github.com/Lesmiscore)
* [rai] Add Raiplaysound extractors by [nixxo](https://github.com/nixxo), [pukkandan](https://github.com/pukkandan)
* [aparat] Fix extractor
* [ard] Extract subtitles by [fstirlitz](https://github.com/fstirlitz)
* [BiliIntl] Add login by [MinePlayersPE](https://github.com/MinePlayersPE)
* [CeskaTelevize] Use `http` for manifests
* [CTVNewsIE] Add fallback for video search by [Ashish0804](https://github.com/Ashish0804)
* [dplay] Migrate DiscoveryPlusItaly to DiscoveryPlus by [timendum](https://github.com/timendum)
* [dplay] Re-structure DiscoveryPlus extractors
* [Dropbox] Support password protected files and more formats by [zenerdi0de](https://github.com/zenerdi0de)
* [facebook] Fix extraction from groups
* [facebook] Improve title and uploader extraction
* [facebook] Parse dash manifests
* [fox] Extract m3u8 from preview by [ischmidt20](https://github.com/ischmidt20)
* [funk] Support origin URLs
* [gfycat] Fix `uploader`
* [gfycat] Support embeds by [coletdjnz](https://github.com/coletdjnz)
* [hotstar] Add extractor args to ignore tags by [Ashish0804](https://github.com/Ashish0804)
* [hrfernsehen] Fix ardloader extraction by [CreaValix](https://github.com/CreaValix)
* [instagram] Fix username extraction for stories and highlights by [nyuszika7h](https://github.com/nyuszika7h)
* [kakao] Detect geo-restriction
* [line] Remove `tv.line.me` by [sian1468](https://github.com/sian1468)
* [mixch] Add `MixchArchiveIE` by [Lesmiscore](https://github.com/Lesmiscore)
* [mixcloud] Detect restrictions by [llacb47](https://github.com/llacb47)
* [NBCSports] Fix extraction of platform URLs by [ischmidt20](https://github.com/ischmidt20)
* [Nexx] Extract more metadata by [MinePlayersPE](https://github.com/MinePlayersPE)
* [Nexx] Support 3q CDN by [MinePlayersPE](https://github.com/MinePlayersPE)
* [pbs] de-prioritize AD formats
* [PornHub,YouTube] Refresh onion addresses by [unit193](https://github.com/unit193)
* [RedBullTV] Parse subtitles from manifest by [Ashish0804](https://github.com/Ashish0804)
* [streamcz] Fix extractor by [arkamar](https://github.com/arkamar), [pukkandan](https://github.com/pukkandan)
* [Ted] Rewrite extractor by [pukkandan](https://github.com/pukkandan), [trassshhub](https://github.com/trassshhub)
* [Theta] Fix valid URL by [alerikaisattera](https://github.com/alerikaisattera)
* [ThisOldHouseIE] Add support for premium videos by [Ashish0804](https://github.com/Ashish0804)
* [TikTok] Fix extraction for sigi-based webpages, add API fallback by [MinePlayersPE](https://github.com/MinePlayersPE)
* [TikTok] Pass cookies to formats, and misc fixes by [MinePlayersPE](https://github.com/MinePlayersPE)
* [TikTok] Extract captions, user thumbnail by [MinePlayersPE](https://github.com/MinePlayersPE)
* [TikTok] Change app version by [MinePlayersPE](https://github.com/MinePlayersPE), [llacb47](https://github.com/llacb47)
* [TVer] Extract message for unaired live by [Lesmiscore](https://github.com/Lesmiscore)
* [twitcasting] Refactor extractor by [Lesmiscore](https://github.com/Lesmiscore)
* [twitter] Fix video in quoted tweets
* [veoh] Improve extractor by [foghawk](https://github.com/foghawk)
* [vk] Capture `clip` URLs
* [vk] Fix VKUserVideosIE by [Ashish0804](https://github.com/Ashish0804)
* [vk] Improve `_VALID_URL` by [k3ns1n](https://github.com/k3ns1n)
* [VrtNU] Handle empty title by [pgaig](https://github.com/pgaig)
* [XVideos] Check HLS formats by [MinePlayersPE](https://github.com/MinePlayersPE)
* [yahoo:gyao] Improved playlist handling by [hyano](https://github.com/hyano)
* [youtube:tab] Extract more playlist metadata by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
* [youtube:tab] Raise error on tab redirect by [krichbanana](https://github.com/krichbanana), [coletdjnz](https://github.com/coletdjnz)
* [youtube] Update Innertube clients by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Detect live-stream embeds
* [youtube] Do not return `upload_date` for playlists
* [youtube] Extract channel subscriber count by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Make invalid storyboard URL non-fatal
* [youtube] Enforce UTC, update innertube clients and tests by [coletdjnz](https://github.com/coletdjnz)
* [zdf] Add chapter extraction by [iw0nderhow](https://github.com/iw0nderhow)
* [zee5] Add geo-bypass

### 2021.12.27

* Avoid recursion error when re-extracting info
* [ffmpeg] Fix position of `--ppa`
* [aria2c] Don't show progress when `--no-progress`
* [cookies] Support other keyrings by [mbway](https://github.com/mbway)
* [EmbedThumbnail] Prefer AtomicParsley over ffmpeg if available
* [generic] Fix HTTP KVS Player by [git-anony-mouse](https://github.com/git-anony-mouse)
* [ThumbnailsConvertor] Fix for when there are no thumbnails
* [docs] Add examples for using `TYPES:` in `-P`/`-o`
* [PixivSketch] Add extractors by [nao20010128nao](https://github.com/nao20010128nao)
* [tiktok] Add music, sticker and tag IEs by [MinePlayersPE](https://github.com/MinePlayersPE)
* [BiliIntl] Fix extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
* [CBC] Fix URL regex
* [tiktok] Fix `extractor_key` used in archive
* [youtube] **End `live-from-start` properly when stream ends with 403**
* [Zee5] Fix VALID_URL for tv-shows by [Ashish0804](https://github.com/Ashish0804)

### 2021.12.25

* [dash,youtube] **Download live from start to end** by [nao20010128nao](https://github.com/nao20010128nao), [pukkandan](https://github.com/pukkandan)
    * Add option `--live-from-start` to enable downloading live videos from start
    * Add key `is_from_start` in formats to identify formats (of live videos) that download from start
    * [dash] Create protocol `http_dash_segments_generator` that allows a function to be passed instead of fragments
    * [fragment] Allow multiple live dash formats to download simultaneously
    * [youtube] Implement fragment re-fetching for the live dash formats
    * [youtube] Re-extract dash manifest every 5 hours (manifest expires in 6hrs)
    * [postprocessor/ffmpeg] Add `FFmpegFixupDuplicateMoovPP` to fixup duplicated moov atoms
    * Known issues:
        * Ctrl+C doesn't work on Windows when downloading multiple formats
        * If video becomes private, download hangs
* [SponsorBlock] Add `Filler` and `Highlight` categories by [nihil-admirari](https://github.com/nihil-admirari), [pukkandan](https://github.com/pukkandan)
    * Change `--sponsorblock-cut all` to `--sponsorblock-cut default` if you do not want filler sections to be removed
* Add field `webpage_url_domain`
* Add interactive format selection with `-f -`
* Add option `--file-access-retries` by [ehoogeveen-medweb](https://github.com/ehoogeveen-medweb)
* [outtmpl] Add alternate forms `S`, `D` and improve `id` detection
* [outtmpl] Add operator `&` for replacement text by [PilzAdam](https://github.com/PilzAdam)
* [EmbedSubtitle] Disable duration check temporarily
* [extractor] Add `_search_nuxt_data` by [nao20010128nao](https://github.com/nao20010128nao)
* [extractor] Ignore errors in comment extraction when `-i` is given
* [extractor] Standardize `_live_title`
* [FormatSort] Prevent incorrect deprecation warning
* [generic] Extract m3u8 formats from JSON-LD
* [postprocessor/ffmpeg] Always add `faststart`
* [utils] Fix parsing `YYYYMMDD` dates in Nov/Dec by [wlritchi](https://github.com/wlritchi)
* [utils] Improve `parse_count`
* [utils] Update `std_headers` by [kikuyan](https://github.com/kikuyan), [fstirlitz](https://github.com/fstirlitz)
* [lazy_extractors] Fix for search IEs
* [extractor] Support default implicit graph in JSON-LD by [zmousm](https://github.com/zmousm)
* Allow `--no-write-thumbnail` to override `--write-all-thumbnail`
* Fix `--throttled-rate`
* Fix control characters being printed to `--console-title`
* Fix PostProcessor hooks not registered for some PPs
* Pre-process when using `--flat-playlist`
* Remove known invalid thumbnails from `info_dict`
* Add warning when using `-f best`
* Use `parse_duration` for `--wait-for-video` and some minor fix
* [test/download] Add more fields
* [test/download] Ignore field `webpage_url_domain` by [std-move](https://github.com/std-move)
* [compat] Suppress errors in enabling VT mode
* [docs] Improve manpage format by [iw0nderhow](https://github.com/iw0nderhow), [pukkandan](https://github.com/pukkandan)
* [docs,cleanup] Minor fixes and cleanup
* [cleanup] Fix some typos by [unit193](https://github.com/unit193)
* [ABC:iview] Add show extractor by [pabs3](https://github.com/pabs3)
* [dropout] Add extractor by [TwoThousandHedgehogs](https://github.com/TwoThousandHedgehogs), [pukkandan](https://github.com/pukkandan)
* [GameJolt] Add extractors by [MinePlayersPE](https://github.com/MinePlayersPE)
* [gofile] Add extractor by [Jertzukka](https://github.com/Jertzukka), [Ashish0804](https://github.com/Ashish0804)
* [hse] Add extractors by [cypheron](https://github.com/cypheron), [pukkandan](https://github.com/pukkandan)
* [NateTV] Add NateIE and NateProgramIE by [Ashish0804](https://github.com/Ashish0804), [Hyeeji](https://github.com/Hyeeji)
* [OpenCast] Add extractors by [bwildenhain](https://github.com/bwildenhain), [C0D3D3V](https://github.com/C0D3D3V)
* [rtve] Add `RTVEAudioIE` by [kebianizao](https://github.com/kebianizao)
* [Rutube] Add RutubeChannelIE by [Ashish0804](https://github.com/Ashish0804)
* [skeb] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
* [soundcloud] Add related tracks extractor by [Lapin0t](https://github.com/Lapin0t)
* [toggo] Add extractor by [nyuszika7h](https://github.com/nyuszika7h)
* [TrueID] Add extractor by [MinePlayersPE](https://github.com/MinePlayersPE)
* [audiomack] Update album and song VALID_URL by [abdullah-if](https://github.com/abdullah-if), [dirkf](https://github.com/dirkf)
* [CBC Gem] Extract 1080p formats by [DavidSkrundz](https://github.com/DavidSkrundz)
* [ceskatelevize] Fetch iframe from nextJS data by [mkubecek](https://github.com/mkubecek)
* [crackle] Look for non-DRM formats by [raleeper](https://github.com/raleeper)
* [dplay] Temporary fix for `discoveryplus.com/it`
* [DiscoveryPlusShowBaseIE] yield actual video id by [Ashish0804](https://github.com/Ashish0804)
* [Facebook] Handle redirect URLs
* [fujitv] Extract 1080p from `tv_android` m3u8 by [YuenSzeHong](https://github.com/YuenSzeHong)
* [gronkh] Support new URL pattern by [Sematre](https://github.com/Sematre)
* [instagram] Expand valid URL by [u-spec-png](https://github.com/u-spec-png)
* [Instagram] Try bypassing login wall with embed page by [MinePlayersPE](https://github.com/MinePlayersPE)
* [Jamendo] Fix use of `_VALID_URL_RE` by [jaller94](https://github.com/jaller94)
* [LBRY] Support livestreams by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
* [NJPWWorld] Extract formats from m3u8 by [aarubui](https://github.com/aarubui)
* [NovaEmbed] update player regex by [std-move](https://github.com/std-move)
* [npr] Make SMIL extraction non-fatal by [r5d](https://github.com/r5d)
* [ntvcojp] Extract NUXT data by [nao20010128nao](https://github.com/nao20010128nao)
* [ok.ru] add mobile fallback by [nao20010128nao](https://github.com/nao20010128nao)
* [olympics] Add uploader and cleanup by [u-spec-png](https://github.com/u-spec-png)
* [ondemandkorea] Update `jw_config` regex by [julien-hadleyjack](https://github.com/julien-hadleyjack)
* [PlutoTV] Expand `_VALID_URL`
* [RaiNews] Fix extractor by [nixxo](https://github.com/nixxo)
* [RCTIPlusSeries] Lazy extraction and video type selection by [MinePlayersPE](https://github.com/MinePlayersPE)
* [redtube] Handle formats delivered inside a JSON by [dirkf](https://github.com/dirkf), [nixxo](https://github.com/nixxo)
* [SonyLiv] Add OTP login support by [Ashish0804](https://github.com/Ashish0804)
* [Steam] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
* [TikTok] Pass cookies to mobile API by [MinePlayersPE](https://github.com/MinePlayersPE)
* [trovo] Fix inheritance of `TrovoChannelBaseIE`
* [TVer] Extract better thumbnails by [YuenSzeHong](https://github.com/YuenSzeHong)
* [vimeo] Extract chapters
* [web.archive:youtube] Improve metadata extraction by [coletdjnz](https://github.com/coletdjnz)
* [youtube:comments] Add more options for limiting number of comments extracted by [coletdjnz](https://github.com/coletdjnz)
* [youtube:tab] Extract more metadata from feeds/channels/playlists by [coletdjnz](https://github.com/coletdjnz)
* [youtube:tab] Extract video thumbnails from playlist by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
* [youtube:tab] Ignore query when redirecting channel to playlist and cleanup of related code
* [youtube] Fix `ytsearchdate`
* [zdf] Support videos with different ptmd location by [iw0nderhow](https://github.com/iw0nderhow)
* [zee5] Support /episodes in URL

### 2021.12.01

* **Add option `--wait-for-video` to wait for scheduled streams**
* Add option `--break-per-input` to apply --break-on... to each input URL
* Add option `--embed-info-json` to embed info.json in mkv
* Add compat-option `embed-metadata`
* Allow using a custom format selector through API
* [AES] Add ECB mode by [nao20010128nao](https://github.com/nao20010128nao)
* [build] Fix MacOS Build
* [build] Save Git HEAD at release alongside version info
* [build] Use `workflow_dispatch` for release
* [downloader/ffmpeg] Fix for direct videos inside mpd manifests
* [downloader] Add colors to download progress
* [EmbedSubtitles] Slightly relax duration check and related cleanup
* [ExtractAudio] Fix conversion to `wav` and `vorbis`
* [ExtractAudio] Support `alac`
* [extractor] Extract `average_rating` from JSON-LD
* [FixupM3u8] Fixup MPEG-TS in MP4 container
* [generic] Support mpd manifests without extension by [shirt](https://github.com/shirt-dev)
* [hls] Better FairPlay DRM detection by [nyuszika7h](https://github.com/nyuszika7h)
* [jsinterp] Fix splice to handle float (for youtube js player f1ca6900)
* [utils] Allow alignment in `render_table` and add tests
* [utils] Fix `PagedList`
* [utils] Fix error when copying `LazyList`
* Clarify video/audio-only formats in -F
* Ensure directory exists when checking formats
* Ensure path for link files exists by [Zirro](https://github.com/Zirro)
* Ensure same config file is not loaded multiple times
* Fix `postprocessor_hooks`
* Fix `--break-on-archive` when pre-checking
* Fix `--check-formats` for `mhtml`
* Fix `--load-info-json` of playlists with failed entries
* Fix `--trim-filename` when filename has `.`
* Fix bug in parsing `--add-header`
* Fix error in `report_unplayable_conflict` by [shirt](https://github.com/shirt-dev)
* Fix writing playlist infojson with `--no-clean-infojson`
* Validate --get-bypass-country
* [blogger] Add extractor by [pabs3](https://github.com/pabs3)
* [breitbart] Add extractor by [Grabien](https://github.com/Grabien)
* [CableAV] Add extractor by [j54vc1bk](https://github.com/j54vc1bk)
* [CanalAlpha] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [CozyTV] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [CPTwentyFour] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [DiscoveryPlus] Add `DiscoveryPlusItalyShowIE` by [Ashish0804](https://github.com/Ashish0804)
* [ESPNCricInfo] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [LinkedIn] Add extractor by [u-spec-png](https://github.com/u-spec-png)
* [mixch] Add extractor by [nao20010128nao](https://github.com/nao20010128nao)
* [nebula] Add `NebulaCollectionIE` and rewrite extractor by [hheimbuerger](https://github.com/hheimbuerger)
* [OneFootball] Add extractor by [Ashish0804](https://github.com/Ashish0804)
* [peer.tv] Add extractor by [u-spec-png](https://github.com/u-spec-png)
* [radiozet] Add extractor by [0xA7404A](https://github.com/0xA7404A) (Aurora)
* [redgifs] Add extractor by [chio0hai](https://github.com/chio0hai)
* [RedGifs] Add Search and User extractors by [Deer-Spangle](https://github.com/Deer-Spangle)
* [rtrfm] Add extractor by [pabs3](https://github.com/pabs3)
* [Streamff] Add extractor by [cntrl-s](https://github.com/cntrl-s)
* [Stripchat] Add extractor by [zulaport](https://github.com/zulaport)
* [Aljazeera] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
* [AmazonStoreIE] Fix regex to not match vdp urls by [Ashish0804](https://github.com/Ashish0804)
* [ARDBetaMediathek] Handle new URLs
* [bbc] Get all available formats by [nyuszika7h](https://github.com/nyuszika7h)
* [Bilibili] Fix title extraction by [u-spec-png](https://github.com/u-spec-png)
* [CBC Gem] Fix for shows that don't have all seasons by [makeworld-the-better-one](https://github.com/makeworld-the-better-one)
* [curiositystream] Add more metadata
* [CuriosityStream] Fix series
* [DiscoveryPlus] Rewrite extractors by [Ashish0804](https://github.com/Ashish0804), [pukkandan](https://github.com/pukkandan)
* [HotStar] Set language field from tags by [Ashish0804](https://github.com/Ashish0804)
* [instagram, cleanup] Refactor extractors
* [Instagram] Display more login errors by [MinePlayersPE](https://github.com/MinePlayersPE)
* [itv] Fix extractor by [staubichsauger](https://github.com/staubichsauger), [pukkandan](https://github.com/pukkandan)
* [mediaklikk] Expand valid URL
* [MTV] Improve mgid extraction by [Sipherdrakon](https://github.com/Sipherdrakon), [kikuyan](https://github.com/kikuyan)
* [nexx] Better error message for unsupported format
* [NovaEmbed] Fix extractor by [pukkandan](https://github.com/pukkandan), [std-move](https://github.com/std-move)
* [PatreonUser] Do not capture RSS URLs
* [Reddit] Add support for 1080p videos by [xenova](https://github.com/xenova)
* [RoosterTeethSeries] Fix for multiple pages by [MinePlayersPE](https://github.com/MinePlayersPE)
* [sbs] Fix for movies and livestreams
* [Senate.gov] Add SenateGovIE and fix SenateISVPIE by [Grabien](https://github.com/Grabien), [pukkandan](https://github.com/pukkandan)
* [soundcloud:search] Fix pagination
* [tiktok:user] Set `webpage_url` correctly
* [Tokentube] Fix description by [u-spec-png](https://github.com/u-spec-png)
* [trovo] Fix extractor by [nyuszika7h](https://github.com/nyuszika7h)
* [tv2] Expand valid URL
* [Tvplayhome] Fix extractor by [pukkandan](https://github.com/pukkandan), [18928172992817182](https://github.com/18928172992817182)
* [Twitch:vod] Add chapters by [mpeter50](https://github.com/mpeter50)
* [twitch:vod] Extract live status by [DEvmIb](https://github.com/DEvmIb)
* [VidLii] Add 720p support by [mrpapersonic](https://github.com/mrpapersonic)
* [vimeo] Add fallback for config URL
* [vimeo] Sort http formats higher
* [WDR] Expand valid URL
* [willow] Add extractor by [aarubui](https://github.com/aarubui)
* [xvideos] Detect embed URLs by [4a1e2y5](https://github.com/4a1e2y5)
* [xvideos] Fix extractor by [Yakabuff](https://github.com/Yakabuff)
* [youtube, cleanup] Reorganize Tab and Search extractor inheritances
* [youtube:search_url] Add playlist/channel support
* [youtube] Add `default` player client by [coletdjnz](https://github.com/coletdjnz)
* [youtube] Add storyboard formats
* [youtube] Decrypt n-sig for URLs with `ratebypass`
* [youtube] Minor improvement to format sorting
* [cleanup] Add deprecation warnings
* [cleanup] Refactor `JSInterpreter._seperate`
* [Cleanup] Remove some unnecessary groups in regexes by [Ashish0804](https://github.com/Ashish0804)
* [cleanup] Misc cleanup

### 2021.11.10.1

* Temporarily disable MacOS Build

@@ -40,7 +579,7 @@

* [fragment] Fix progress display in fragmented downloads
* [downloader/ffmpeg] Fix vtt download with ffmpeg
* [ffmpeg] Detect presence of setts and libavformat version
* [ExtractAudio] Rescale `--audio-quality` correctly by [CrypticSignal](https://github.com/CrypticSignal), [pukkandan](https://github.com/pukkandan)
* [ExtractAudio] Use `libfdk_aac` if available by [CrypticSignal](https://github.com/CrypticSignal)
* [FormatSort] `eac3` is better than `ac3`
* [FormatSort] Fix some fields' defaults

@@ -1372,7 +1911,7 @@

* Cleaned up the fork for public use

**Note**: All uncredited changes above this point are authored by [pukkandan](https://github.com/pukkandan)

### Unreleased changes in [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc)

* Updated to youtube-dl release 2020.11.26 by [pukkandan](https://github.com/pukkandan)

@@ -1397,8 +1936,110 @@

* [spreaker] fix SpreakerShowIE test URL by [pukkandan](https://github.com/pukkandan)
* [Vlive] Fix playlist handling when downloading a channel by [kyuyeunk](https://github.com/kyuyeunk)
* [tmz] Fix extractor by [diegorodriguezv](https://github.com/diegorodriguezv)
* [ITV] BTCC URL update by [WolfganP](https://github.com/WolfganP)
* [generic] Detect embedded bitchute videos by [pukkandan](https://github.com/pukkandan)
* [generic] Extract embedded youtube and twitter videos by [diegorodriguezv](https://github.com/diegorodriguezv)
* [ffmpeg] Ensure all streams are copied by [pukkandan](https://github.com/pukkandan)
* [embedthumbnail] Fix for os.rename error by [pukkandan](https://github.com/pukkandan)
* make_win.bat: don't use UPX to pack vcruntime140.dll by [jbruchon](https://github.com/jbruchon)

### Changelog of [blackjack4494/yt-dlc](https://github.com/blackjack4494/yt-dlc) till release 2020.11.11-3

**Note**: This was constructed from the merge commit messages and may not be entirely accurate

* [bandcamp] fix failing test. remove subclass hack by [insaneracist](https://github.com/insaneracist)
* [bandcamp] restore album downloads by [insaneracist](https://github.com/insaneracist)
* [francetv] fix extractor by [Surkal](https://github.com/Surkal)
* [gdcvault] fix extractor by [blackjack4494](https://github.com/blackjack4494)
* [hotstar] Move to API v1 by [theincognito-inc](https://github.com/theincognito-inc)
* [hrfernsehen] add extractor by [blocktrron](https://github.com/blocktrron)
* [kakao] new apis by [blackjack4494](https://github.com/blackjack4494)
* [la7] fix missing protocol by [nixxo](https://github.com/nixxo)
* [mailru] removed escaped braces, use urljoin, added tests by [nixxo](https://github.com/nixxo)
* [MTV/Nick] universal mgid extractor + fix nick.de feed by [blackjack4494](https://github.com/blackjack4494)
* [mtv] Fix a missing match_id by [nixxo](https://github.com/nixxo)
* [Mtv] updated extractor logic & more by [blackjack4494](https://github.com/blackjack4494)
* [ndr] support Daserste ndr by [blackjack4494](https://github.com/blackjack4494)
* [Netzkino] Only use video id to find metadata by [TobiX](https://github.com/TobiX)
* [newgrounds] fix: video download by [insaneracist](https://github.com/insaneracist)
* [nitter] Add new extractor by [B0pol](https://github.com/B0pol)
* [soundcloud] Resolve audio/x-wav by [tfvlrue](https://github.com/tfvlrue)
* [soundcloud] sets pattern and tests by [blackjack4494](https://github.com/blackjack4494)
* [SouthparkDE/MTV] another mgid extraction (mtv_base) feed url updated by [blackjack4494](https://github.com/blackjack4494)
* [StoryFire] Add new extractor by [sgstair](https://github.com/sgstair)
* [twitch] by [geauxlo](https://github.com/geauxlo)
* [videa] Adapt to updates by [adrianheine](https://github.com/adrianheine)
* [Viki] subtitles, formats by [blackjack4494](https://github.com/blackjack4494)
* [vlive] fix extractor for revamped website by [exwm](https://github.com/exwm)
* [xtube] fix extractor by [insaneracist](https://github.com/insaneracist)
* [youtube] Convert subs when download is skipped by [blackjack4494](https://github.com/blackjack4494)
* [youtube] Fix age gate detection by [random-nick](https://github.com/random-nick)
* [youtube] fix yt-only playback when age restricted/gated - requires cookies by [blackjack4494](https://github.com/blackjack4494)
* [youtube] fix: extract artist metadata from ytInitialData by [insaneracist](https://github.com/insaneracist)
* [youtube] fix: extract mix playlist ids from ytInitialData by [insaneracist](https://github.com/insaneracist)
* [youtube] fix: mix playlist title by [insaneracist](https://github.com/insaneracist)
* [youtube] fix: Youtube Music playlists by [insaneracist](https://github.com/insaneracist)
* [Youtube] Fixed problem with new youtube player by [peet1993](https://github.com/peet1993)
* [zoom] Fix url parsing for url's containing /share/ and dots by [Romern](https://github.com/Romern)
* [zoom] new extractor by [insaneracist](https://github.com/insaneracist)
* abc by [adrianheine](https://github.com/adrianheine)
* Added Comcast_SSO fix by [merval](https://github.com/merval)
* Added DRM logic to brightcove by [merval](https://github.com/merval)
* Added regex for ABC.com site. by [kucksdorfs](https://github.com/kucksdorfs)
* alura by [hugohaa](https://github.com/hugohaa)
* Arbitrary merges by [fstirlitz](https://github.com/fstirlitz)
* ard.py_add_playlist_support by [martin54](https://github.com/martin54)
* Bugfix/youtube/chapters fix extractor by [gschizas](https://github.com/gschizas)
* bugfix_youtube_like_extraction by [RedpointsBots](https://github.com/RedpointsBots)
* Create build workflow by [blackjack4494](https://github.com/blackjack4494)
* deezer by [LucBerge](https://github.com/LucBerge)
* Detect embedded bitchute videos by [pukkandan](https://github.com/pukkandan)
* Don't install tests by [l29ah](https://github.com/l29ah)
* Don't try to embed/convert json subtitles generated by [youtube](https://github.com/youtube) livechat by [pukkandan](https://github.com/pukkandan)
* Doodstream by [sxvghd](https://github.com/sxvghd)
* duboku by [lkho](https://github.com/lkho)
* elonet by [tpikonen](https://github.com/tpikonen)
* ext/remuxe-video by [Zocker1999NET](https://github.com/Zocker1999NET)
* fall-back to the old way to fetch subtitles, if needed by [RobinD42](https://github.com/RobinD42)
* feature_subscriber_count by [RedpointsBots](https://github.com/RedpointsBots)
* Fix external downloader when there is no http_header by [pukkandan](https://github.com/pukkandan)
* Fix issue triggered by [tubeup](https://github.com/tubeup) by [nsapa](https://github.com/nsapa)
* Fix YoutubePlaylistsIE by [ZenulAbidin](https://github.com/ZenulAbidin)
* fix-mitele' by [DjMoren](https://github.com/DjMoren)
* fix/google-drive-cookie-issue by [legraphista](https://github.com/legraphista)
* fix_tiktok by [mervel-mervel](https://github.com/mervel-mervel)
* Fixed problem with JS player URL by [peet1993](https://github.com/peet1993)
* fixYTSearch by [xarantolus](https://github.com/xarantolus)
* FliegendeWurst-3sat-zdf-merger-bugfix-feature
* gilou-bandcamp_update
* implement ThisVid extractor by [rigstot](https://github.com/rigstot)
* JensTimmerman-patch-1 by [JensTimmerman](https://github.com/JensTimmerman)
* Keep download archive in memory for better performance by [jbruchon](https://github.com/jbruchon)
* la7-fix by [iamleot](https://github.com/iamleot)
* magenta by [adrianheine](https://github.com/adrianheine)
* Merge 26564 from [adrianheine](https://github.com/adrianheine)
* Merge code from [ddland](https://github.com/ddland)
* Merge code from [nixxo](https://github.com/nixxo)
* Merge code from [ssaqua](https://github.com/ssaqua)
* Merge code from [zubearc](https://github.com/zubearc)
* mkvthumbnail by [MrDoritos](https://github.com/MrDoritos)
* myvideo_ge by [fonkap](https://github.com/fonkap)
* naver by [SeonjaeHyeon](https://github.com/SeonjaeHyeon)
* ondemandkorea by [julien-hadleyjack](https://github.com/julien-hadleyjack)
* rai-update by [iamleot](https://github.com/iamleot)
* RFC: youtube: Polymer UI and JSON endpoints for playlists by [wlritchi](https://github.com/wlritchi)
* rutv by [adrianheine](https://github.com/adrianheine)
* Sc extractor web auth by [blackjack4494](https://github.com/blackjack4494)
* Switch from binary search tree to Python sets by [jbruchon](https://github.com/jbruchon)
* tiktok by [skyme5](https://github.com/skyme5)
* tvnow by [TinyToweringTree](https://github.com/TinyToweringTree)
* twitch-fix by [lel-amri](https://github.com/lel-amri)
* Twitter shortener by [blackjack4494](https://github.com/blackjack4494)
* Update README.md by [JensTimmerman](https://github.com/JensTimmerman)
* Update to reflect website changes. by [amigatomte](https://github.com/amigatomte)
* use webarchive to fix a dead link in README by [B0pol](https://github.com/B0pol)
* Viki the second by [blackjack4494](https://github.com/blackjack4494)
* wdr-subtitles by [mrtnmtth](https://github.com/mrtnmtth)
* Webpfix by [alexmerkel](https://github.com/alexmerkel)
* Youtube live chat by [siikamiika](https://github.com/siikamiika)

@@ -28,6 +28,7 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho

[](https://github.com/sponsors/coletdjnz)

* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
* Added support for downloading YoutubeWebArchive videos

@@ -35,5 +36,15 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho

[](https://ko-fi.com/ashish0804)

* Added support for new websites Zee5, MXPlayer, DiscoveryPlusIndia, ShemarooMe, Utreon etc
* Added playlist/series downloads for TubiTv, SonyLIV, Voot, HotStar etc
* Added support for new websites BiliIntl, DiscoveryPlusIndia, OlympicsReplay, PlanetMarathi, ShemarooMe, Utreon, Zee5 etc
* Added playlist/series downloads for Hotstar, ParamountPlus, Rumble, SonyLIV, Trovo, TubiTv, Voot etc
* Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc

## [Lesmiscore](https://github.com/Lesmiscore) (nao20010128nao)

**Bitcoin**: bc1qfd02r007cutfdjwjmyy9w23rjvtls6ncve7r3s
**Monacoin**: mona1q3tf7dzvshrhfe3md379xtvt2n22duhglv5dskr

* Download live from start to end for YouTube
* Added support for new websites mildom, PixivSketch, skeb, radiko, voicy, mirrativ, openrec, whowatch, damtomo, 17.live, mixch etc
16 Makefile
@@ -1,5 +1,6 @@
all: lazy-extractors yt-dlp doc pypi-files
clean: clean-test clean-dist clean-cache
clean: clean-test clean-dist
clean-all: clean clean-cache
completions: completion-bash completion-fish completion-zsh
doc: README.md CONTRIBUTING.md issuetemplates supportedsites
ot: offlinetest
@@ -13,13 +14,15 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites com
.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites

clean-test:
rm -rf *.3gp *.annotations.xml *.ape *.avi *.description *.dump *.flac *.flv *.frag *.frag.aria2 *.frag.urls \
*.info.json *.jpeg *.jpg *.live_chat.json *.m4a *.m4v *.mkv *.mp3 *.mp4 *.ogg *.opus *.part* *.png *.sbv *.srt \
*.swf *.swp *.ttml *.vtt *.wav *.webm *.webp *.ytdl test/testdata/player-*.js
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
*.3gp *.ape *.ass *.avi *.desktop *.flac *.flv *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
*.mp4 *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
clean-dist:
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
clean-cache:
find . -name "*.pyc" -o -name "*.class" -delete
find . \( -name "*.pyc" -o -name "*.class" \) -delete

completion-bash: completions/bash/yt-dlp
completion-fish: completions/fish/yt-dlp.fish
@@ -31,7 +34,6 @@ DESTDIR ?= .
BINDIR ?= $(PREFIX)/bin
MANDIR ?= $(PREFIX)/man
SHAREDIR ?= $(PREFIX)/share
# make_supportedsites.py doesnot work correctly in python2
PYTHON ?= /usr/bin/env python3

# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local

@@ -39,12 +39,6 @@ class {name}({bases}):
_module = '{module}'
'''

make_valid_template = '''
@classmethod
def _make_valid_url(cls):
return {valid_url!r}
'''


def get_base_name(base):
if base is InfoExtractor:
@@ -61,15 +55,14 @@ def build_lazy_ie(ie, name):
bases=', '.join(map(get_base_name, ie.__bases__)),
module=ie.__module__)
valid_url = getattr(ie, '_VALID_URL', None)
if not valid_url and hasattr(ie, '_make_valid_url'):
valid_url = ie._make_valid_url()
if valid_url:
s += f' _VALID_URL = {valid_url!r}\n'
if not ie._WORKING:
s += ' _WORKING = False\n'
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
s += f'\n{getsource(ie.suitable)}'
if hasattr(ie, '_make_valid_url'):
# search extractors
s += make_valid_template.format(valid_url=ie._make_valid_url())
return s

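For illustration only, a minimal sketch of what the lazy-extractor template above expands to for one extractor; `FooIE`, its module and its URL pattern are made-up names, not taken from the diff:

    # Hypothetical rendering of the class template for a single extractor
    name, bases, module = 'FooIE', 'LazyLoadExtractor', 'yt_dlp.extractor.foo'
    valid_url = r'https?://foo\.example/watch/(?P<id>\d+)'  # assumed pattern
    print(f'class {name}({bases}):\n'
          f'    _module = {module!r}\n'
          f'    _VALID_URL = {valid_url!r}')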
@@ -13,12 +13,14 @@ PREFIX = r'''%yt-dlp(1)

# NAME

youtube\-dl \- download videos from youtube.com or other video platforms
yt\-dlp \- A youtube-dl fork with additional features and patches

# SYNOPSIS

**yt-dlp** \[OPTIONS\] URL [URL...]

# DESCRIPTION

'''

@@ -33,47 +35,63 @@ def main():
with io.open(README_FILE, encoding='utf-8') as f:
readme = f.read()

readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
readme = re.sub(r'\s+yt-dlp \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
readme = PREFIX + readme

readme = filter_excluded_sections(readme)
readme = move_sections(readme)
readme = filter_options(readme)

with io.open(outfile, 'w', encoding='utf-8') as outf:
outf.write(readme)
outf.write(PREFIX + readme)


def filter_excluded_sections(readme):
EXCLUDED_SECTION_BEGIN_STRING = re.escape('<!-- MANPAGE: BEGIN EXCLUDED SECTION -->')
EXCLUDED_SECTION_END_STRING = re.escape('<!-- MANPAGE: END EXCLUDED SECTION -->')
return re.sub(
rf'(?s){EXCLUDED_SECTION_BEGIN_STRING}.+?{EXCLUDED_SECTION_END_STRING}\n',
'', readme)


def move_sections(readme):
MOVE_TAG_TEMPLATE = '<!-- MANPAGE: MOVE "%s" SECTION HERE -->'
sections = re.findall(r'(?m)^%s$' % (
re.escape(MOVE_TAG_TEMPLATE).replace(r'\%', '%') % '(.+)'), readme)

for section_name in sections:
move_tag = MOVE_TAG_TEMPLATE % section_name
if readme.count(move_tag) > 1:
raise Exception(f'There is more than one occurrence of "{move_tag}". This is unexpected')

sections = re.findall(rf'(?sm)(^# {re.escape(section_name)}.+?)(?=^# )', readme)
if len(sections) < 1:
raise Exception(f'The section {section_name} does not exist')
elif len(sections) > 1:
raise Exception(f'There are multiple occurrences of section {section_name}, this is unhandled')

readme = readme.replace(sections[0], '', 1).replace(move_tag, sections[0], 1)
return readme


def filter_options(readme):
ret = ''
in_options = False
for line in readme.split('\n'):
if line.startswith('# '):
if line[2:].startswith('OPTIONS'):
in_options = True
else:
in_options = False

if in_options:
if line.lstrip().startswith('-'):
split = re.split(r'\s{2,}', line.lstrip())
# Description string may start with `-` as well. If there is
# only one piece then it's a description bit not an option.
if len(split) > 1:
option, description = split
split_option = option.split(' ')

if not split_option[-1].startswith('-'): # metavar
option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])
section = re.search(r'(?sm)^# USAGE AND OPTIONS\n.+?(?=^# )', readme).group(0)
options = '# OPTIONS\n'
for line in section.split('\n')[1:]:
mobj = re.fullmatch(r'''(?x)
\s{4}(?P<opt>-(?:,\s|[^\s])+)
(?:\s(?P<meta>(?:[^\s]|\s(?!\s))+))?
(\s{2,}(?P<desc>.+))?
''', line)
if not mobj:
options += f'{line.lstrip()}\n'
continue
option, metavar, description = mobj.group('opt', 'meta', 'desc')

# Pandoc's definition_lists. See http://pandoc.org/README.html
# for more information.
ret += '\n%s\n: %s\n' % (option, description)
option = f'{option} *{metavar}*' if metavar else option
description = f'{description}\n' if description else ''
options += f'\n{option}\n: {description}'
continue
ret += line.lstrip() + '\n'
else:
ret += line + '\n'

return ret
return readme.replace(section, options, 1)


if __name__ == '__main__':
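As a rough sketch of the Pandoc definition-list output that the rewritten filter_options builds; the option and description here are illustrative, not from the diff:

    # Mirrors the `options += f'\n{option}\n: {description}'` step above
    option, metavar, description = '-f, --format', 'FORMAT', 'Video format code'
    option = f'{option} *{metavar}*' if metavar else option
    print(f'\n{option}\n: {description}')
    # prints:
    #
    # -f, --format *FORMAT*
    # : Video format code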
@@ -1,33 +1,42 @@
#!/usr/bin/env python3
from __future__ import unicode_literals

from datetime import datetime
# import urllib.request
import sys
import subprocess

# response = urllib.request.urlopen('https://blackjack4494.github.io/youtube-dlc/update/LATEST_VERSION')
# old_version = response.read().decode('utf-8')

exec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))
with open('yt_dlp/version.py', 'rt') as f:
exec(compile(f.read(), 'yt_dlp/version.py', 'exec'))
old_version = locals()['__version__']

old_version_list = old_version.split(".", 4)
old_version_list = old_version.split('.')

old_ver = '.'.join(old_version_list[:3])
old_rev = old_version_list[3] if len(old_version_list) > 3 else ''

ver = datetime.utcnow().strftime("%Y.%m.%d")
rev = str(int(old_rev or 0) + 1) if old_ver == ver else ''

rev = (sys.argv[1:] or [''])[0] # Use first argument, if present as revision number
if not rev:
rev = str(int(old_rev or 0) + 1) if old_ver == ver else ''

VERSION = '.'.join((ver, rev)) if rev else ver
# VERSION_LIST = [(int(v) for v in ver.split(".") + [rev or 0])]

try:
sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE)
GIT_HEAD = sp.communicate()[0].decode().strip() or None
except Exception:
GIT_HEAD = None

VERSION_FILE = f'''\
# Autogenerated by devscripts/update-version.py

__version__ = {VERSION!r}

RELEASE_GIT_HEAD = {GIT_HEAD!r}
'''

with open('yt_dlp/version.py', 'wt') as f:
f.write(VERSION_FILE)

print('::set-output name=ytdlp_version::' + VERSION)

file_version_py = open('yt_dlp/version.py', 'rt')
data = file_version_py.read()
data = data.replace(old_version, VERSION)
file_version_py.close()

file_version_py = open('yt_dlp/version.py', 'wt')
file_version_py.write(data)
file_version_py.close()
print(f'\nVersion = {VERSION}, Git HEAD = {GIT_HEAD}')

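A minimal sketch of the version scheme the updated script implements (an explicit argument wins; otherwise same-day rebuilds bump the revision); `next_version` is a made-up helper name for illustration:

    from datetime import datetime

    def next_version(old_version, forced_rev=''):
        # '2022.03.08.1' -> date part '2022.03.08' and revision '1'
        parts = old_version.split('.')
        old_ver, old_rev = '.'.join(parts[:3]), parts[3] if len(parts) > 3 else ''
        ver = datetime.utcnow().strftime('%Y.%m.%d')
        rev = forced_rev or (str(int(old_rev or 0) + 1) if old_ver == ver else '')
        return f'{ver}.{rev}' if rev else ver

    # e.g. running twice on 2022-03-08: '2022.03.08', then '2022.03.08.1'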
5 docs/Contributing.md Normal file
@@ -0,0 +1,5 @@
---
orphan: true
---
```{include} ../Contributing.md
```
@@ -40,7 +40,7 @@ def main():
'--icon=devscripts/logo.ico',
'--upx-exclude=vcruntime140.dll',
'--noconfirm',
*dependancy_options(),
*dependency_options(),
*opts,
'yt_dlp/__main__.py',
]
@@ -73,11 +73,11 @@ def version_to_list(version):
return list(map(int, version_list)) + [0] * (4 - len(version_list))


def dependancy_options():
dependancies = [pycryptodome_module(), 'mutagen'] + collect_submodules('websockets')
def dependency_options():
dependencies = [pycryptodome_module(), 'mutagen', 'brotli'] + collect_submodules('websockets')
excluded_modules = ['test', 'ytdlp_plugins', 'youtube-dl', 'youtube-dlc']

yield from (f'--hidden-import={module}' for module in dependancies)
yield from (f'--hidden-import={module}' for module in dependencies)
yield from (f'--exclude-module={module}' for module in excluded_modules)

@@ -1,3 +1,5 @@
mutagen
pycryptodomex
websockets
brotli; platform_python_implementation=='CPython'
brotlicffi; platform_python_implementation!='CPython'
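The two new requirements use PEP 508 environment markers, so pip installs exactly one Brotli implementation per interpreter; the selection is roughly equivalent to:

    import platform

    # CPython gets the C extension; PyPy and friends get the cffi-based fallback
    module = 'brotli' if platform.python_implementation() == 'CPython' else 'brotlicffi'
    print(module)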
4 setup.py
@@ -21,9 +21,9 @@ DESCRIPTION = 'A youtube-dl fork with additional features and patches'
LONG_DESCRIPTION = '\n\n'.join((
'Official repository: <https://github.com/yt-dlp/yt-dlp>',
'**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
open('README.md', 'r', encoding='utf-8').read()))
open('README.md', encoding='utf-8').read()))

REQUIREMENTS = ['mutagen', 'pycryptodomex', 'websockets']
REQUIREMENTS = open('requirements.txt', encoding='utf-8').read().splitlines()


if sys.argv[1:2] == ['py2exe']:
@@ -3,7 +3,6 @@
- **17live:clip**
- **1tv**: Первый канал
- **20min**
- **220.ro**
- **23video**
- **247sports**
- **24video**
@@ -11,7 +10,6 @@
- **3sat**
- **4tube**
- **56.com**
- **5min**
- **6play**
- **7plus**
- **8tracks**
@@ -21,10 +19,13 @@
- **9now.com.au**
- **abc.net.au**
- **abc.net.au:iview**
- **abc.net.au:iview:showseries**
- **abcnews**
- **abcnews:video**
- **abcotvs**: ABC Owned Television Stations
- **abcotvs:clips**
- **AbemaTV**
- **AbemaTVTitle**
- **AcademicEarth:Course**
- **acast**
- **acast:channel**
@@ -40,11 +41,14 @@
- **aenetworks:collection**
- **aenetworks:show**
- **afreecatv**: afreecatv.com
- **afreecatv:live**: afreecatv.com
- **AirMozilla**
- **AliExpressLive**
- **AlJazeera**
- **Allocine**
- **AlphaPorno**
- **Alsace20TV**
- **Alsace20TVEmbed**
- **Alura**
- **AluraCourse**
- **Amara**
@@ -52,11 +56,15 @@
- **AMCNetworks**
- **AmericasTestKitchen**
- **AmericasTestKitchenSeason**
- **AmHistoryChannel**
- **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
- **AnimalPlanet**
- **AnimeLab**
- **AnimeLabShows**
- **AnimeOnDemand**
- **ant1newsgr:article**: ant1news.gr articles
- **ant1newsgr:embed**: ant1news.gr embedded videos
- **ant1newsgr:watch**: ant1news.gr videos
- **Anvato**
- **aol.com**: Yahoo screen and movies
- **APA**
@@ -74,6 +82,7 @@
- **Arkena**
- **arte.sky.it**
- **ArteTV**
- **ArteTVCategory**
- **ArteTVEmbed**
- **ArteTVPlaylist**
- **AsianCrush**
@@ -98,8 +107,8 @@
- **bandaichannel**
- **Bandcamp**
- **Bandcamp:album**
- **Bandcamp:user**
- **Bandcamp:weekly**
- **BandcampMusic**
- **bangumi.bilibili.com**: BiliBili番剧
- **BannedVideo**
- **bbc**: BBC
@@ -121,6 +130,7 @@
- **bfmtv:live**
- **BibelTV**
- **Bigflix**
- **Bigo**
- **Bild**: Bild.de
- **BiliBili**
- **Bilibili category extractor**
@@ -141,6 +151,7 @@
- **BlackboardCollaborate**
- **BleacherReport**
- **BleacherReportCMS**
- **blogger.com**
- **Bloomberg**
- **BokeCC**
- **BongaCams**
@@ -150,6 +161,7 @@
- **BR**: Bayerischer Rundfunk
- **BravoTV**
- **Break**
- **BreitBart**
- **brightcove:legacy**
- **brightcove:new**
- **BRMediathek**: Bayerischer Rundfunk Mediathek
@@ -158,11 +170,15 @@
- **BusinessInsider**
- **BuzzFeed**
- **BYUtv**
- **CableAV**
- **Callin**
- **Caltrans**
- **CAM4**
- **Camdemy**
- **CamdemyFolder**
- **CamModels**
- **CamWithHer**
- **CanalAlpha**
- **canalc2.tv**
- **Canalplus**: mycanal.fr and piwiplus.fr
- **Canvas**
@@ -220,24 +236,34 @@
- **ComedyCentralTV**
- **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
- **CONtv**
- **CookingChannel**
- **Corus**
- **Coub**
- **CozyTV**
- **cp24**
- **cpac**
- **cpac:playlist**
- **Cracked**
- **Crackle**
- **CrooksAndLiars**
- **CrowdBunker**
- **CrowdBunkerChannel**
- **crunchyroll**
- **crunchyroll:beta**
- **crunchyroll:playlist**
- **crunchyroll:playlist:beta**
- **CSpan**: C-SPAN
- **CSpanCongress**
- **CtsNews**: 華視新聞
- **CTV**
- **CTVNews**
- **cu.ntv.co.jp**: Nippon Television Network
- **CultureUnplugged**
- **curiositystream**
- **curiositystream:collection**
- **curiositystream:collections**
- **curiositystream:series**
- **CWTV**
- **Daftsex**
- **DagelijkseKost**: dagelijksekost.een.be
- **DailyMail**
- **dailymotion**
@@ -249,24 +275,27 @@
- **daum.net:clip**
- **daum.net:playlist**
- **daum.net:user**
- **daystar:clip**
- **DBTV**
- **DctpTv**
- **DeezerAlbum**
- **DeezerPlaylist**
- **defense.gouv.fr**
- **democracynow**
- **DestinationAmerica**
- **DHM**: Filmarchiv - Deutsches Historisches Museum
- **Digg**
- **DigitalConcertHall**: DigitalConcertHall extractor
- **DigitallySpeaking**
- **Digiteka**
- **Discovery**
- **DiscoveryGo**
- **DiscoveryGoPlaylist**
- **DiscoveryLife**
- **DiscoveryNetworksDe**
- **DiscoveryPlus**
- **DiscoveryPlusIndia**
- **DiscoveryPlusIndiaShow**
- **DiscoveryVR**
- **DiscoveryPlusItaly**
- **DiscoveryPlusItalyShow**
- **Disney**
- **DIYNetwork**
- **dlive:stream**
@@ -278,7 +307,10 @@
- **DouyuTV**: 斗鱼
- **DPlay**
- **DRBonanza**
- **Drooble**
- **Dropbox**
- **Dropout**
- **DropoutSeason**
- **DrTuber**
- **drtv**
- **drtv:live**
@@ -312,11 +344,16 @@
- **Eporner**
- **EroProfile**
- **EroProfile:album**
- **ertflix**: ERTFLIX videos
- **ertflix:codename**: ERTFLIX videos by codename
- **ertwebtv:embed**: ert.gr webtv embedded videos
- **Escapist**
- **ESPN**
- **ESPNArticle**
- **ESPNCricInfo**
- **EsriVideo**
- **Europa**
- **EuropeanTour**
- **EUScreen**
- **EWETV**
- **ExpoTV**
@@ -330,6 +367,7 @@
- **faz.net**
- **fc2**
- **fc2:embed**
- **fc2:live**
- **Fczenit**
- **Filmmodu**
- **filmon**
@@ -339,6 +377,7 @@
- **FiveTV**
- **Flickr**
- **Folketinget**: Folketinget (ft.dk; Danish parliament)
- **FoodNetwork**
- **FootyRoom**
- **Formula1**
- **FOX**
@@ -348,6 +387,7 @@
- **foxnews**: Fox News and Fox Business Video
- **foxnews:article**
- **FoxSports**
- **fptplay**: fptplay.vn
- **FranceCulture**
- **FranceInter**
- **FranceTV**
@@ -355,7 +395,6 @@
- **FranceTVSite**
- **Freesound**
- **freespeech.org**
- **FreshLive**
- **FrontendMasters**
- **FrontendMastersCourse**
- **FrontendMastersLesson**
@@ -370,6 +409,12 @@
- **GabTV**
- **Gaia**
- **GameInformer**
- **GameJolt**
- **GameJoltCommunity**
- **GameJoltGame**
- **GameJoltGameSoundtrack**
- **GameJoltSearch**
- **GameJoltUser**
- **GameSpot**
- **GameStar**
- **Gaskrank**
@@ -381,6 +426,7 @@
- **gem.cbc.ca:playlist**
- **generic**: Generic downloader that works on some sites
- **Gettr**
- **GettrStreaming**
- **Gfycat**
- **GiantBomb**
- **Giga**
@@ -388,8 +434,12 @@
- **Glide**: Glide mobile video messages (glide.me)
- **Globo**
- **GloboArticle**
- **glomex**: Glomex videos
- **glomex:embed**: Glomex embedded videos
- **Go**
- **GoDiscovery**
- **GodTube**
- **Gofile**
- **Golem**
- **google:podcasts**
- **google:podcasts:feed**
@@ -409,6 +459,7 @@
- **hetklokhuis**
- **hgtv.com:show**
- **HGTVDe**
- **HGTVUsa**
- **HiDive**
- **HistoricFilms**
- **history:player**
@@ -417,7 +468,6 @@
- **hitbox:live**
- **HitRecord**
- **hketv**: 香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau
- **HornBunny**
- **HotNewHipHop**
- **hotstar**
- **hotstar:playlist**
@@ -427,6 +477,8 @@
- **hrfernsehen**
- **HRTi**
- **HRTiPlaylist**
- **HSEProduct**
- **HSEShow**
- **Huajiao**: 花椒直播
- **HuffPost**: Huffington Post
- **Hungama**
@@ -448,15 +500,18 @@
- **IndavideoEmbed**
- **InfoQ**
- **Instagram**
- **instagram:tag**: Instagram hashtag search
- **instagram:story**
- **instagram:tag**: Instagram hashtag search URLs
- **instagram:user**: Instagram user profile
- **InstagramIOS**: IOS instagram:// URL
- **Internazionale**
- **InternetVideoArchive**
- **InvestigationDiscovery**
- **IPrima**
- **IPrimaCNN**
- **iq.com**: International version of iQiyi
- **iq.com:album**
- **iqiyi**: 爱奇艺
- **Ir90Tv**
- **ITTF**
- **ITV**
- **ITVBTCC**
@@ -473,11 +528,11 @@
- **JWPlatform**
- **Kakao**
- **Kaltura**
- **Kankan**
- **Karaoketv**
- **KarriereVideos**
- **Katsomo**
- **KeezMovies**
- **KelbyOne**
- **Ketnet**
- **khanacademy**
- **khanacademy:unit**
@@ -523,7 +578,7 @@
- **limelight:channel_list**
- **LineLive**
- **LineLiveChannel**
- **LineTV**
- **LinkedIn**
- **linkedin:learning**
- **linkedin:learning:course**
- **LinuxAcademy**
@@ -531,6 +586,7 @@
- **LiveJournal**
- **livestream**
- **livestream:original**
- **Lnk**
- **LnkGo**
- **loc**: Library of Congress
- **LocalNews8**
@@ -543,6 +599,7 @@
- **mailru**: Видео@Mail.Ru
- **mailru:music**: Музыка@Mail.Ru
- **mailru:music:search**: Музыка@Mail.Ru
- **MainStreaming**: MainStreaming Player
- **MallTV**
- **mangomolo:live**
- **mangomolo:video**
@@ -569,6 +626,8 @@
- **MediasiteNamedCatalog**
- **Medici**
- **megaphone.fm**: megaphone.fm embedded players
- **megatvcom**: megatv.com videos
- **megatvcom:embed**: megatv.com embedded videos
- **Meipai**: 美拍
- **MelonVOD**
- **META**
@@ -580,8 +639,9 @@
- **MiaoPai**
- **microsoftstream**: Microsoft Stream
- **mildom**: Record ongoing live by specific user in Mildom
- **mildom:clip**: Clip in Mildom
- **mildom:user:vod**: Download all VODs from specific user in Mildom
- **mildom:vod**: Download a VOD in Mildom
- **mildom:vod**: VOD in Mildom
- **minds**
- **minds:channel**
- **minds:group**
@@ -591,6 +651,8 @@
- **mirrativ**
- **mirrativ:user**
- **MiTele**: mitele.es
- **mixch**
- **mixch:archive**
- **mixcloud**
- **mixcloud:playlist**
- **mixcloud:user**
@@ -622,7 +684,13 @@
- **mtvservices:embedded**
- **MTVUutisetArticle**
- **MuenchenTV**: münchen.tv
- **Murrtube**
- **MurrtubeUser**: Murrtube user profile
- **MuseScore**
- **MusicdexAlbum**
- **MusicdexArtist**
- **MusicdexPlaylist**
- **MusicdexSong**
- **mva**: Microsoft Virtual Academy videos
- **mva:course**: Microsoft Virtual Academy courses
- **Mwave**
@@ -641,6 +709,8 @@
- **n-tv.de**
- **N1Info:article**
- **N1InfoAsset**
- **Nate**
- **NateProgram**
- **natgeo:video**
- **NationalGeographicTV**
- **Naver**
@@ -663,6 +733,7 @@
- **ndr:embed:base**
- **NDTV**
- **Nebula**
- **nebula:collection**
- **NerdCubedFeed**
- **netease:album**: 网易云音乐 - 专辑
- **netease:djradio**: 网易云音乐 - 电台
@@ -677,14 +748,19 @@
- **Newgrounds:playlist**
- **Newgrounds:user**
- **Newstube**
- **Newsy**
- **NextMedia**: 蘋果日報
- **NextMediaActionNews**: 蘋果日報 - 動新聞
- **NextTV**: 壹電視
- **Nexx**
- **NexxEmbed**
- **NFB**
- **NFHSNetwork**
- **nfl.com** (Currently broken)
- **nfl.com:article** (Currently broken)
- **NhkForSchoolBangumi**
- **NhkForSchoolProgramList**
- **NhkForSchoolSubject**: Portal page for each school subjects, like Japanese (kokugo, 国語) or math (sansuu/suugaku or 算数・数学)
- **NhkVod**
- **NhkVodProgram**
- **nhl.com**
@@ -694,10 +770,13 @@
- **nickelodeonru**
- **nicknight**
- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
- **niconico:history**: NicoNico user history. Requires cookies.
- **niconico:playlist**
- **niconico:series**
- **niconico:tag**: NicoNico video tag URLs
- **NiconicoUser**
- **nicovideo:search**: Nico video searches; "nicosearch:" prefix
- **nicovideo:search:date**: Nico video searches, newest first; "nicosearchdate:" prefix
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
- **nicovideo:search_url**: Nico video search URLs
- **Nintendo**
- **Nitter**
@@ -706,6 +785,7 @@
- **NJPWWorld**: 新日本プロレスワールド
- **NobelPrize**
- **NonkTube**
- **NoodleMagazine**
- **Noovo**
- **Normalboots**
- **NosVideo**
@@ -746,6 +826,7 @@
- **OlympicsReplay**
- **on24**: ON24
- **OnDemandKorea**
- **OneFootball**
- **onet.pl**
- **onet.tv**
- **onet.tv:channel**
@@ -753,8 +834,11 @@
- **OnionStudios**
- **Ooyala**
- **OoyalaExternal**
- **Opencast**
- **OpencastPlaylist**
- **openrec**
- **openrec:capture**
- **openrec:movie**
- **OraTV**
- **orf:burgenland**: Radio Burgenland
- **orf:fm4**: radio FM4
@@ -788,6 +872,8 @@
- **PatreonUser**
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
- **PearVideo**
- **PeekVids**
- **peer.tv**
- **PeerTube**
- **PeerTube:Playlist**
- **peloton**
@@ -799,12 +885,15 @@
- **PhilharmonieDeParis**: Philharmonie de Paris
- **phoenix.de**
- **Photobucket**
- **Piapro**
- **Picarto**
- **PicartoVod**
- **Piksel**
- **Pinkbike**
- **Pinterest**
- **PinterestCollection**
- **pixiv:sketch**
- **pixiv:sketch:user**
- **Pladform**
- **PlanetMarathi**
- **Platzi**
@@ -816,6 +905,7 @@
- **PlaysTV**
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
- **Playvid**
- **PlayVids**
- **Playwire**
- **pluralsight**
- **pluralsight:course**
@@ -823,6 +913,8 @@
- **podomatic**
- **Pokemon**
- **PokemonWatch**
- **PokerGo**
- **PokerGoCollection**
- **PolsatGo**
- **PolskieRadio**
- **polskieradio:kierowcow**
@@ -834,6 +926,7 @@
- **PopcornTV**
- **PornCom**
- **PornerBros**
- **Pornez**
- **PornFlip**
- **PornHd**
- **PornHub**: PornHub and Thumbzilla
@@ -848,6 +941,11 @@
- **PressTV**
- **ProjectVeritas**
- **prosiebensat1**: ProSiebenSat.1 Digital
- **PRXAccount**
- **PRXSeries**
- **prxseries:search**: PRX Series Search; "prxseries:" prefix
- **prxstories:search**: PRX Stories Search; "prxstories:" prefix
- **PRXStory**
- **puhutv**
- **puhutv:serie**
- **Puls4**
@@ -873,6 +971,7 @@
- **RadioJavan**
- **radiokapital**
- **radiokapital:show**
- **RadioZetPodcast**
- **radlive**
- **radlive:channel**
- **radlive:season**
@@ -880,8 +979,9 @@
- **RaiPlay**
- **RaiPlayLive**
- **RaiPlayPlaylist**
- **RaiPlayRadio**
- **RaiPlayRadioPlaylist**
- **RaiPlaySound**
- **RaiPlaySoundLive**
- **RaiPlaySoundPlaylist**
- **RayWenderlich**
- **RayWenderlichCourse**
- **RBMARadio**
@@ -897,7 +997,9 @@
- **RedBullTV**
- **RedBullTVRrnContent**
- **Reddit**
- **RedditR**
- **RedGifs**
- **RedGifsSearch**: Redgifs search
- **RedGifsUser**: Redgifs user
- **RedTube**
- **RegioTV**
- **RENTV**
@@ -908,38 +1010,49 @@
- **RICE**
- **RMCDecouverte**
- **RockstarGames**
- **Rokfin**
- **rokfin:channel**
- **rokfin:stack**
- **RoosterTeeth**
- **RoosterTeethSeries**
- **RottenTomatoes**
- **Roxwel**
- **Rozhlas**
- **RTBF**
- **RTDocumentry**
- **RTDocumentryPlaylist**
- **rte**: Raidió Teilifís Éireann TV
- **rte:radio**: Raidió Teilifís Éireann radio
- **rtl.nl**: rtl.nl and rtlxl.nl
- **rtl2**
- **rtl2:you**
- **rtl2:you:series**
- **RTNews**
- **RTP**
- **RTRFM**
- **RTS**: RTS.ch
- **rtve.es:alacarta**: RTVE a la carta
- **rtve.es:audio**: RTVE audio
- **rtve.es:infantil**: RTVE infantil
- **rtve.es:live**: RTVE.es live streams
- **rtve.es:television**
- **RTVNH**
- **RTVS**
- **RUHD**
- **Rule34Video**
- **RumbleChannel**
- **RumbleEmbed**
- **Ruptly**
- **rutube**: Rutube videos
- **rutube:channel**: Rutube channels
- **rutube:channel**: Rutube channel
- **rutube:embed**: Rutube embedded videos
- **rutube:movie**: Rutube movies
- **rutube:person**: Rutube person videos
- **rutube:playlist**: Rutube playlists
- **rutube:tags**: Rutube tags
- **RUTV**: RUTV.RU
- **Ruutu**
- **Ruv**
- **ruv.is:spila**
- **safari**: safaribooksonline.com online video
- **safari:api**
- **safari:course**: safaribooksonline.com online courses
@@ -960,6 +1073,7 @@
- **SCTE**
- **SCTECourse**
- **Seeker**
- **SenateGov**
- **SenateISVP**
- **SendtoNews**
- **Servus**
@@ -975,6 +1089,7 @@
- **simplecast:episode**
- **simplecast:podcast**
- **Sina**
- **Skeb**
- **sky.it**
- **sky:news**
- **sky:news:story**
@@ -994,6 +1109,7 @@
- **SonyLIVSeries**
- **soundcloud**
- **soundcloud:playlist**
- **soundcloud:related**
- **soundcloud:search**: Soundcloud search; "scsearch:" prefix
- **soundcloud:set**
- **soundcloud:trackstation**
@@ -1038,8 +1154,10 @@
- **Streamanity**
- **streamcloud.eu**
- **StreamCZ**
- **StreamFF**
- **StreetVoice**
- **StretchInternet**
- **Stripchat**
- **stv:player**
- **SunPorno**
- **sverigesradio:episode**
@@ -1065,12 +1183,16 @@
- **TeamTreeHouse**
- **TechTalks**
- **techtv.mit.edu**
- **ted**
- **TedEmbed**
- **TedPlaylist**
- **TedSeries**
- **TedTalk**
- **Tele13**
- **Tele5**
- **TeleBruxelles**
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
- **Telegraaf**
- **telegram:embed**
- **TeleMB**
- **Telemundo**
- **TeleQuebec**
@@ -1087,7 +1209,6 @@
- **TheIntercept**
- **ThePlatform**
- **ThePlatformFeed**
- **TheScene**
- **TheStar**
- **TheSun**
- **ThetaStream**
@@ -1099,12 +1220,17 @@
- **ThreeSpeak**
- **ThreeSpeakUser**
- **TikTok**
- **tiktok:effect**
- **tiktok:sound**
- **tiktok:tag**
- **tiktok:user**
- **tinypic**: tinypic.com videos
- **TLC**
- **TMZ**
- **TNAFlix**
- **TNAFlixNetworkEmbed**
- **toggle**
- **toggo**
- **Tokentube**
- **Tokentube:channel**
- **ToonGoggles**
@@ -1112,11 +1238,13 @@
- **Toypics**: Toypics video
- **ToypicsUser**: Toypics user profile
- **TrailerAddict** (Currently broken)
- **TravelChannel**
- **Trilulilu**
- **Trovo**
- **TrovoChannelClip**: All Clips of a trovo.live channel; "trovoclip:" prefix
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
- **TrovoVod**
- **TrueID**
- **TruNews**
- **TruTV**
- **Tube8**
@@ -1158,6 +1286,8 @@
- **TVNowNew**
- **TVNowSeason**
- **TVNowShow**
- **tvopengr:embed**: tvopen.gr embedded videos
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
- **tvp**: Telewizja Polska
- **tvp:embed**: Telewizja Polska
- **tvp:series**
@@ -1221,9 +1351,11 @@
- **Viddler**
- **Videa**
- **video.arnes.si**: Arnes Video
- **video.google:search**: Google Video search; "gvsearch:" prefix (Currently broken)
- **video.google:search**: Google Video search; "gvsearch:" prefix
- **video.sky.it**
- **video.sky.it:live**
- **VideocampusSachsen**
- **VideocampusSachsenEmbed**
- **VideoDetective**
- **videofy.me**
- **videomore**
@@ -1250,6 +1382,8 @@
- **vimeo:review**: Review pages on vimeo
- **vimeo:user**
- **vimeo:watchlater**: Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)
- **Vimm:recording**
- **Vimm:stream**
- **Vimple**: Vimple - one-click video hosting
- **Vine**
- **vine:user**
@@ -1264,6 +1398,7 @@
- **vlive**
- **vlive:channel**
- **vlive:post**
- **vm.tiktok**
- **Vodlocker**
- **VODPl**
- **VODPlatform**
@@ -1283,7 +1418,6 @@
- **VShare**
- **VTM**
- **VTXTV**
- **vube**: Vube.com
- **VuClip**
- **Vupload**
- **VVVVID**
@@ -1299,10 +1433,10 @@
- **WatchBox**
- **WatchIndianPorn**: Watch Indian Porn
- **WDR**
- **wdr:mobile**
- **wdr:mobile** (Currently broken)
- **WDRElefant**
- **WDRPage**
- **web.archive:youtube**: web.archive.org saved youtube videos
- **web.archive:youtube**: web.archive.org saved youtube videos, "ytarchive:" prefix
- **Webcaster**
- **WebcasterFeed**
- **WebOfStories**
@@ -1311,6 +1445,7 @@
- **WeiboMobile**
- **WeiqiTV**: WQTV
- **whowatch**
- **Willow**
- **WimTV**
- **Wistia**
- **WistiaPlaylist**
@@ -1333,6 +1468,7 @@
- **xiami:song**: 虾米音乐
- **ximalaya**: 喜马拉雅FM
- **ximalaya:album**: 喜马拉雅FM 专辑
- **xinpianchang**: xinpianchang.com
- **XMinus**
- **XNXX**
- **Xstream**
@@ -1352,6 +1488,7 @@
- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
- **yandexmusic:track**: Яндекс.Музыка - Трек
- **YandexVideo**
- **YandexVideoPreview**
- **YapFiles**
- **YesJapan**
- **yinyuetai:video**: 音悦Tai
@@ -1368,16 +1505,18 @@
- **youtube**: YouTube
- **youtube:favorites**: YouTube liked videos; ":ytfav" keyword (requires cookies)
- **youtube:history**: Youtube watch history; ":ythis" keyword (requires cookies)
- **youtube:music:search_url**: YouTube music search URLs with selectable sections (Eg: #songs)
- **youtube:playlist**: YouTube playlists
- **youtube:recommended**: YouTube recommended videos; ":ytrec" keyword
- **youtube:search**: YouTube searches; "ytsearch:" prefix
- **youtube:search:date**: YouTube searches, newest videos first; "ytsearchdate:" prefix
- **youtube:search**: YouTube search; "ytsearch:" prefix
- **youtube:search:date**: YouTube search, newest videos first; "ytsearchdate:" prefix
- **youtube:search_url**: YouTube search URLs with sorting and filter support
- **youtube:subscriptions**: YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)
- **youtube:tab**: YouTube Tabs
- **youtube:user**: YouTube user videos; "ytuser:" prefix
- **youtube:watchlater**: Youtube watch later list; ":ytwatchlater" keyword (requires cookies)
- **YoutubeLivestreamEmbed**: YouTube livestream embeds
- **YoutubeYtBe**: youtu.be
- **YoutubeYtUser**: YouTube user videos; "ytuser:" prefix
- **Zapiks**
- **Zattoo**
- **ZattooLive**
@@ -1388,7 +1527,7 @@
- **ZenYandex**
- **ZenYandexChannel**
- **Zhihu**
- **zingmp3**: mp3.zing.vn
- **zingmp3**: zingmp3.vn
- **zingmp3:album**
- **zoom**
- **Zype**

@@ -194,6 +194,53 @@ def expect_dict(self, got_dict, expected_dict):
expect_value(self, got, expected, info_field)


def sanitize_got_info_dict(got_dict):
IGNORED_FIELDS = (
# Format keys
'url', 'manifest_url', 'format', 'format_id', 'format_note', 'width', 'height', 'resolution',
'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'vbr', 'fps', 'vcodec', 'container', 'filesize',
'filesize_approx', 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'preference',
'language', 'language_preference', 'quality', 'source_preference', 'http_headers',
'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',

# RTMP formats
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',

# Lists
'formats', 'thumbnails', 'subtitles', 'automatic_captions', 'comments', 'entries',

# Auto-generated
'autonumber', 'playlist', 'format_index', 'video_ext', 'audio_ext', 'duration_string', 'epoch',
'fulltitle', 'extractor', 'extractor_key', 'filepath', 'infojson_filename', 'original_url', 'n_entries',

# Only live_status needs to be checked
'is_live', 'was_live',
)

IGNORED_PREFIXES = ('', 'playlist', 'requested', 'webpage')

def sanitize(key, value):
if isinstance(value, str) and len(value) > 100 and key != 'thumbnail':
return f'md5:{md5(value)}'
elif isinstance(value, list) and len(value) > 10:
return f'count:{len(value)}'
elif key.endswith('_count') and isinstance(value, int):
return int
return value

test_info_dict = {
key: sanitize(key, value) for key, value in got_dict.items()
if value is not None and key not in IGNORED_FIELDS and not any(
key.startswith(f'{prefix}_') for prefix in IGNORED_PREFIXES)
}

# display_id may be generated from id
if test_info_dict.get('display_id') == test_info_dict.get('id'):
test_info_dict.pop('display_id')

return test_info_dict


def expect_info_dict(self, got_dict, expected_dict):
expect_dict(self, got_dict, expected_dict)
# Check for the presence of mandatory fields
@@ -207,15 +254,15 @@ def expect_info_dict(self, got_dict, expected_dict):
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)

# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
test_info_dict = sanitize_got_info_dict(got_dict)

missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
elif isinstance(v, type):
return v.__name__
else:
return repr(v)
info_dict_str = ''

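For reference, a standalone sketch of the sanitize() rules above; the hashlib-based md5 here stands in for the helper module's own md5 (an assumption):

    import hashlib

    def md5(s):
        return hashlib.md5(s.encode('utf-8')).hexdigest()

    def sanitize(key, value):
        # Same rules as the inner helper of sanitize_got_info_dict
        if isinstance(value, str) and len(value) > 100 and key != 'thumbnail':
            return f'md5:{md5(value)}'
        elif isinstance(value, list) and len(value) > 10:
            return f'count:{len(value)}'
        elif key.endswith('_count') and isinstance(value, int):
            return int
        return value

    print(sanitize('description', 'x' * 150))  # -> 'md5:...'
    print(sanitize('view_count', 42))          # -> <class 'int'>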
@@ -99,10 +99,10 @@ class TestInfoExtractor(unittest.TestCase):
self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)

def test_search_json_ld_realworld(self):
_TESTS = [
# https://github.com/ytdl-org/youtube-dl/issues/23306
expect_dict(
self,
self.ie._search_json_ld(r'''<script type="application/ld+json">
(
r'''<script type="application/ld+json">
{
"@context": "http://schema.org/",
"@type": "VideoObject",
@@ -135,7 +135,7 @@ class TestInfoExtractor(unittest.TestCase):
"name": "Kleio Valentien",
"url": "https://www.eporner.com/pornstar/kleio-valentien/"
}]}
</script>''', None),
</script>''',
{
'title': '1 On 1 With Kleio',
'description': 'Kleio Valentien',
@@ -145,7 +145,161 @@ class TestInfoExtractor(unittest.TestCase):
'view_count': 1120958,
'width': 1920,
'height': 1080,
})
},
{},
),
(
r'''<script type="application/ld+json">
{
"@context": "https://schema.org",
"@graph": [
{
"@type": "NewsArticle",
"mainEntityOfPage": {
"@type": "WebPage",
"@id": "https://www.ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn"
},
"headline": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν",
"name": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν",
"description": "Τα παιδιά δέχθηκαν την επίθεση επειδή αρνήθηκαν να γίνουν μέλη της συμμορίας, ανέφερε ο Γ. Ζαχαρόπουλος.",
"image": {
"@type": "ImageObject",
"url": "https://ant1media.azureedge.net/imgHandler/1100/a635c968-be71-447c-bf9c-80d843ece21e.jpg",
"width": 1100,
"height": 756 },
"datePublished": "2021-11-10T08:50:00+03:00",
"dateModified": "2021-11-10T08:52:53+03:00",
"author": {
"@type": "Person",
"@id": "https://www.ant1news.gr/",
"name": "Ant1news",
"image": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png",
"url": "https://www.ant1news.gr/"
},
"publisher": {
"@type": "Organization",
"@id": "https://www.ant1news.gr#publisher",
"name": "Ant1news",
"url": "https://www.ant1news.gr",
"logo": {
"@type": "ImageObject",
"url": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png",
"width": 400,
"height": 400 },
"sameAs": [
"https://www.facebook.com/Ant1news.gr",
"https://twitter.com/antennanews",
"https://www.youtube.com/channel/UC0smvAbfczoN75dP0Hw4Pzw",
"https://www.instagram.com/ant1news/"
]
},

"keywords": "μαχαίρωμα,συμμορία ανηλίκων,ΕΙΔΗΣΕΙΣ,ΕΙΔΗΣΕΙΣ ΣΗΜΕΡΑ,ΝΕΑ,Κοινωνία - Ant1news",


"articleSection": "Κοινωνία"
}
]
}
</script>''',
{
'timestamp': 1636523400,
'title': 'md5:91fe569e952e4d146485740ae927662b',
},
{'expected_type': 'NewsArticle'},
),
(
r'''<script type="application/ld+json">
{"url":"/vrtnu/a-z/het-journaal/2021/het-journaal-het-journaal-19u-20211231/",
"name":"Het journaal 19u",
"description":"Het journaal 19u van vrijdag 31 december 2021.",
"potentialAction":{"url":"https://vrtnu.page.link/pfVy6ihgCAJKgHqe8","@type":"ShareAction"},
"mainEntityOfPage":{"@id":"1640092242445","@type":"WebPage"},
"publication":[{
"startDate":"2021-12-31T19:00:00.000+01:00",
"endDate":"2022-01-30T23:55:00.000+01:00",
"publishedBy":{"name":"een","@type":"Organization"},
"publishedOn":{"url":"https://www.vrt.be/vrtnu/","name":"VRT NU","@type":"BroadcastService"},
"@id":"pbs-pub-3a7ec233-da95-4c1e-9b2b-cf5fdfebcbe8",
"@type":"BroadcastEvent"
}],
"video":{
"name":"Het journaal - Aflevering 365 (Seizoen 2021)",
"description":"Het journaal 19u van vrijdag 31 december 2021. Bekijk aflevering 365 van seizoen 2021 met VRT NU via de site of app.",
"thumbnailUrl":"//images.vrt.be/width1280/2021/12/31/80d5ed00-6a64-11ec-b07d-02b7b76bf47f.jpg",
"expires":"2022-01-30T23:55:00.000+01:00",
"hasPart":[
{"name":"Explosie Turnhout","startOffset":70,"@type":"Clip"},
{"name":"Jaarwisseling","startOffset":440,"@type":"Clip"},
{"name":"Natuurbranden Colorado","startOffset":1179,"@type":"Clip"},
{"name":"Klimaatverandering","startOffset":1263,"@type":"Clip"},
{"name":"Zacht weer","startOffset":1367,"@type":"Clip"},
{"name":"Financiële balans","startOffset":1383,"@type":"Clip"},
{"name":"Club Brugge","startOffset":1484,"@type":"Clip"},
{"name":"Mentale gezondheid bij topsporters","startOffset":1575,"@type":"Clip"},
{"name":"Olympische Winterspelen","startOffset":1728,"@type":"Clip"},
{"name":"Sober oudjaar in Nederland","startOffset":1873,"@type":"Clip"}
],
"duration":"PT34M39.23S",
"uploadDate":"2021-12-31T19:00:00.000+01:00",
"@id":"vid-9457d0c6-b8ac-4aba-b5e1-15aa3a3295b5",
"@type":"VideoObject"
},
"genre":["Nieuws en actua"],
"episodeNumber":365,
"partOfSeries":{"name":"Het journaal","@id":"222831405527","@type":"TVSeries"},
"partOfSeason":{"name":"Seizoen 2021","@id":"961809365527","@type":"TVSeason"},
"@context":"https://schema.org","@id":"961685295527","@type":"TVEpisode"}</script>
''',
{
'chapters': [
{"title": "Explosie Turnhout", "start_time": 70, "end_time": 440},
{"title": "Jaarwisseling", "start_time": 440, "end_time": 1179},
{"title": "Natuurbranden Colorado", "start_time": 1179, "end_time": 1263},
{"title": "Klimaatverandering", "start_time": 1263, "end_time": 1367},
{"title": "Zacht weer", "start_time": 1367, "end_time": 1383},
{"title": "Financiële balans", "start_time": 1383, "end_time": 1484},
{"title": "Club Brugge", "start_time": 1484, "end_time": 1575},
{"title": "Mentale gezondheid bij topsporters", "start_time": 1575, "end_time": 1728},
{"title": "Olympische Winterspelen", "start_time": 1728, "end_time": 1873},
{"title": "Sober oudjaar in Nederland", "start_time": 1873, "end_time": 2079.23}
],
'title': 'Het journaal - Aflevering 365 (Seizoen 2021)'
}, {}
),
(
# test multiple thumbnails in a list
r'''
<script type="application/ld+json">
{"@context":"https://schema.org",
"@type":"VideoObject",
"thumbnailUrl":["https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg"]}
</script>''',
{
'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}],
},
{},
),
(
# test single thumbnail
r'''
<script type="application/ld+json">
{"@context":"https://schema.org",
"@type":"VideoObject",
"thumbnailUrl":"https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg"}
</script>''',
{
'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}],
},
{},
)
]
for html, expected_dict, search_json_ld_kwargs in _TESTS:
expect_dict(
self,
self.ie._search_json_ld(html, None, **search_json_ld_kwargs),
expected_dict
)

def test_download_json(self):
uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')

@@ -30,8 +30,7 @@ class YDL(FakeYDL):
|
||||
self.msgs = []
|
||||
|
||||
def process_info(self, info_dict):
|
||||
info_dict.pop('__original_infodict', None)
|
||||
self.downloaded_info_dicts.append(info_dict)
|
||||
self.downloaded_info_dicts.append(info_dict.copy())
|
||||
|
||||
def to_screen(self, msg):
|
||||
self.msgs.append(msg)
|
||||
@@ -137,7 +136,7 @@ class TestFormatSelection(unittest.TestCase):
|
||||
test('webm/mp4', '47')
|
||||
test('3gp/40/mp4', '35')
|
||||
test('example-with-dashes', 'example-with-dashes')
|
||||
test('all', '35', 'example-with-dashes', '45', '47', '2') # Order doesn't actually matter for this
|
||||
test('all', '2', '47', '45', 'example-with-dashes', '35')
|
||||
test('mergeall', '2+47+45+example-with-dashes+35', multi=True)
|
||||
|
||||
def test_format_selection_audio(self):
|
||||
@@ -520,7 +519,7 @@ class TestFormatSelection(unittest.TestCase):
|
||||
ydl = YDL({'format': 'all[width>=400][width<=600]'})
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
|
||||
self.assertEqual(downloaded_ids, ['B', 'C', 'D'])
|
||||
self.assertEqual(downloaded_ids, ['D', 'C', 'B'])
|
||||
|
||||
ydl = YDL({'format': 'best[height<40]'})
|
||||
try:
|
||||
@@ -645,6 +644,7 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
'ext': 'mp4',
|
||||
'width': None,
|
||||
'height': 1080,
|
||||
'filesize': 1024,
|
||||
'title1': '$PATH',
|
||||
'title2': '%PATH%',
|
||||
'title3': 'foo/bar\\test',
|
||||
@@ -717,6 +717,7 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(id)s', '.abcd', info={'id': '.abcd'})
|
||||
test('%(id)s', 'ab__cd', info={'id': 'ab__cd'})
|
||||
test('%(id)s', ('ab:cd', 'ab -cd'), info={'id': 'ab:cd'})
|
||||
test('%(id.0)s', '-', info={'id': '--'})
|
||||
|
||||
# Invalid templates
|
||||
self.assertTrue(isinstance(YoutubeDL.validate_outtmpl('%(title)'), ValueError))
|
||||
@@ -777,6 +778,11 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(title5)#U', 'a\u0301e\u0301i\u0301 𝐀')
|
||||
test('%(title5)+U', 'áéí A')
|
||||
test('%(title5)+#U', 'a\u0301e\u0301i\u0301 A')
|
||||
test('%(height)D', '1k')
|
||||
test('%(filesize)#D', '1Ki')
|
||||
test('%(height)5.2D', ' 1.08k')
|
||||
test('%(title4)#S', 'foo_bar_test')
|
||||
test('%(title4).10S', ('foo \'bar\' ', 'foo \'bar\'' + ('#' if compat_os_name == 'nt' else ' ')))
|
||||
if compat_os_name == 'nt':
|
||||
test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
|
||||
test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', "'id 1' 'id 2' 'id 3'"))
|
||||
@@ -808,6 +814,11 @@ class TestYoutubeDL(unittest.TestCase):
|
||||
test('%(width-100,height+width|def)s', 'def')
|
||||
test('%(timestamp-x>%H\\,%M\\,%S,timestamp>%H\\,%M\\,%S)s', '12,00,00')
|
||||
|
||||
# Replacement
|
||||
test('%(id&foo)s.bar', 'foo.bar')
|
||||
test('%(title&foo)s.bar', 'NA.bar')
|
||||
test('%(title&foo|baz)s.bar', 'baz.bar')
|
||||
|
||||
# Laziness
|
||||
def gen():
|
||||
yield from range(5)
|
||||
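
The replacement tests above double as a tiny usage sketch. Assuming yt_dlp is importable, and with a made-up info dict (the 'xyz' id and the missing title are illustrative only, not from the test fixtures):

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL()
    info = {'id': 'xyz', 'ext': 'mp4'}  # hypothetical info dict; note there is no 'title' key
    print(ydl.evaluate_outtmpl('%(id&foo)s.bar', info))         # 'foo.bar' - id exists, so '&foo' replaces it
    print(ydl.evaluate_outtmpl('%(title&foo|baz)s.bar', info))  # 'baz.bar' - title is missing, so the '|baz' default applies
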
@@ -885,20 +896,6 @@ class TestYoutubeDL(unittest.TestCase):
            os.unlink(filename)

    def test_match_filter(self):
        class FilterYDL(YDL):
            def __init__(self, *args, **kwargs):
                super(FilterYDL, self).__init__(*args, **kwargs)
                self.params['simulate'] = True

            def process_info(self, info_dict):
                super(YDL, self).process_info(info_dict)

            def _match_entry(self, info_dict, incomplete=False):
                res = super(FilterYDL, self)._match_entry(info_dict, incomplete)
                if res is None:
                    self.downloaded_info_dicts.append(info_dict)
                return res

        first = {
            'id': '1',
            'url': TEST_URL,
@@ -926,7 +923,7 @@ class TestYoutubeDL(unittest.TestCase):
        videos = [first, second]

        def get_videos(filter_=None):
            ydl = FilterYDL({'match_filter': filter_})
            ydl = YDL({'match_filter': filter_, 'simulate': True})
            for v in videos:
                ydl.process_ie_result(v, download=True)
            return [v['id'] for v in ydl.downloaded_info_dicts]
@@ -1141,6 +1138,7 @@ class TestYoutubeDL(unittest.TestCase):
        self.assertTrue(entries[1] is None)
        self.assertEqual(len(ydl.downloaded_info_dicts), 1)
        downloaded = ydl.downloaded_info_dicts[0]
        entries[2].pop('requested_downloads', None)
        self.assertEqual(entries[2], downloaded)
        self.assertEqual(downloaded['url'], TEST_URL)
        self.assertEqual(downloaded['title'], 'Video Transparent 2')

@@ -10,6 +10,8 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from yt_dlp.aes import (
    aes_decrypt,
    aes_encrypt,
    aes_ecb_encrypt,
    aes_ecb_decrypt,
    aes_cbc_decrypt,
    aes_cbc_decrypt_bytes,
    aes_cbc_encrypt,
@@ -17,7 +19,8 @@ from yt_dlp.aes import (
    aes_ctr_encrypt,
    aes_gcm_decrypt_and_verify,
    aes_gcm_decrypt_and_verify_bytes,
    aes_decrypt_text
    aes_decrypt_text,
    BLOCK_SIZE_BYTES,
)
from yt_dlp.compat import compat_pycrypto_AES
from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes
@@ -94,6 +97,19 @@ class TestAES(unittest.TestCase):
        decrypted = (aes_decrypt_text(encrypted, password, 32))
        self.assertEqual(decrypted, self.secret_msg)

    def test_ecb_encrypt(self):
        data = bytes_to_intlist(self.secret_msg)
        data += [0x08] * (BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES)
        encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key, self.iv))
        self.assertEqual(
            encrypted,
            b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')

    def test_ecb_decrypt(self):
        data = bytes_to_intlist(b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:')
        decrypted = intlist_to_bytes(aes_ecb_decrypt(data, self.key, self.iv))
        self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)


if __name__ == '__main__':
    unittest.main()

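A minimal sketch of the round trip these new ECB tests exercise; the key bytes and message below are illustrative, not taken from the test fixtures:

    from yt_dlp.aes import aes_ecb_encrypt, aes_ecb_decrypt, BLOCK_SIZE_BYTES
    from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes

    key = bytes_to_intlist(b'Y' * 16)                  # hypothetical 16-byte key
    data = bytes_to_intlist(b'Secret message goes here')
    data += [0x08] * (BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES)  # pad to a block boundary, as in the test

    ciphertext = intlist_to_bytes(aes_ecb_encrypt(data, key))
    decrypted = intlist_to_bytes(aes_ecb_decrypt(bytes_to_intlist(ciphertext), key))
    assert decrypted.rstrip(b'\x08') == b'Secret message goes here'
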
@@ -38,7 +38,6 @@ class TestAllURLsMatching(unittest.TestCase):
        assertTab('https://www.youtube.com/AsapSCIENCE')
        assertTab('https://www.youtube.com/embedded')
        assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
        assertTab('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
        assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
        self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))

@@ -8,6 +8,8 @@ from yt_dlp.cookies import (
    WindowsChromeCookieDecryptor,
    parse_safari_cookies,
    pbkdf2_sha1,
    _get_linux_desktop_environment,
    _LinuxDesktopEnvironment,
)


@@ -42,6 +44,37 @@ class MonkeyPatch:


class TestCookies(unittest.TestCase):
    def test_get_desktop_environment(self):
        """ based on https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util_unittest.cc """
        test_cases = [
            ({}, _LinuxDesktopEnvironment.OTHER),

            ({'DESKTOP_SESSION': 'gnome'}, _LinuxDesktopEnvironment.GNOME),
            ({'DESKTOP_SESSION': 'mate'}, _LinuxDesktopEnvironment.GNOME),
            ({'DESKTOP_SESSION': 'kde4'}, _LinuxDesktopEnvironment.KDE),
            ({'DESKTOP_SESSION': 'kde'}, _LinuxDesktopEnvironment.KDE),
            ({'DESKTOP_SESSION': 'xfce'}, _LinuxDesktopEnvironment.XFCE),

            ({'GNOME_DESKTOP_SESSION_ID': 1}, _LinuxDesktopEnvironment.GNOME),
            ({'KDE_FULL_SESSION': 1}, _LinuxDesktopEnvironment.KDE),

            ({'XDG_CURRENT_DESKTOP': 'X-Cinnamon'}, _LinuxDesktopEnvironment.CINNAMON),
            ({'XDG_CURRENT_DESKTOP': 'GNOME'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'GNOME:GNOME-Classic'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'GNOME : GNOME-Classic'}, _LinuxDesktopEnvironment.GNOME),

            ({'XDG_CURRENT_DESKTOP': 'Unity', 'DESKTOP_SESSION': 'gnome-fallback'}, _LinuxDesktopEnvironment.GNOME),
            ({'XDG_CURRENT_DESKTOP': 'KDE', 'KDE_SESSION_VERSION': '5'}, _LinuxDesktopEnvironment.KDE),
            ({'XDG_CURRENT_DESKTOP': 'KDE'}, _LinuxDesktopEnvironment.KDE),
            ({'XDG_CURRENT_DESKTOP': 'Pantheon'}, _LinuxDesktopEnvironment.PANTHEON),
            ({'XDG_CURRENT_DESKTOP': 'Unity'}, _LinuxDesktopEnvironment.UNITY),
            ({'XDG_CURRENT_DESKTOP': 'Unity:Unity7'}, _LinuxDesktopEnvironment.UNITY),
            ({'XDG_CURRENT_DESKTOP': 'Unity:Unity8'}, _LinuxDesktopEnvironment.UNITY),
        ]

        for env, expected_desktop_environment in test_cases:
            self.assertEqual(_get_linux_desktop_environment(env), expected_desktop_environment)

    def test_chrome_cookie_decryptor_linux_derive_key(self):
        key = LinuxChromeCookieDecryptor.derive_key(b'abc')
        self.assertEqual(key, b'7\xa1\xec\xd4m\xfcA\xc7\xb19Z\xd0\x19\xdcM\x17')
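
From the table above, a short hedged sketch of how the helper is meant to be called (the environment values are illustrative):

    from yt_dlp.cookies import _get_linux_desktop_environment, _LinuxDesktopEnvironment

    env = {'XDG_CURRENT_DESKTOP': 'KDE', 'KDE_SESSION_VERSION': '5'}  # illustrative environment mapping
    assert _get_linux_desktop_environment(env) is _LinuxDesktopEnvironment.KDE
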
@@ -58,8 +91,7 @@ class TestCookies(unittest.TestCase):
        self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_chrome_cookie_decryptor_linux_v11(self):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b'',
                                   'KEYRING_AVAILABLE': True}):
        with MonkeyPatch(cookies, {'_get_linux_keyring_password': lambda *args, **kwargs: b''}):
            encrypted_value = b'v11#\x81\x10>`w\x8f)\xc0\xb2\xc1\r\xf4\x1al\xdd\x93\xfd\xf8\xf8N\xf2\xa9\x83\xf1\xe9o\x0elVQd'
            value = 'tz=Europe.London'
            decryptor = LinuxChromeCookieDecryptor('Chrome', Logger())

@@ -53,7 +53,7 @@ class YoutubeDL(yt_dlp.YoutubeDL):
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        self.processed_info_dicts.append(info_dict.copy())
        return super(YoutubeDL, self).process_info(info_dict)



@@ -1,26 +0,0 @@
# coding: utf-8

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from yt_dlp.options import _hide_login_info


class TestOptions(unittest.TestCase):
    def test_hide_login_info(self):
        self.assertEqual(_hide_login_info(['-u', 'foo', '-p', 'bar']),
                         ['-u', 'PRIVATE', '-p', 'PRIVATE'])
        self.assertEqual(_hide_login_info(['-u']), ['-u'])
        self.assertEqual(_hide_login_info(['-u', 'foo', '-u', 'bar']),
                         ['-u', 'PRIVATE', '-u', 'PRIVATE'])
        self.assertEqual(_hide_login_info(['--username=foo']),
                         ['--username=PRIVATE'])


if __name__ == '__main__':
    unittest.main()
@@ -124,11 +124,11 @@ class TestModifyChaptersPP(unittest.TestCase):
        chapters = self._chapters([70], ['c']) + [
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'preview'),
            self._sponsor_chapter(50, 60, 'sponsor')]
            self._sponsor_chapter(50, 60, 'filler')]
        expected = self._chapters(
            [10, 20, 30, 40, 50, 60, 70],
            ['c', '[SponsorBlock]: Sponsor', 'c', '[SponsorBlock]: Preview/Recap',
             'c', '[SponsorBlock]: Sponsor', 'c'])
             'c', '[SponsorBlock]: Filler Tangent', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):

@@ -13,7 +13,7 @@ from test.helper import FakeYDL, md5, is_download_test
from yt_dlp.extractor import (
    YoutubeIE,
    DailymotionIE,
    TEDIE,
    TedTalkIE,
    VimeoIE,
    WallaIE,
    CeskaTelevizeIE,
@@ -141,7 +141,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
@is_download_test
class TestTedSubtitles(BaseTestSubtitles):
    url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
    IE = TEDIE
    IE = TedTalkIE

    def test_allsubtitles(self):
        self.DL.params['writesubtitles'] = True

@@ -23,6 +23,7 @@ from yt_dlp.utils import (
    caesar,
    clean_html,
    clean_podcast_url,
    Config,
    date_from_str,
    datetime_from_str,
    DateRange,
@@ -37,11 +38,18 @@ from yt_dlp.utils import (
    ExtractorError,
    find_xpath_attr,
    fix_xml_ampersands,
    format_bytes,
    float_or_none,
    get_element_by_class,
    get_element_by_attribute,
    get_elements_by_class,
    get_elements_by_attribute,
    get_element_html_by_class,
    get_element_html_by_attribute,
    get_elements_html_by_class,
    get_elements_html_by_attribute,
    get_elements_text_and_html_by_attribute,
    get_element_text_and_html_by_tag,
    InAdvancePagedList,
    int_or_none,
    intlist_to_bytes,
@@ -116,6 +124,7 @@ from yt_dlp.compat import (
    compat_chr,
    compat_etree_fromstring,
    compat_getenv,
    compat_HTMLParseError,
    compat_os_name,
    compat_setenv,
)
@@ -634,6 +643,8 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
        self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
        self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
        self.assertEqual(parse_duration('01:02:03:050'), 3723.05)
        self.assertEqual(parse_duration('103:050'), 103.05)

    def test_fix_xml_ampersands(self):
        self.assertEqual(
@@ -1156,9 +1167,16 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(parse_count('1000'), 1000)
        self.assertEqual(parse_count('1.000'), 1000)
        self.assertEqual(parse_count('1.1k'), 1100)
        self.assertEqual(parse_count('1.1 k'), 1100)
        self.assertEqual(parse_count('1,1 k'), 1100)
        self.assertEqual(parse_count('1.1kk'), 1100000)
        self.assertEqual(parse_count('1.1kk '), 1100000)
        self.assertEqual(parse_count('1,1kk'), 1100000)
        self.assertEqual(parse_count('100 views'), 100)
        self.assertEqual(parse_count('1,100 views'), 1100)
        self.assertEqual(parse_count('1.1kk views'), 1100000)
        self.assertEqual(parse_count('10M views'), 10000000)
        self.assertEqual(parse_count('has 10M views'), 10000000)

    def test_parse_resolution(self):
        self.assertEqual(parse_resolution(None), {})
@@ -1222,12 +1240,49 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
    def test_render_table(self):
        self.assertEqual(
            render_table(
                ['a', 'bcd'],
                [[123, 4], [9999, 51]]),
                ['a', 'empty', 'bcd'],
                [[123, '', 4], [9999, '', 51]]),
            'a empty bcd\n'
            '123 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['a', 'empty', 'bcd'],
                [[123, '', 4], [9999, '', 51]],
                hide_empty=True),
            'a bcd\n'
            '123 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['\ta', 'bcd'],
                [['1\t23', 4], ['\t9999', 51]]),
            ' a bcd\n'
            '1 23 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['a', 'bcd'],
                [[123, 4], [9999, 51]],
                delim='-'),
            'a bcd\n'
            '--------\n'
            '123 4\n'
            '9999 51')

        self.assertEqual(
            render_table(
                ['a', 'bcd'],
                [[123, 4], [9999, 51]],
                delim='-', extra_gap=2),
            'a bcd\n'
            '----------\n'
            '123 4\n'
            '9999 51')

    def test_match_str(self):
        # Unary
        self.assertFalse(match_str('xy', {'x': 1200}))
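
The expectations above pin down the keyword arguments; a short, hedged usage sketch follows (the exact output spacing depends on the computed column widths, which the scrape of the expected strings above has collapsed):

    from yt_dlp.utils import render_table

    # delim draws a horizontal rule under the header; extra_gap widens the
    # column gap; hide_empty drops columns whose cells are all empty.
    print(render_table(['a', 'bcd'], [[123, 4], [9999, 51]], delim='-', extra_gap=2))
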
@@ -1529,46 +1584,116 @@ Line 1
        self.assertEqual(urshift(3, 1), 1)
        self.assertEqual(urshift(-3, 1), 2147483646)

    def test_get_element_by_class(self):
        html = '''
    GET_ELEMENT_BY_CLASS_TEST_STRING = '''
        <span class="foo bar">nice</span>
    '''

    def test_get_element_by_class(self):
        html = self.GET_ELEMENT_BY_CLASS_TEST_STRING

        self.assertEqual(get_element_by_class('foo', html), 'nice')
        self.assertEqual(get_element_by_class('no-such-class', html), None)

    def test_get_element_by_attribute(self):
        html = '''
        <span class="foo bar">nice</span>
    def test_get_element_html_by_class(self):
        html = self.GET_ELEMENT_BY_CLASS_TEST_STRING

        self.assertEqual(get_element_html_by_class('foo', html), html.strip())
        self.assertEqual(get_element_by_class('no-such-class', html), None)

    GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING = '''
        <div itemprop="author" itemscope>foo</div>
    '''

    def test_get_element_by_attribute(self):
        html = self.GET_ELEMENT_BY_CLASS_TEST_STRING

        self.assertEqual(get_element_by_attribute('class', 'foo bar', html), 'nice')
        self.assertEqual(get_element_by_attribute('class', 'foo', html), None)
        self.assertEqual(get_element_by_attribute('class', 'no-such-foo', html), None)

        html = '''
        <div itemprop="author" itemscope>foo</div>
        '''
        html = self.GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING

        self.assertEqual(get_element_by_attribute('itemprop', 'author', html), 'foo')

    def test_get_elements_by_class(self):
        html = '''
    def test_get_element_html_by_attribute(self):
        html = self.GET_ELEMENT_BY_CLASS_TEST_STRING

        self.assertEqual(get_element_html_by_attribute('class', 'foo bar', html), html.strip())
        self.assertEqual(get_element_html_by_attribute('class', 'foo', html), None)
        self.assertEqual(get_element_html_by_attribute('class', 'no-such-foo', html), None)

        html = self.GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING

        self.assertEqual(get_element_html_by_attribute('itemprop', 'author', html), html.strip())

    GET_ELEMENTS_BY_CLASS_TEST_STRING = '''
        <span class="foo bar">nice</span><span class="foo bar">also nice</span>
    '''
    GET_ELEMENTS_BY_CLASS_RES = ['<span class="foo bar">nice</span>', '<span class="foo bar">also nice</span>']

    def test_get_elements_by_class(self):
        html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING

        self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
        self.assertEqual(get_elements_by_class('no-such-class', html), [])

    def test_get_elements_html_by_class(self):
        html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING

        self.assertEqual(get_elements_html_by_class('foo', html), self.GET_ELEMENTS_BY_CLASS_RES)
        self.assertEqual(get_elements_html_by_class('no-such-class', html), [])

    def test_get_elements_by_attribute(self):
        html = '''
        <span class="foo bar">nice</span><span class="foo bar">also nice</span>
        '''
        html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING

        self.assertEqual(get_elements_by_attribute('class', 'foo bar', html), ['nice', 'also nice'])
        self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
        self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])

    def test_get_elements_html_by_attribute(self):
        html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING

        self.assertEqual(get_elements_html_by_attribute('class', 'foo bar', html), self.GET_ELEMENTS_BY_CLASS_RES)
        self.assertEqual(get_elements_html_by_attribute('class', 'foo', html), [])
        self.assertEqual(get_elements_html_by_attribute('class', 'no-such-foo', html), [])

    def test_get_elements_text_and_html_by_attribute(self):
        html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING

        self.assertEqual(
            list(get_elements_text_and_html_by_attribute('class', 'foo bar', html)),
            list(zip(['nice', 'also nice'], self.GET_ELEMENTS_BY_CLASS_RES)))
        self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'foo', html)), [])
        self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'no-such-foo', html)), [])

    GET_ELEMENT_BY_TAG_TEST_STRING = '''
    random text lorem ipsum</p>
    <div>
        this should be returned
        <span>this should also be returned</span>
        <div>
            this should also be returned
        </div>
        closing tag above should not trick, so this should also be returned
    </div>
    but this text should not be returned
    '''
    GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML = GET_ELEMENT_BY_TAG_TEST_STRING.strip()[32:276]
    GET_ELEMENT_BY_TAG_RES_OUTERDIV_TEXT = GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML[5:-6]
    GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML = GET_ELEMENT_BY_TAG_TEST_STRING.strip()[78:119]
    GET_ELEMENT_BY_TAG_RES_INNERSPAN_TEXT = GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML[6:-7]

    def test_get_element_text_and_html_by_tag(self):
        html = self.GET_ELEMENT_BY_TAG_TEST_STRING

        self.assertEqual(
            get_element_text_and_html_by_tag('div', html),
            (self.GET_ELEMENT_BY_TAG_RES_OUTERDIV_TEXT, self.GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML))
        self.assertEqual(
            get_element_text_and_html_by_tag('span', html),
            (self.GET_ELEMENT_BY_TAG_RES_INNERSPAN_TEXT, self.GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML))
        self.assertRaises(compat_HTMLParseError, get_element_text_and_html_by_tag, 'article', html)

    def test_iri_to_uri(self):
        self.assertEqual(
            iri_to_uri('https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b'),
@@ -1620,9 +1745,9 @@ Line 1
        self.assertEqual(repr(LazyList(it)), repr(it))
        self.assertEqual(str(LazyList(it)), str(it))

        self.assertEqual(list(LazyList(it).reverse()), it[::-1])
        self.assertEqual(list(LazyList(it).reverse()[1:3:7]), it[::-1][1:3:7])
        self.assertEqual(list(LazyList(it).reverse()[::-1]), it)
        self.assertEqual(list(LazyList(it, reverse=True)), it[::-1])
        self.assertEqual(list(reversed(LazyList(it))[::-1]), it)
        self.assertEqual(list(reversed(LazyList(it))[1:3:7]), it[::-1][1:3:7])

    def test_LazyList_laziness(self):

@@ -1635,15 +1760,36 @@ Line 1
        test(ll, 5, 5, range(6))
        test(ll, -3, 7, range(10))

        ll = LazyList(range(10)).reverse()
        ll = LazyList(range(10), reverse=True)
        test(ll, -1, 0, range(1))
        test(ll, 3, 6, range(10))

        ll = LazyList(itertools.count())
        test(ll, 10, 10, range(11))
        ll.reverse()
        ll = reversed(ll)
        test(ll, -15, 14, range(15))

    def test_format_bytes(self):
        self.assertEqual(format_bytes(0), '0.00B')
        self.assertEqual(format_bytes(1000), '1000.00B')
        self.assertEqual(format_bytes(1024), '1.00KiB')
        self.assertEqual(format_bytes(1024**2), '1.00MiB')
        self.assertEqual(format_bytes(1024**3), '1.00GiB')
        self.assertEqual(format_bytes(1024**4), '1.00TiB')
        self.assertEqual(format_bytes(1024**5), '1.00PiB')
        self.assertEqual(format_bytes(1024**6), '1.00EiB')
        self.assertEqual(format_bytes(1024**7), '1.00ZiB')
        self.assertEqual(format_bytes(1024**8), '1.00YiB')

    def test_hide_login_info(self):
        self.assertEqual(Config.hide_login_info(['-u', 'foo', '-p', 'bar']),
                         ['-u', 'PRIVATE', '-p', 'PRIVATE'])
        self.assertEqual(Config.hide_login_info(['-u']), ['-u'])
        self.assertEqual(Config.hide_login_info(['-u', 'foo', '-u', 'bar']),
                         ['-u', 'PRIVATE', '-u', 'PRIVATE'])
        self.assertEqual(Config.hide_login_info(['--username=foo']),
                         ['--username=PRIVATE'])


if __name__ == '__main__':
    unittest.main()

@@ -19,52 +19,52 @@ class TestVerboseOutput(unittest.TestCase):
        [
            sys.executable, 'yt_dlp/__main__.py', '-v',
            '--username', 'johnsmith@gmail.com',
            '--password', 'secret',
            '--password', 'my_secret_password',
        ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'--username' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'--password' in serr)
        self.assertTrue(b'secret' not in serr)
        self.assertTrue(b'my_secret_password' not in serr)

    def test_private_info_shortarg(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py', '-v',
                '-u', 'johnsmith@gmail.com',
                '-p', 'secret',
                '-p', 'my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'-u' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'-p' in serr)
        self.assertTrue(b'secret' not in serr)
        self.assertTrue(b'my_secret_password' not in serr)

    def test_private_info_eq(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py', '-v',
                '--username=johnsmith@gmail.com',
                '--password=secret',
                '--password=my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'--username' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'--password' in serr)
        self.assertTrue(b'secret' not in serr)
        self.assertTrue(b'my_secret_password' not in serr)

    def test_private_info_shortarg_eq(self):
        outp = subprocess.Popen(
            [
                sys.executable, 'yt_dlp/__main__.py', '-v',
                '-u=johnsmith@gmail.com',
                '-p=secret',
                '-p=my_secret_password',
            ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'-u' in serr)
        self.assertTrue(b'johnsmith' not in serr)
        self.assertTrue(b'-p' in serr)
        self.assertTrue(b'secret' not in serr)
        self.assertTrue(b'my_secret_password' not in serr)


if __name__ == '__main__':

@@ -9,11 +9,9 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, is_download_test


from yt_dlp.extractor import (
    YoutubePlaylistIE,
    YoutubeTabIE,
    YoutubeIE,
    YoutubeTabIE,
)


@@ -26,38 +24,20 @@ class TestYoutubeLists(unittest.TestCase):
    def test_youtube_playlist_noplaylist(self):
        dl = FakeYDL()
        dl.params['noplaylist'] = True
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=OmJ-4B-mS-Y&list=PLydZ2Hrp_gPRJViZjLFKaBMgCQOYEEkyp&index=2')
        self.assertEqual(result['_type'], 'url')
        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

    def test_youtube_course(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        # TODO find a > 100 (paginating?) videos course
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        entries = list(result['entries'])
        self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(len(entries), 25)
        self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
        self.assertEqual(result['ie_key'], YoutubeIE.ie_key())
        self.assertEqual(YoutubeIE.extract_id(result['url']), 'OmJ-4B-mS-Y')

    def test_youtube_mix(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
        entries = result['entries']
        ie = YoutubeTabIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=tyITL_exICo&list=RDCLAK5uy_kLWIr9gv1XLlPbaDS965-Db4TrBoUTxQ8')
        entries = list(result['entries'])
        self.assertTrue(len(entries) >= 50)
        original_video = entries[0]
        self.assertEqual(original_video['id'], 'OQpdSVF_k_w')

    def test_youtube_toptracks(self):
        print('Skipping: The playlist page gives error 500')
        return
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=MCUS')
        entries = result['entries']
        self.assertEqual(len(entries), 100)
        self.assertEqual(original_video['id'], 'tyITL_exICo')

    def test_youtube_flat_playlist_extraction(self):
        dl = FakeYDL()
@@ -68,10 +48,10 @@ class TestYoutubeLists(unittest.TestCase):
        entries = list(result['entries'])
        self.assertTrue(len(entries) == 1)
        video = entries[0]
        self.assertEqual(video['_type'], 'url_transparent')
        self.assertEqual(video['_type'], 'url')
        self.assertEqual(video['ie_key'], 'Youtube')
        self.assertEqual(video['id'], 'BaW_jenozKc')
        self.assertEqual(video['url'], 'BaW_jenozKc')
        self.assertEqual(video['url'], 'https://www.youtube.com/watch?v=BaW_jenozKc')
        self.assertEqual(video['title'], 'youtube-dl test video "\'/\\ä↭𝕐')
        self.assertEqual(video['duration'], 10)
        self.assertEqual(video['uploader'], 'Philipp Hagemeister')

@@ -74,6 +74,26 @@ _NSIG_TESTS = [
        'https://www.youtube.com/s/player/f8cb7a3b/player_ias.vflset/en_US/base.js',
        'oBo2h5euWy6osrUt', 'ivXHpm7qJjJN',
    ),
    (
        'https://www.youtube.com/s/player/2dfe380c/player_ias.vflset/en_US/base.js',
        'oBo2h5euWy6osrUt', '3DIBbn3qdQ',
    ),
    (
        'https://www.youtube.com/s/player/f1ca6900/player_ias.vflset/en_US/base.js',
        'cu3wyu6LQn2hse', 'jvxetvmlI9AN9Q',
    ),
    (
        'https://www.youtube.com/s/player/8040e515/player_ias.vflset/en_US/base.js',
        'wvOFaY-yjgDuIEg5', 'HkfBFDHmgw4rsw',
    ),
    (
        'https://www.youtube.com/s/player/e06dea74/player_ias.vflset/en_US/base.js',
        'AiuodmaDDYw8d3y4bf', 'ankd8eza2T6Qmw',
    ),
    (
        'https://www.youtube.com/s/player/5dd88d1d/player-plasma-ias-phone-en_US.vflset/base.js',
        'kSxKFLeqzv_ZyHSAt', 'n8gS8oRlHOxPFA',
    ),
]


@@ -104,10 +124,17 @@ class TestPlayerInfo(unittest.TestCase):
class TestSignature(unittest.TestCase):
    def setUp(self):
        TEST_DIR = os.path.dirname(os.path.abspath(__file__))
        self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
        self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata/sigs')
        if not os.path.exists(self.TESTDATA_DIR):
            os.mkdir(self.TESTDATA_DIR)

    def tearDown(self):
        try:
            for f in os.listdir(self.TESTDATA_DIR):
                os.remove(f)
        except OSError:
            pass


def t_factory(name, sig_func, url_pattern):
    def make_tfunc(url, sig_input, expected_sig):

yt_dlp/YoutubeDL.py (1126 changed lines): File diff suppressed because it is too large
@@ -2,8 +2,15 @@ from __future__ import unicode_literals

from math import ceil

from .compat import compat_b64decode, compat_pycrypto_AES
from .utils import bytes_to_intlist, intlist_to_bytes
from .compat import (
    compat_b64decode,
    compat_ord,
    compat_pycrypto_AES,
)
from .utils import (
    bytes_to_intlist,
    intlist_to_bytes,
)


if compat_pycrypto_AES:
@@ -25,9 +32,55 @@ else:
        return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce))))


def unpad_pkcs7(data):
    return data[:-compat_ord(data[-1])]


BLOCK_SIZE_BYTES = 16


def aes_ecb_encrypt(data, key, iv=None):
    """
    Encrypt with aes in ECB mode

    @param {int[]} data      cleartext
    @param {int[]} key       16/24/32-Byte cipher key
    @param {int[]} iv        Unused for this mode
    @returns {int[]}         encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_encrypt(block, expanded_key)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data


def aes_ecb_decrypt(data, key, iv=None):
    """
    Decrypt with aes in ECB mode

    @param {int[]} data      cleartext
    @param {int[]} key       16/24/32-Byte cipher key
    @param {int[]} iv        Unused for this mode
    @returns {int[]}         decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_decrypt(block, expanded_key)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data


def aes_ctr_decrypt(data, key, iv):
    """
    Decrypt with aes in counter mode
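
A hedged illustration of the unpad_pkcs7 helper introduced above; the padded block is made up for the example:

    from yt_dlp.aes import unpad_pkcs7

    padded = b'hello world' + b'\x05' * 5  # illustrative 16-byte block ending in 5 padding bytes
    assert unpad_pkcs7(padded) == b'hello world'
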
@@ -464,5 +517,6 @@ __all__ = [
    'aes_encrypt',
    'aes_gcm_decrypt_and_verify',
    'aes_gcm_decrypt_and_verify_bytes',
    'key_expansion'
    'key_expansion',
    'unpad_pkcs7',
]

@@ -2,6 +2,7 @@

import asyncio
import base64
import collections
import ctypes
import getpass
import html
@@ -133,6 +134,16 @@ except AttributeError:
    asyncio.run = compat_asyncio_run


try:  # >= 3.7
    asyncio.tasks.all_tasks
except AttributeError:
    asyncio.tasks.all_tasks = asyncio.tasks.Task.all_tasks

try:
    import websockets as compat_websockets
except ImportError:
    compat_websockets = None

# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
# See https://github.com/yt-dlp/yt-dlp/issues/792
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
@@ -159,27 +170,45 @@ except ImportError:
except ImportError:
    compat_pycrypto_AES = None

try:
    import brotlicffi as compat_brotli
except ImportError:
    try:
        import brotli as compat_brotli
    except ImportError:
        compat_brotli = None

WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None


def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.python.org/issue30075
    if compat_os_name != 'nt':
        return
    global WINDOWS_VT_MODE
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    try:
        subprocess.Popen('', shell=True, startupinfo=startupinfo)
        WINDOWS_VT_MODE = True
    except Exception:
        pass


# Deprecated

compat_basestring = str
compat_chr = chr
compat_filter = filter
compat_input = input
compat_integer_types = (int, )
compat_kwargs = lambda kwargs: kwargs
compat_map = map
compat_numeric_types = (int, float, complex)
compat_str = str
compat_xpath = lambda xpath: xpath
compat_zip = zip

compat_collections_abc = collections.abc
compat_HTMLParser = html.parser.HTMLParser
compat_HTTPError = urllib.error.HTTPError
compat_Struct = struct.Struct
@@ -226,6 +255,7 @@ compat_xml_parse_error = etree.ParseError
# Set public objects

__all__ = [
    'WINDOWS_VT_MODE',
    'compat_HTMLParseError',
    'compat_HTMLParser',
    'compat_HTTPError',
@@ -235,7 +265,9 @@ __all__ = [
    'compat_asyncio_run',
    'compat_b64decode',
    'compat_basestring',
    'compat_brotli',
    'compat_chr',
    'compat_collections_abc',
    'compat_cookiejar',
    'compat_cookiejar_Cookie',
    'compat_cookies',
@@ -245,6 +277,7 @@ __all__ = [
    'compat_etree_fromstring',
    'compat_etree_register_namespace',
    'compat_expanduser',
    'compat_filter',
    'compat_get_terminal_size',
    'compat_getenv',
    'compat_getpass',
@@ -256,6 +289,7 @@ __all__ = [
    'compat_integer_types',
    'compat_itertools_count',
    'compat_kwargs',
    'compat_map',
    'compat_numeric_types',
    'compat_ord',
    'compat_os_name',
@@ -287,6 +321,7 @@ __all__ = [
    'compat_urllib_response',
    'compat_urlparse',
    'compat_urlretrieve',
    'compat_websockets',
    'compat_xml_parse_error',
    'compat_xpath',
    'compat_zip',

|
||||
import contextlib
|
||||
import ctypes
|
||||
import json
|
||||
import os
|
||||
@@ -7,15 +8,19 @@ import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from enum import Enum, auto
|
||||
from hashlib import pbkdf2_hmac
|
||||
|
||||
from .aes import aes_cbc_decrypt_bytes, aes_gcm_decrypt_and_verify_bytes
|
||||
from .aes import (
|
||||
aes_cbc_decrypt_bytes,
|
||||
aes_gcm_decrypt_and_verify_bytes,
|
||||
unpad_pkcs7,
|
||||
)
|
||||
from .compat import (
|
||||
compat_b64decode,
|
||||
compat_cookiejar_Cookie,
|
||||
)
|
||||
from .utils import (
|
||||
bug_reports_message,
|
||||
expand_path,
|
||||
Popen,
|
||||
YoutubeDLCookieJar,
|
||||
@@ -31,19 +36,16 @@ except ImportError:
|
||||
|
||||
|
||||
try:
|
||||
import keyring
|
||||
KEYRING_AVAILABLE = True
|
||||
KEYRING_UNAVAILABLE_REASON = f'due to unknown reasons{bug_reports_message()}'
|
||||
import secretstorage
|
||||
SECRETSTORAGE_AVAILABLE = True
|
||||
except ImportError:
|
||||
KEYRING_AVAILABLE = False
|
||||
KEYRING_UNAVAILABLE_REASON = (
|
||||
'as the `keyring` module is not installed. '
|
||||
'Please install by running `python3 -m pip install keyring`. '
|
||||
'Depending on your platform, additional packages may be required '
|
||||
'to access the keyring; see https://pypi.org/project/keyring')
|
||||
SECRETSTORAGE_AVAILABLE = False
|
||||
SECRETSTORAGE_UNAVAILABLE_REASON = (
|
||||
'as the `secretstorage` module is not installed. '
|
||||
'Please install by running `python3 -m pip install secretstorage`.')
|
||||
except Exception as _err:
|
||||
KEYRING_AVAILABLE = False
|
||||
KEYRING_UNAVAILABLE_REASON = 'as the `keyring` module could not be initialized: %s' % _err
|
||||
SECRETSTORAGE_AVAILABLE = False
|
||||
SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}'
|
||||
|
||||
|
||||
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
|
||||
@@ -74,8 +76,8 @@ class YDLLogger:
|
||||
def load_cookies(cookie_file, browser_specification, ydl):
|
||||
cookie_jars = []
|
||||
if browser_specification is not None:
|
||||
browser_name, profile = _parse_browser_specification(*browser_specification)
|
||||
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl)))
|
||||
browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
|
||||
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
|
||||
|
||||
if cookie_file is not None:
|
||||
cookie_file = expand_path(cookie_file)
|
||||
@@ -87,13 +89,13 @@ def load_cookies(cookie_file, browser_specification, ydl):
|
||||
return _merge_cookie_jars(cookie_jars)
|
||||
|
||||
|
||||
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger()):
|
||||
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
|
||||
if browser_name == 'firefox':
|
||||
return _extract_firefox_cookies(profile, logger)
|
||||
elif browser_name == 'safari':
|
||||
return _extract_safari_cookies(profile, logger)
|
||||
elif browser_name in CHROMIUM_BASED_BROWSERS:
|
||||
return _extract_chrome_cookies(browser_name, profile, logger)
|
||||
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
|
||||
else:
|
||||
raise ValueError('unknown browser: {}'.format(browser_name))
|
||||
|
||||
@@ -207,7 +209,7 @@ def _get_chromium_based_browser_settings(browser_name):
|
||||
}
|
||||
|
||||
|
||||
def _extract_chrome_cookies(browser_name, profile, logger):
|
||||
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
|
||||
logger.info('Extracting cookies from {}'.format(browser_name))
|
||||
|
||||
if not SQLITE_AVAILABLE:
|
||||
@@ -234,7 +236,7 @@ def _extract_chrome_cookies(browser_name, profile, logger):
|
||||
raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root))
|
||||
logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path))
|
||||
|
||||
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger)
|
||||
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
|
||||
|
||||
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
|
||||
cursor = None
|
||||
@@ -247,6 +249,7 @@ def _extract_chrome_cookies(browser_name, profile, logger):
|
||||
'expires_utc, {} FROM cookies'.format(secure_column))
|
||||
jar = YoutubeDLCookieJar()
|
||||
failed_cookies = 0
|
||||
unencrypted_cookies = 0
|
||||
for host_key, name, value, encrypted_value, path, expires_utc, is_secure in cursor.fetchall():
|
||||
host_key = host_key.decode('utf-8')
|
||||
name = name.decode('utf-8')
|
||||
@@ -258,6 +261,8 @@ def _extract_chrome_cookies(browser_name, profile, logger):
|
||||
if value is None:
|
||||
failed_cookies += 1
|
||||
continue
|
||||
else:
|
||||
unencrypted_cookies += 1
|
||||
|
||||
cookie = compat_cookiejar_Cookie(
|
||||
version=0, name=name, value=value, port=None, port_specified=False,
|
||||
@@ -270,6 +275,9 @@ def _extract_chrome_cookies(browser_name, profile, logger):
|
||||
else:
|
||||
failed_message = ''
|
||||
logger.info('Extracted {} cookies from {}{}'.format(len(jar), browser_name, failed_message))
|
||||
counts = decryptor.cookie_counts.copy()
|
||||
counts['unencrypted'] = unencrypted_cookies
|
||||
logger.debug('cookie version breakdown: {}'.format(counts))
|
||||
return jar
|
||||
finally:
|
||||
if cursor is not None:
|
||||
@@ -305,10 +313,14 @@ class ChromeCookieDecryptor:
|
||||
def decrypt(self, encrypted_value):
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def cookie_counts(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def get_cookie_decryptor(browser_root, browser_keyring_name, logger):
|
||||
|
||||
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
|
||||
if sys.platform in ('linux', 'linux2'):
|
||||
return LinuxChromeCookieDecryptor(browser_keyring_name, logger)
|
||||
return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
|
||||
elif sys.platform == 'darwin':
|
||||
return MacChromeCookieDecryptor(browser_keyring_name, logger)
|
||||
elif sys.platform == 'win32':
|
||||
@@ -319,13 +331,12 @@ def get_cookie_decryptor(browser_root, browser_keyring_name, logger):
|
||||
|
||||
|
||||
class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
|
||||
def __init__(self, browser_keyring_name, logger):
|
||||
def __init__(self, browser_keyring_name, logger, *, keyring=None):
|
||||
self._logger = logger
|
||||
self._v10_key = self.derive_key(b'peanuts')
|
||||
if KEYRING_AVAILABLE:
|
||||
self._v11_key = self.derive_key(_get_linux_keyring_password(browser_keyring_name))
|
||||
else:
|
||||
self._v11_key = None
|
||||
password = _get_linux_keyring_password(browser_keyring_name, keyring, logger)
|
||||
self._v11_key = None if password is None else self.derive_key(password)
|
||||
self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
|
||||
|
||||
@staticmethod
|
||||
def derive_key(password):
|
||||
@@ -333,20 +344,27 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
|
||||
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
|
||||
return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16)
|
||||
|
||||
@property
|
||||
def cookie_counts(self):
|
||||
return self._cookie_counts
|
||||
|
||||
def decrypt(self, encrypted_value):
|
||||
version = encrypted_value[:3]
|
||||
ciphertext = encrypted_value[3:]
|
||||
|
||||
if version == b'v10':
|
||||
self._cookie_counts['v10'] += 1
|
||||
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
|
||||
|
||||
elif version == b'v11':
|
||||
self._cookie_counts['v11'] += 1
|
||||
if self._v11_key is None:
|
||||
self._logger.warning(f'cannot decrypt cookie {KEYRING_UNAVAILABLE_REASON}', only_once=True)
|
||||
self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
|
||||
return None
|
||||
return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)
|
||||
|
||||
else:
|
||||
self._cookie_counts['other'] += 1
|
||||
return None
|
||||
|
||||
|
||||
@@ -355,6 +373,7 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):
        self._logger = logger
        password = _get_mac_keyring_password(browser_keyring_name, logger)
        self._v10_key = None if password is None else self.derive_key(password)
        self._cookie_counts = {'v10': 0, 'other': 0}

    @staticmethod
    def derive_key(password):
@@ -362,11 +381,16 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):
        # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
        return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16)

    @property
    def cookie_counts(self):
        return self._cookie_counts

    def decrypt(self, encrypted_value):
        version = encrypted_value[:3]
        ciphertext = encrypted_value[3:]

        if version == b'v10':
            self._cookie_counts['v10'] += 1
            if self._v10_key is None:
                self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
                return None
@@ -374,6 +398,7 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):
            return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)

        else:
            self._cookie_counts['other'] += 1
            # other prefixes are considered 'old data' which were stored as plaintext
            # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
            return encrypted_value
@@ -383,12 +408,18 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_root, logger):
        self._logger = logger
        self._v10_key = _get_windows_v10_key(browser_root, logger)
        self._cookie_counts = {'v10': 0, 'other': 0}

    @property
    def cookie_counts(self):
        return self._cookie_counts

    def decrypt(self, encrypted_value):
        version = encrypted_value[:3]
        ciphertext = encrypted_value[3:]

        if version == b'v10':
            self._cookie_counts['v10'] += 1
            if self._v10_key is None:
                self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
                return None
@@ -408,6 +439,7 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
            return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)

        else:
            self._cookie_counts['other'] += 1
            # any other prefix means the data is DPAPI encrypted
            # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
            return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8')
@@ -421,6 +453,9 @@ def _extract_safari_cookies(profile, logger):

        cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')

    if not os.path.isfile(cookies_path):
        logger.debug('Trying secondary cookie location')
        cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies')
    if not os.path.isfile(cookies_path):
        raise FileNotFoundError('could not find safari cookies database')

@@ -577,35 +612,213 @@ def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
    return jar


def _get_linux_keyring_password(browser_keyring_name):
    password = keyring.get_password('{} Keys'.format(browser_keyring_name),
                                    '{} Safe Storage'.format(browser_keyring_name))
    if password is None:
class _LinuxDesktopEnvironment(Enum):
    """
    https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h
    DesktopEnvironment
    """
    OTHER = auto()
    CINNAMON = auto()
    GNOME = auto()
    KDE = auto()
    PANTHEON = auto()
    UNITY = auto()
    XFCE = auto()


class _LinuxKeyring(Enum):
    """
    https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h
    SelectedLinuxBackend
    """
    KWALLET = auto()
    GNOMEKEYRING = auto()
    BASICTEXT = auto()


SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys()


def _get_linux_desktop_environment(env):
    """
    https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
    GetDesktopEnvironment
    """
    xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
    desktop_session = env.get('DESKTOP_SESSION', None)
    if xdg_current_desktop is not None:
        xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()

        if xdg_current_desktop == 'Unity':
            if desktop_session is not None and 'gnome-fallback' in desktop_session:
                return _LinuxDesktopEnvironment.GNOME
            else:
                return _LinuxDesktopEnvironment.UNITY
        elif xdg_current_desktop == 'GNOME':
            return _LinuxDesktopEnvironment.GNOME
        elif xdg_current_desktop == 'X-Cinnamon':
            return _LinuxDesktopEnvironment.CINNAMON
        elif xdg_current_desktop == 'KDE':
            return _LinuxDesktopEnvironment.KDE
        elif xdg_current_desktop == 'Pantheon':
            return _LinuxDesktopEnvironment.PANTHEON
        elif xdg_current_desktop == 'XFCE':
            return _LinuxDesktopEnvironment.XFCE
    elif desktop_session is not None:
        if desktop_session in ('mate', 'gnome'):
            return _LinuxDesktopEnvironment.GNOME
        elif 'kde' in desktop_session:
            return _LinuxDesktopEnvironment.KDE
        elif 'xfce' in desktop_session:
            return _LinuxDesktopEnvironment.XFCE
    else:
        if 'GNOME_DESKTOP_SESSION_ID' in env:
            return _LinuxDesktopEnvironment.GNOME
        elif 'KDE_FULL_SESSION' in env:
            return _LinuxDesktopEnvironment.KDE
    return _LinuxDesktopEnvironment.OTHER


def _choose_linux_keyring(logger):
|
||||
"""
|
||||
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc
|
||||
SelectBackend
|
||||
"""
|
||||
desktop_environment = _get_linux_desktop_environment(os.environ)
|
||||
logger.debug('detected desktop environment: {}'.format(desktop_environment.name))
|
||||
if desktop_environment == _LinuxDesktopEnvironment.KDE:
|
||||
linux_keyring = _LinuxKeyring.KWALLET
|
||||
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
|
||||
linux_keyring = _LinuxKeyring.BASICTEXT
|
||||
else:
|
||||
linux_keyring = _LinuxKeyring.GNOMEKEYRING
|
||||
return linux_keyring
|
||||
|
||||
|
||||
def _get_kwallet_network_wallet(logger):
|
||||
""" The name of the wallet used to store network passwords.
|
||||
|
||||
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc
|
||||
KWalletDBus::NetworkWallet
|
||||
which does a dbus call to the following function:
|
||||
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
|
||||
Wallet::NetworkWallet
|
||||
"""
|
||||
default_wallet = 'kdewallet'
|
||||
try:
|
||||
proc = Popen([
|
||||
'dbus-send', '--session', '--print-reply=literal',
|
||||
'--dest=org.kde.kwalletd5',
|
||||
'/modules/kwalletd5',
|
||||
'org.kde.KWallet.networkWallet'
|
||||
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||
|
||||
stdout, stderr = proc.communicate_or_kill()
|
||||
if proc.returncode != 0:
|
||||
logger.warning('failed to read NetworkWallet')
|
||||
return default_wallet
|
||||
else:
|
||||
network_wallet = stdout.decode('utf-8').strip()
|
||||
logger.debug('NetworkWallet = "{}"'.format(network_wallet))
|
||||
return network_wallet
|
||||
except BaseException as e:
|
||||
logger.warning('exception while obtaining NetworkWallet: {}'.format(e))
|
||||
return default_wallet
|
||||
|
||||
|
||||
def _get_kwallet_password(browser_keyring_name, logger):
|
||||
logger.debug('using kwallet-query to obtain password from kwallet')
|
||||
|
||||
if shutil.which('kwallet-query') is None:
|
||||
logger.error('kwallet-query command not found. KWallet and kwallet-query '
|
||||
'must be installed to read from KWallet. kwallet-query should be'
|
||||
'included in the kwallet package for your distribution')
|
||||
return b''
|
||||
|
||||
network_wallet = _get_kwallet_network_wallet(logger)
|
||||
|
||||
try:
|
||||
proc = Popen([
|
||||
'kwallet-query',
|
||||
'--read-password', '{} Safe Storage'.format(browser_keyring_name),
|
||||
'--folder', '{} Keys'.format(browser_keyring_name),
|
||||
network_wallet
|
||||
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||
|
||||
stdout, stderr = proc.communicate_or_kill()
|
||||
if proc.returncode != 0:
|
||||
logger.error('kwallet-query failed with return code {}. Please consult '
|
||||
'the kwallet-query man page for details'.format(proc.returncode))
|
||||
return b''
|
||||
else:
|
||||
if stdout.lower().startswith(b'failed to read'):
|
||||
logger.debug('failed to read password from kwallet. Using empty string instead')
|
||||
# this sometimes occurs in KDE because chrome does not check hasEntry and instead
|
||||
# just tries to read the value (which kwallet returns "") whereas keyring checks hasEntry
|
||||
# to verify this:
|
||||
# just tries to read the value (which kwallet returns "") whereas kwallet-query
|
||||
# checks hasEntry. To verify this:
|
||||
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
|
||||
# while starting chrome.
|
||||
# this may be a bug as the intended behaviour is to generate a random password and store
|
||||
# it, but that doesn't matter here.
|
||||
password = ''
|
||||
return password.encode('utf-8')
|
||||
return b''
|
||||
else:
|
||||
logger.debug('password found')
|
||||
if stdout[-1:] == b'\n':
|
||||
stdout = stdout[:-1]
|
||||
return stdout
|
||||
except BaseException as e:
|
||||
logger.warning(f'exception running kwallet-query: {type(e).__name__}({e})')
|
||||
return b''
|
||||
|
||||
|
||||
def _get_gnome_keyring_password(browser_keyring_name, logger):
|
||||
if not SECRETSTORAGE_AVAILABLE:
|
||||
logger.error('secretstorage not available {}'.format(SECRETSTORAGE_UNAVAILABLE_REASON))
|
||||
return b''
|
||||
# the Gnome keyring does not seem to organise keys in the same way as KWallet,
|
||||
# using `dbus-monitor` during startup, it can be observed that chromium lists all keys
|
||||
# and presumably searches for its key in the list. It appears that we must do the same.
|
||||
# https://github.com/jaraco/keyring/issues/556
|
||||
with contextlib.closing(secretstorage.dbus_init()) as con:
|
||||
col = secretstorage.get_default_collection(con)
|
||||
for item in col.get_all_items():
|
||||
if item.get_label() == '{} Safe Storage'.format(browser_keyring_name):
|
||||
return item.get_secret()
|
||||
else:
|
||||
logger.error('failed to read from keyring')
|
||||
return b''
|
||||
|
||||
|
||||
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
|
||||
# note: chrome/chromium can be run with the following flags to determine which keyring backend
|
||||
# it has chosen to use
|
||||
# chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_
|
||||
# Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection
|
||||
# will not be sufficient in all cases.
|
||||
|
||||
keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger)
|
||||
logger.debug(f'Chosen keyring: {keyring.name}')
|
||||
|
||||
if keyring == _LinuxKeyring.KWALLET:
|
||||
return _get_kwallet_password(browser_keyring_name, logger)
|
||||
elif keyring == _LinuxKeyring.GNOMEKEYRING:
|
||||
return _get_gnome_keyring_password(browser_keyring_name, logger)
|
||||
elif keyring == _LinuxKeyring.BASICTEXT:
|
||||
# when basic text is chosen, all cookies are stored as v10 (so no keyring password is required)
|
||||
return None
|
||||
assert False, f'Unknown keyring {keyring}'
|
||||
|
||||
|
||||
def _get_mac_keyring_password(browser_keyring_name, logger):
|
||||
if KEYRING_AVAILABLE:
|
||||
logger.debug('using keyring to obtain password')
|
||||
password = keyring.get_password('{} Safe Storage'.format(browser_keyring_name), browser_keyring_name)
|
||||
return password.encode('utf-8')
|
||||
else:
|
||||
logger.debug('using find-generic-password to obtain password')
|
||||
logger.debug('using find-generic-password to obtain password from OSX keychain')
|
||||
try:
|
||||
proc = Popen(
|
||||
['security', 'find-generic-password',
|
||||
'-w', # write password to stdout
|
||||
'-a', browser_keyring_name, # match 'account'
|
||||
'-s', '{} Safe Storage'.format(browser_keyring_name)], # match 'service'
|
||||
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
|
||||
try:
|
||||
|
||||
stdout, stderr = proc.communicate_or_kill()
|
||||
if stdout[-1:] == b'\n':
|
||||
stdout = stdout[:-1]
|
||||
@@ -640,10 +853,9 @@ def pbkdf2_sha1(password, salt, iterations, key_length):
|
||||
|
||||
|
||||
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
|
||||
plaintext = aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector)
|
||||
padding_length = plaintext[-1]
|
||||
plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
|
||||
try:
|
||||
return plaintext[:-padding_length].decode('utf-8')
|
||||
return plaintext.decode('utf-8')
|
||||
except UnicodeDecodeError:
|
||||
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
|
||||
return None
|
||||
@@ -736,10 +948,11 @@ def _is_path(value):
    return os.path.sep in value


def _parse_browser_specification(browser_name, profile=None):
    browser_name = browser_name.lower()
def _parse_browser_specification(browser_name, profile=None, keyring=None):
    if browser_name not in SUPPORTED_BROWSERS:
        raise ValueError(f'unsupported browser: "{browser_name}"')
    if keyring not in (None, *SUPPORTED_KEYRINGS):
        raise ValueError(f'unsupported keyring: "{keyring}"')
    if profile is not None and _is_path(profile):
        profile = os.path.expanduser(profile)
    return browser_name, profile
    return browser_name, profile, keyring
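
An illustrative call to the extended parser (hypothetical values):

_parse_browser_specification('chrome', '~/some/profile', 'kwallet')
# -> ('chrome', '/home/user/some/profile', 'kwallet'); the profile path is
#    expanded, and an unknown keyring name raises ValueError as shown above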
@@ -12,10 +12,15 @@ def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=N
    info_copy = info_dict.copy()
    info_copy['to_stdout'] = to_stdout

    downloaders = [_get_suitable_downloader(info_copy, proto, params, default)
                   for proto in (protocol or info_copy['protocol']).split('+')]
    protocols = (protocol or info_copy['protocol']).split('+')
    downloaders = [_get_suitable_downloader(info_copy, proto, params, default) for proto in protocols]

    if set(downloaders) == {FFmpegFD} and FFmpegFD.can_merge_formats(info_copy, params):
        return FFmpegFD
    elif (set(downloaders) == {DashSegmentsFD}
            and not (to_stdout and len(protocols) > 1)
            and set(protocols) == {'http_dash_segments_generator'}):
        return DashSegmentsFD
    elif len(downloaders) == 1:
        return downloaders[0]
    return None
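
A worked example of the new branch (hypothetical protocol string):

protocols = 'http_dash_segments_generator+http_dash_segments_generator'.split('+')
# set(protocols) == {'http_dash_segments_generator'}, so the downloader set
# collapses to {DashSegmentsFD}, which is returned once for both streams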
@@ -25,6 +30,7 @@ def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=N
from .common import FileDownloader
from .dash import DashSegmentsFD
from .f4m import F4mFD
from .fc2 import FC2LiveFD
from .hls import HlsFD
from .http import HttpFD
from .rtmp import RtmpFD
@@ -41,6 +47,7 @@ from .external import (

PROTOCOL_MAP = {
    'rtmp': RtmpFD,
    'rtmpe': RtmpFD,
    'rtmp_ffmpeg': FFmpegFD,
    'm3u8_native': HlsFD,
    'm3u8': FFmpegFD,
@@ -48,9 +55,11 @@ PROTOCOL_MAP = {
    'rtsp': RtspFD,
    'f4m': F4mFD,
    'http_dash_segments': DashSegmentsFD,
    'http_dash_segments_generator': DashSegmentsFD,
    'ism': IsmFD,
    'mhtml': MhtmlFD,
    'niconico_dmc': NiconicoDmcFD,
    'fc2_live': FC2LiveFD,
    'websocket_frag': WebSocketFragmentFD,
    'youtube_live_chat': YoutubeLiveChatFD,
    'youtube_live_chat_replay': YoutubeLiveChatFD,
@@ -62,6 +71,7 @@ def shorten_protocol_name(proto, simplify=False):
    'm3u8_native': 'm3u8_n',
    'rtmp_ffmpeg': 'rtmp_f',
    'http_dash_segments': 'dash',
    'http_dash_segments_generator': 'dash_g',
    'niconico_dmc': 'dmc',
    'websocket_frag': 'WSfrag',
}
@@ -70,6 +80,7 @@ def shorten_protocol_name(proto, simplify=False):
    'https': 'http',
    'ftps': 'ftp',
    'm3u8_native': 'm3u8',
    'http_dash_segments_generator': 'dash',
    'rtmp_ffmpeg': 'rtmp',
    'm3u8_frag_urls': 'm3u8',
    'dash_frag_urls': 'dash',
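
From the two tables above, the new protocol shortens as follows (the simplify parameter is taken from the signature in the hunk headers):

shorten_protocol_name('http_dash_segments_generator')        # -> 'dash_g'
shorten_protocol_name('http_dash_segments_generator', True)  # -> 'dash'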
@@ -108,7 +119,7 @@ def _get_suitable_downloader(info_dict, protocol, params, default):
        return FFmpegFD
    elif (external_downloader or '').lower() == 'native':
        return HlsFD
    elif get_suitable_downloader(
    elif protocol == 'm3u8_native' and get_suitable_downloader(
            info_dict, params, None, protocol='m3u8_frag_urls', to_stdout=info_dict['to_stdout']):
        return HlsFD
    elif params.get('hls_prefer_native') is True:

@@ -4,12 +4,14 @@ import os
import re
import time
import random
import errno

from ..utils import (
    decodeArgument,
    encodeFilename,
    error_to_compat_str,
    format_bytes,
    sanitize_open,
    shell_quote,
    timeconvert,
    timetuple_from_msec,
@@ -39,6 +41,7 @@ class FileDownloader(object):
    ratelimit:           Download speed limit, in bytes/sec.
    throttledratelimit:  Assume the download is being throttled below this speed (bytes/sec)
    retries:             Number of times to retry for HTTP error 5xx
    file_access_retries: Number of times to retry on file access error
    buffersize:          Size of download buffer in bytes.
    noresizebuffer:      Do not automatically resize the download buffer.
    continuedl:          Try to continue downloads if possible.
@@ -93,6 +96,8 @@ class FileDownloader(object):
    def format_percent(percent):
        if percent is None:
            return '---.-%'
        elif percent == 100:
            return '100%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
@@ -205,13 +210,41 @@ class FileDownloader(object):
    def ytdl_filename(self, filename):
        return filename + '.ytdl'

    def wrap_file_access(action, *, fatal=False):
        def outer(func):
            def inner(self, *args, **kwargs):
                file_access_retries = self.params.get('file_access_retries', 0)
                retry = 0
                while True:
                    try:
                        return func(self, *args, **kwargs)
                    except (IOError, OSError) as err:
                        retry = retry + 1
                        if retry > file_access_retries or err.errno not in (errno.EACCES, errno.EINVAL):
                            if not fatal:
                                self.report_error(f'unable to {action} file: {err}')
                                return
                            raise
                        self.to_screen(
                            f'[download] Unable to {action} file due to file access error. '
                            f'Retrying (attempt {retry} of {self.format_retries(file_access_retries)}) ...')
                        time.sleep(0.01)
            return inner
        return outer

    @wrap_file_access('open', fatal=True)
    def sanitize_open(self, filename, open_mode):
        return sanitize_open(filename, open_mode)

    @wrap_file_access('remove')
    def try_remove(self, filename):
        os.remove(filename)

    @wrap_file_access('rename')
    def try_rename(self, old_filename, new_filename):
        if old_filename == new_filename:
            return
        try:
            os.replace(old_filename, new_filename)
        except (IOError, OSError) as err:
            self.report_error(f'unable to rename file: {err}')

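
The decorator above centralises retry-on-file-access-error handling; the same pattern as a self-contained sketch (hypothetical helper, outside FileDownloader):

import errno
import os
import time

def retry_file_access(func, retries=3):
    # retry transient EACCES/EINVAL failures (e.g. a file briefly locked by
    # another process on Windows), re-raising anything else immediately
    for attempt in range(retries + 1):
        try:
            return func()
        except OSError as err:
            if attempt == retries or err.errno not in (errno.EACCES, errno.EINVAL):
                raise
            time.sleep(0.01)

# e.g. retry_file_access(lambda: os.remove('video.part'))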
    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
@@ -247,11 +280,29 @@ class FileDownloader(object):
            self._multiline = BreaklineStatusPrinter(self.ydl._screen_file, lines)
        else:
            self._multiline = MultilinePrinter(self.ydl._screen_file, lines, not self.params.get('quiet'))
        self._multiline.allow_colors = self._multiline._HAVE_FULLCAP and not self.params.get('no_color')

    def _finish_multiline_status(self):
        self._multiline.end()

    def _report_progress_status(self, s):
    _progress_styles = {
        'downloaded_bytes': 'light blue',
        'percent': 'light blue',
        'eta': 'yellow',
        'speed': 'green',
        'elapsed': 'bold white',
        'total_bytes': '',
        'total_bytes_estimate': '',
    }

    def _report_progress_status(self, s, default_template):
        for name, style in self._progress_styles.items():
            name = f'_{name}_str'
            if name not in s:
                continue
            s[name] = self._format_progress(s[name], style)
        s['_default_template'] = default_template % s

        progress_dict = s.copy()
        progress_dict.pop('info_dict')
        progress_dict = {'info': s['info_dict'], 'progress': progress_dict}
@@ -264,6 +315,10 @@ class FileDownloader(object):
            progress_template.get('download-title') or 'yt-dlp %(progress._default_template)s',
            progress_dict))

    def _format_progress(self, *args, **kwargs):
        return self.ydl._format_text(
            self._multiline.stream, self._multiline.allow_colors, *args, **kwargs)

    def report_progress(self, s):
        if s['status'] == 'finished':
            if self.params.get('noprogress'):
@@ -276,8 +331,7 @@ class FileDownloader(object):
                s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                msg_template += ' in %(_elapsed_str)s'
            s['_percent_str'] = self.format_percent(100)
            s['_default_template'] = msg_template % s
            self._report_progress_status(s)
            self._report_progress_status(s, msg_template)
            return

        if s['status'] != 'downloading':
@@ -286,7 +340,7 @@ class FileDownloader(object):
        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'
            s['_eta_str'] = 'Unknown'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
@@ -318,13 +372,12 @@ class FileDownloader(object):
            else:
                msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
        else:
            msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
            msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
        if s.get('fragment_index') and s.get('fragment_count'):
            msg_template += ' (frag %(fragment_index)s/%(fragment_count)s)'
        elif s.get('fragment_index'):
            msg_template += ' (frag %(fragment_index)s)'
        s['_default_template'] = msg_template % s
        self._report_progress_status(s)
        self._report_progress_status(s, msg_template)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
@@ -375,6 +428,7 @@ class FileDownloader(object):
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            }, info_dict)
            self._finish_multiline_status()
            return True, False

        if subtitle is False:

@@ -1,4 +1,5 @@
from __future__ import unicode_literals
import time

from ..downloader import get_suitable_downloader
from .fragment import FragmentFD
@@ -15,27 +16,53 @@ class DashSegmentsFD(FragmentFD):
    FD_NAME = 'dashsegments'

    def real_download(self, filename, info_dict):
        if info_dict.get('is_live'):
        if info_dict.get('is_live') and set(info_dict['protocol'].split('+')) != {'http_dash_segments_generator'}:
            self.report_error('Live DASH videos are not supported')

        fragment_base_url = info_dict.get('fragment_base_url')
        fragments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']

        real_start = time.time()
        real_downloader = get_suitable_downloader(
            info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

        requested_formats = [{**info_dict, **fmt} for fmt in info_dict.get('requested_formats', [])]
        args = []
        for fmt in requested_formats or [info_dict]:
            try:
                fragment_count = 1 if self.params.get('test') else len(fmt['fragments'])
            except TypeError:
                fragment_count = None
            ctx = {
                'filename': filename,
                'total_frags': len(fragments),
                'filename': fmt.get('filepath') or filename,
                'live': 'is_from_start' if fmt.get('is_from_start') else fmt.get('is_live'),
                'total_frags': fragment_count,
            }

            if real_downloader:
                self._prepare_external_frag_download(ctx)
            else:
                self._prepare_and_start_frag_download(ctx, info_dict)
                self._prepare_and_start_frag_download(ctx, fmt)
                ctx['start'] = real_start

            fragments_to_download = self._get_fragments(fmt, ctx)

            if real_downloader:
                self.to_screen(
                    '[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
                info_dict['fragments'] = list(fragments_to_download)
                fd = real_downloader(self.ydl, self.params)
                return fd.real_download(filename, info_dict)

            args.append([ctx, fragments_to_download, fmt])

        return self.download_and_append_fragments_multiple(*args)

    def _resolve_fragments(self, fragments, ctx):
        fragments = fragments(ctx) if callable(fragments) else fragments
        return [next(iter(fragments))] if self.params.get('test') else fragments

    def _get_fragments(self, fmt, ctx):
        fragment_base_url = fmt.get('fragment_base_url')
        fragments = self._resolve_fragments(fmt['fragments'], ctx)

        fragments_to_download = []
        frag_index = 0
        for i, fragment in enumerate(fragments):
            frag_index += 1
@@ -46,17 +73,8 @@ class DashSegmentsFD(FragmentFD):
                assert fragment_base_url
                fragment_url = urljoin(fragment_base_url, fragment['path'])

            fragments_to_download.append({
            yield {
                'frag_index': frag_index,
                'index': i,
                'url': fragment_url,
            })

        if real_downloader:
            self.to_screen(
                '[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename()))
            info_dict['fragments'] = fragments_to_download
            fd = real_downloader(self.ydl, self.params)
            return fd.real_download(filename, info_dict)

        return self.download_and_append_fragments(ctx, fragments_to_download, info_dict)
            }
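
The refactor above turns fragment collection into a lazy generator. A minimal sketch of the pattern (hypothetical data; only the dict keys are taken from the code above):

def numbered_fragments(urls):
    # yield fragment dicts one at a time instead of building a full list,
    # so a live manifest can keep feeding the downloader as it grows
    for i, url in enumerate(urls):
        yield {'frag_index': i + 1, 'index': i, 'url': url}

# next(numbered_fragments(['https://cdn.example/seg0.m4s']))
# -> {'frag_index': 1, 'index': 0, 'url': 'https://cdn.example/seg0.m4s'}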
@@ -17,12 +17,13 @@ from ..utils import (
    cli_valueless_option,
    cli_bool_option,
    _configuration_args,
    determine_ext,
    encodeFilename,
    encodeArgument,
    handle_youtubedl_headers,
    check_executable,
    Popen,
    sanitize_open,
    remove_end,
)


@@ -144,11 +145,11 @@ class ExternalFD(FragmentFD):
            return -1

        decrypt_fragment = self.decrypter(info_dict)
        dest, _ = sanitize_open(tmpfilename, 'wb')
        dest, _ = self.sanitize_open(tmpfilename, 'wb')
        for frag_index, fragment in enumerate(info_dict['fragments']):
            fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
            try:
                src, _ = sanitize_open(fragment_filename, 'rb')
                src, _ = self.sanitize_open(fragment_filename, 'rb')
            except IOError as err:
                if skip_unavailable_fragments and frag_index > 1:
                    self.report_skip_fragment(frag_index, err)
@@ -158,9 +159,9 @@ class ExternalFD(FragmentFD):
            dest.write(decrypt_fragment(fragment, src.read()))
            src.close()
            if not self.params.get('keep_fragments', False):
                os.remove(encodeFilename(fragment_filename))
                self.try_remove(encodeFilename(fragment_filename))
        dest.close()
        os.remove(encodeFilename('%s.frag.urls' % tmpfilename))
        self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
        return 0


@@ -252,7 +253,7 @@ class Aria2cFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [self.exe, '-c',
               '--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
               '--file-allocation=none', '-x16', '-j16', '-s16']
               '--http-accept-gzip=true', '--file-allocation=none', '-x16', '-j16', '-s16']
        if 'fragments' in info_dict:
            cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true']
        else:
@@ -266,6 +267,7 @@ class Aria2cFD(ExternalFD):
        cmd += self._option('--all-proxy', 'proxy')
        cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
        cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
        cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
        cmd += self._configuration_args()

        # aria2c strips out spaces from the beginning/end of filenames and paths.
@@ -290,7 +292,7 @@ class Aria2cFD(ExternalFD):
            for frag_index, fragment in enumerate(info_dict['fragments']):
                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
                url_list.append('%s\n\tout=%s' % (fragment['url'], fragment_filename))
            stream, _ = sanitize_open(url_list_file, 'wb')
            stream, _ = self.sanitize_open(url_list_file, 'wb')
            stream.write('\n'.join(url_list).encode('utf-8'))
            stream.close()
            cmd += ['-i', url_list_file]
@@ -304,7 +306,7 @@ class HttpieFD(ExternalFD):

    @classmethod
    def available(cls, path=None):
        return ExternalFD.available(cls, path or 'http')
        return super().available(path or 'http')

    def _make_cmd(self, tmpfilename, info_dict):
        cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
@@ -443,8 +445,7 @@ class FFmpegFD(ExternalFD):
        if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
            for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
                stream_number = fmt.get('manifest_stream_number', 0)
                a_or_v = 'a' if fmt.get('acodec') != 'none' else 'v'
                args.extend(['-map', f'{i}:{a_or_v}:{stream_number}'])
                args.extend(['-map', f'{i}:{stream_number}'])

        if self.params.get('test', False):
            args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
@@ -464,6 +465,15 @@ class FFmpegFD(ExternalFD):
            args += ['-f', 'flv']
        elif ext == 'mp4' and tmpfilename == '-':
            args += ['-f', 'mpegts']
        elif ext == 'unknown_video':
            ext = determine_ext(remove_end(tmpfilename, '.part'))
            if ext == 'unknown_video':
                self.report_warning(
                    'The video format is unknown and cannot be downloaded by ffmpeg. '
                    'Explicitly set the extension in the filename to attempt download in that format')
            else:
                self.report_warning(f'The video format is unknown. Trying to download as {ext} according to the filename')
                args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]
        else:
            args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]


@@ -366,7 +366,7 @@ class F4mFD(FragmentFD):
        ctx = {
            'filename': filename,
            'total_frags': total_frags,
            'live': live,
            'live': bool(live),
        }

        self._prepare_frag_download(ctx)

41
yt_dlp/downloader/fc2.py
Normal file
@@ -0,0 +1,41 @@
from __future__ import division, unicode_literals

import threading

from .common import FileDownloader
from .external import FFmpegFD


class FC2LiveFD(FileDownloader):
    """
    Downloads FC2 live without being stopped. <br>
    Note, this is not a part of public API, and will be removed without notice.
    DO NOT USE
    """

    def real_download(self, filename, info_dict):
        ws = info_dict['ws']

        heartbeat_lock = threading.Lock()
        heartbeat_state = [None, 1]

        def heartbeat():
            try:
                heartbeat_state[1] += 1
                ws.send('{"name":"heartbeat","arguments":{},"id":%d}' % heartbeat_state[1])
            except Exception:
                self.to_screen('[fc2:live] Heartbeat failed')

            with heartbeat_lock:
                heartbeat_state[0] = threading.Timer(30, heartbeat)
                heartbeat_state[0]._daemonic = True
                heartbeat_state[0].start()

        heartbeat()

        new_info_dict = info_dict.copy()
        new_info_dict.update({
            'ws': None,
            'protocol': 'live_ffmpeg',
        })
        return FFmpegFD(self.ydl, self.params or {}).download(filename, new_info_dict)
@@ -1,9 +1,10 @@
from __future__ import division, unicode_literals

import http.client
import json
import math
import os
import time
import json
from math import ceil

try:
    import concurrent.futures
@@ -13,8 +14,9 @@ except ImportError:

from .common import FileDownloader
from .http import HttpFD
from ..aes import aes_cbc_decrypt_bytes
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import (
    compat_os_name,
    compat_urllib_error,
    compat_struct_pack,
)
@@ -22,8 +24,8 @@ from ..utils import (
    DownloadError,
    error_to_compat_str,
    encodeFilename,
    sanitize_open,
    sanitized_Request,
    traverse_obj,
)


@@ -90,11 +92,11 @@ class FragmentFD(FileDownloader):
        self._start_frag_download(ctx, info_dict)

    def __do_ytdl_file(self, ctx):
        return not ctx['live'] and not ctx['tmpfilename'] == '-' and not self.params.get('_no_ytdl_file')
        return ctx['live'] is not True and ctx['tmpfilename'] != '-' and not self.params.get('_no_ytdl_file')

    def _read_ytdl_file(self, ctx):
        assert 'ytdl_corrupt' not in ctx
        stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
        stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'r')
        try:
            ytdl_data = json.loads(stream.read())
            ctx['fragment_index'] = ytdl_data['downloader']['current_fragment']['index']
@@ -106,7 +108,7 @@ class FragmentFD(FileDownloader):
            stream.close()

    def _write_ytdl_file(self, ctx):
        frag_index_stream, _ = sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        frag_index_stream, _ = self.sanitize_open(self.ytdl_filename(ctx['filename']), 'w')
        try:
            downloader = {
                'current_fragment': {
@@ -135,10 +137,15 @@ class FragmentFD(FileDownloader):
        if fragment_info_dict.get('filetime'):
            ctx['fragment_filetime'] = fragment_info_dict.get('filetime')
        ctx['fragment_filename_sanitized'] = fragment_filename
        try:
            return True, self._read_fragment(ctx)
        except FileNotFoundError:
            if not info_dict.get('is_live'):
                raise
            return False, None

    def _read_fragment(self, ctx):
        down, frag_sanitized = sanitize_open(ctx['fragment_filename_sanitized'], 'rb')
        down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb')
        ctx['fragment_filename_sanitized'] = frag_sanitized
        frag_content = down.read()
        down.close()
@@ -152,7 +159,7 @@ class FragmentFD(FileDownloader):
        if self.__do_ytdl_file(ctx):
            self._write_ytdl_file(ctx)
        if not self.params.get('keep_fragments', False):
            os.remove(encodeFilename(ctx['fragment_filename_sanitized']))
            self.try_remove(encodeFilename(ctx['fragment_filename_sanitized']))
        del ctx['fragment_filename_sanitized']

    def _prepare_frag_download(self, ctx):
@@ -171,7 +178,7 @@ class FragmentFD(FileDownloader):
        dl = HttpQuietDownloader(
            self.ydl,
            {
                'continuedl': True,
                'continuedl': self.params.get('continuedl', True),
                'quiet': self.params.get('quiet'),
                'noprogress': True,
                'ratelimit': self.params.get('ratelimit'),
@@ -214,7 +221,7 @@ class FragmentFD(FileDownloader):
            self._write_ytdl_file(ctx)
            assert ctx['fragment_index'] == 0

        dest_stream, tmpfilename = sanitize_open(tmpfilename, open_mode)
        dest_stream, tmpfilename = self.sanitize_open(tmpfilename, open_mode)

        ctx.update({
            'dl': dl,
@@ -298,7 +305,7 @@ class FragmentFD(FileDownloader):
        if self.__do_ytdl_file(ctx):
            ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename']))
            if os.path.isfile(ytdl_filename):
                os.remove(ytdl_filename)
                self.try_remove(ytdl_filename)
        elapsed = time.time() - ctx['started']

        if ctx['tmpfilename'] == '-':
@@ -365,8 +372,7 @@ class FragmentFD(FileDownloader):
            # not what it decrypts to.
            if self.params.get('test', False):
                return frag_content
            decrypted_data = aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv)
            return decrypted_data[:-decrypted_data[-1]]
            return unpad_pkcs7(aes_cbc_decrypt_bytes(frag_content, decrypt_info['KEY'], iv))

        return decrypt_fragment

@@ -375,45 +381,86 @@ class FragmentFD(FileDownloader):
        @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
        all args must be either tuple or list
        '''
        interrupt_trigger = [True]
        max_progress = len(args)
        if max_progress == 1:
            return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func)
        max_workers = self.params.get('concurrent_fragment_downloads', max_progress)
        max_workers = self.params.get('concurrent_fragment_downloads', 1)
        if max_progress > 1:
            self._prepare_multiline_status(max_progress)
        is_live = any(traverse_obj(args, (..., 2, 'is_live'), default=[]))

        def thread_func(idx, ctx, fragments, info_dict, tpe):
            ctx['max_progress'] = max_progress
            ctx['progress_idx'] = idx
            return self.download_and_append_fragments(ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, tpe=tpe)
            return self.download_and_append_fragments(
                ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func,
                tpe=tpe, interrupt_trigger=interrupt_trigger)

        class FTPE(concurrent.futures.ThreadPoolExecutor):
            # has to stop this or it's going to wait on the worker thread itself
            def __exit__(self, exc_type, exc_val, exc_tb):
                pass

        if compat_os_name == 'nt':
            def bindoj_result(future):
                while True:
                    try:
                        return future.result(0.1)
                    except KeyboardInterrupt:
                        raise
                    except concurrent.futures.TimeoutError:
                        continue
        else:
            def bindoj_result(future):
                return future.result()

        def interrupt_trigger_iter(fg):
            for f in fg:
                if not interrupt_trigger[0]:
                    break
                yield f

        spins = []
        for idx, (ctx, fragments, info_dict) in enumerate(args):
            tpe = FTPE(ceil(max_workers / max_progress))
            job = tpe.submit(thread_func, idx, ctx, fragments, info_dict, tpe)
            tpe = FTPE(math.ceil(max_workers / max_progress))
            job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe)
            spins.append((tpe, job))

        result = True
        for tpe, job in spins:
            try:
                result = result and job.result()
                result = result and bindoj_result(job)
            except KeyboardInterrupt:
                interrupt_trigger[0] = False
            finally:
                tpe.shutdown(wait=True)
        if not interrupt_trigger[0] and not is_live:
            raise KeyboardInterrupt()
        # we expect the user wants to stop and DOES want the preceding postprocessors to run;
        # so we return an intermediate result here instead of raising KeyboardInterrupt on live
        return result
    def download_and_append_fragments(self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None, tpe=None):
    def download_and_append_fragments(
            self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None,
            tpe=None, interrupt_trigger=None):
        if not interrupt_trigger:
            interrupt_trigger = (True, )

        fragment_retries = self.params.get('fragment_retries', 0)
        is_fatal = (lambda idx: idx == 0) if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)
        is_fatal = (
            ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0))
            if self.params.get('skip_unavailable_fragments', True) else (lambda _: True))

        if not pack_func:
            pack_func = lambda frag_content, _: frag_content

        def download_fragment(fragment, ctx):
            if not interrupt_trigger[0]:
                return False, fragment['frag_index']

            frag_index = ctx['fragment_index'] = fragment['frag_index']
            ctx['last_error'] = None
            headers = info_dict.get('http_headers', {}).copy()
            byte_range = fragment.get('byte_range')
            if byte_range:
@@ -428,12 +475,13 @@ class FragmentFD(FileDownloader):
                    if not success:
                        return False, frag_index
                    break
                except compat_urllib_error.HTTPError as err:
                except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err:
                    # Unavailable (possibly temporary) fragments may be served.
                    # First we try to retry then either skip or abort.
                    # See https://github.com/ytdl-org/youtube-dl/issues/10165,
                    # https://github.com/ytdl-org/youtube-dl/issues/10448).
                    count += 1
                    ctx['last_error'] = err
                    if count <= fragment_retries:
                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
                except DownloadError:
@@ -466,7 +514,8 @@ class FragmentFD(FileDownloader):

        decrypt_fragment = self.decrypter(info_dict)

        max_workers = self.params.get('concurrent_fragment_downloads', 1)
        max_workers = math.ceil(
            self.params.get('concurrent_fragment_downloads', 1) / ctx.get('max_progress', 1))
        if can_threaded_download and max_workers > 1:

            def _download_fragment(fragment):
@@ -484,6 +533,8 @@ class FragmentFD(FileDownloader):
                    return False
        else:
            for fragment in fragments:
                if not interrupt_trigger[0]:
                    break
                frag_content, frag_index = download_fragment(fragment, ctx)
                result = append_fragment(decrypt_fragment(fragment, frag_content), frag_index, ctx)
                if not result:

@@ -77,6 +77,15 @@ class HlsFD(FragmentFD):
            message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
                       'Decryption will be performed natively, but will be extremely slow')
        if not can_download:
            has_drm = re.search('|'.join([
                r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
                r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
            ]), s)
            if has_drm and not self.params.get('allow_unplayable_formats'):
                self.report_error(
                    'This video is DRM protected; Try selecting another format with --format or '
                    'add --check-formats to automatically fallback to the next best format')
                return False
            message = message or 'Unsupported features have been detected'
            fd = FFmpegFD(self.ydl, self.params)
            self.report_warning(f'{message}; extraction will be delegated to {fd.get_basename()}')

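
Illustrative manifest lines the DRM regex above is meant to match (made-up samples, not from a real stream):

#EXT-X-FAXS-CM:MBgAAAA...                                <- Adobe Flash Access
#EXT-X-SESSION-KEY:METHOD=SAMPLE-AES,URI="skd://key-id"  <- Apple FairPlay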
@@ -5,7 +5,6 @@ import os
import socket
import time
import random
import re

from .common import FileDownloader
from ..compat import (
@@ -16,7 +15,7 @@ from ..utils import (
    ContentTooShortError,
    encodeFilename,
    int_or_none,
    sanitize_open,
    parse_http_range,
    sanitized_Request,
    ThrottledDownload,
    write_xattr,
@@ -60,6 +59,9 @@ class HttpFD(FileDownloader):
        ctx.chunk_size = None
        throttle_start = None

        # parse given Range
        req_start, req_end, _ = parse_http_range(headers.get('Range'))

        if self.params.get('continuedl', True):
            # Establish possible resume length
            if os.path.isfile(encodeFilename(ctx.tmpfilename)):
@@ -92,6 +94,9 @@ class HttpFD(FileDownloader):
                if not is_test and chunk_size else chunk_size)
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if req_start is not None:
                    # offset the beginning of Range to be within request
                    range_start += req_start
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = 'ab'
@@ -100,7 +105,17 @@ class HttpFD(FileDownloader):
            else:
                range_start = None
            ctx.is_resume = False
            range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None

            if ctx.chunk_size:
                chunk_aware_end = range_start + ctx.chunk_size - 1
                # we're not allowed to download outside Range
                range_end = chunk_aware_end if req_end is None else min(chunk_aware_end, req_end)
            elif req_end is not None:
                # there's no need for chunked downloads, so download until the end of Range
                range_end = req_end
            else:
                range_end = None

            if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
                range_end = ctx.data_len - 1
            has_range = range_start is not None
@@ -125,13 +140,9 @@ class HttpFD(FileDownloader):
            # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
            if has_range:
                content_range = ctx.data.headers.get('Content-Range')
                if content_range:
                    content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range)
                content_range_start, content_range_end, content_len = parse_http_range(content_range)
                if content_range_start is not None and range_start == content_range_start:
                    # Content-Range is present and matches requested Range, resume is possible
                    if content_range_m:
                        if range_start == int(content_range_m.group(1)):
                            content_range_end = int_or_none(content_range_m.group(2))
                            content_len = int_or_none(content_range_m.group(3))
                    accept_content_len = (
                        # Non-chunked download
                        not ctx.chunk_size
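
A sketch of the parsing that parse_http_range performs here (behaviour assumed from the call sites and the regex it replaces above; helper name hypothetical):

import re

def parse_http_range_sketch(range_header):
    # 'bytes 500-999/1234' -> (500, 999, 1234); absent parts become None
    if range_header is None:
        return None, None, None
    m = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range_header)
    if not m:
        return None, None, None
    start, end, total = m.groups()
    return int(start), int(end) if end else None, int(total) if total else None

# parse_http_range_sketch('bytes 500-999/1234')  -> (500, 999, 1234)
# parse_http_range_sketch('bytes=500-')          -> (500, None, None)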
@@ -263,7 +274,7 @@ class HttpFD(FileDownloader):
            # Open destination file just in time
            if ctx.stream is None:
                try:
                    ctx.stream, ctx.tmpfilename = sanitize_open(
                    ctx.stream, ctx.tmpfilename = self.sanitize_open(
                        ctx.tmpfilename, ctx.open_mode)
                    assert ctx.stream is not None
                    ctx.filename = self.undo_temp_name(ctx.tmpfilename)

@@ -114,8 +114,8 @@ body > figure > img {
        fragment_base_url = info_dict.get('fragment_base_url')
        fragments = info_dict['fragments'][:1] if self.params.get(
            'test', False) else info_dict['fragments']
        title = info_dict['title']
        origin = info_dict['webpage_url']
        title = info_dict.get('title', info_dict['format_id'])
        origin = info_dict.get('webpage_url', info_dict['url'])

        ctx = {
            'filename': filename,

@@ -5,9 +5,12 @@ import threading

try:
    import websockets
    has_websockets = True
except ImportError:
except (ImportError, SyntaxError):
    # websockets 3.10 on python 3.6 causes SyntaxError
    # See https://github.com/yt-dlp/yt-dlp/issues/2633
    has_websockets = False
else:
    has_websockets = True

from .common import FileDownloader
from .external import FFmpegFD

@@ -22,6 +22,9 @@ class YoutubeLiveChatFD(FragmentFD):
    def real_download(self, filename, info_dict):
        video_id = info_dict['video_id']
        self.to_screen('[%s] Downloading live chat' % self.FD_NAME)
        if not self.params.get('skip_download'):
            self.report_warning('Live chat download runs until the livestream ends. '
                                'If you wish to download the video simultaneously, run a separate yt-dlp instance')

        fragment_retries = self.params.get('fragment_retries', 0)
        test = self.params.get('test', False)

@@ -8,6 +8,7 @@ import time
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    dict_get,
    ExtractorError,
    js_to_json,
    int_or_none,
@@ -212,7 +213,7 @@ class ABCIViewIE(InfoExtractor):
            'hdnea': token,
        })

        for sd in ('720', 'sd', 'sd-low'):
        for sd in ('1080', '720', 'sd', 'sd-low'):
            sd_url = try_get(
                stream, lambda x: x['streams']['hls'][sd], compat_str)
            if not sd_url:
@@ -233,8 +234,6 @@ class ABCIViewIE(InfoExtractor):
        }]

        is_live = video_params.get('livestream') == '1'
        if is_live:
            title = self._live_title(title)

        return {
            'id': video_id,
@@ -255,3 +254,65 @@ class ABCIViewIE(InfoExtractor):
            'subtitles': subtitles,
            'is_live': is_live,
        }


class ABCIViewShowSeriesIE(InfoExtractor):
    IE_NAME = 'abc.net.au:iview:showseries'
    _VALID_URL = r'https?://iview\.abc\.net\.au/show/(?P<id>[^/]+)(?:/series/\d+)?$'
    _GEO_COUNTRIES = ['AU']

    _TESTS = [{
        'url': 'https://iview.abc.net.au/show/upper-middle-bogan',
        'info_dict': {
            'id': '124870-1',
            'title': 'Series 1',
            'description': 'md5:93119346c24a7c322d446d8eece430ff',
            'series': 'Upper Middle Bogan',
            'season': 'Series 1',
            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
        },
        'playlist_count': 8,
    }, {
        'url': 'https://iview.abc.net.au/show/upper-middle-bogan',
        'info_dict': {
            'id': 'CO1108V001S00',
            'ext': 'mp4',
            'title': 'Series 1 Ep 1 I\'m A Swan',
            'description': 'md5:7b676758c1de11a30b79b4d301e8da93',
            'series': 'Upper Middle Bogan',
            'uploader_id': 'abc1',
            'upload_date': '20210630',
            'timestamp': 1625036400,
        },
        'params': {
            'noplaylist': True,
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)
        webpage_data = self._search_regex(
            r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;',
            webpage, 'initial state')
        video_data = self._parse_json(
            unescapeHTML(webpage_data).encode('utf-8').decode('unicode_escape'), show_id)
        video_data = video_data['route']['pageData']['_embedded']

        highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])
        if not self._yes_playlist(show_id, bool(highlight), video_label='highlight video'):
            return self.url_result(highlight, ie=ABCIViewIE.ie_key())

        series = video_data['selectedSeries']
        return {
            '_type': 'playlist',
            'entries': [self.url_result(episode['shareUrl'])
                        for episode in series['_embedded']['videoEpisodes']],
            'id': series.get('id'),
            'title': dict_get(series, ('title', 'displaySubtitle')),
            'description': series.get('description'),
            'series': dict_get(series, ('showTitle', 'displayTitle')),
            'season': dict_get(series, ('title', 'displaySubtitle')),
            'thumbnail': series.get('thumbnail'),
        }

484
yt_dlp/extractor/abematv.py
Normal file
@@ -0,0 +1,484 @@
import io
import json
import time
import hashlib
import hmac
import re
import struct
from base64 import urlsafe_b64encode
from binascii import unhexlify

from .common import InfoExtractor
from ..aes import aes_ecb_decrypt
from ..compat import (
    compat_urllib_response,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    decode_base,
    int_or_none,
    random_uuidv4,
    request_to_url,
    time_seconds,
    update_url_query,
    traverse_obj,
    intlist_to_bytes,
    bytes_to_intlist,
    urljoin,
)


# NOTE: the network handler related code is a temporary measure until the network stack overhaul PRs are merged (#2861/#2862)

def add_opener(ydl, handler):
    ''' Add a handler for opening URLs, like _download_webpage '''
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
    ydl._opener.add_handler(handler)


def remove_opener(ydl, handler):
    '''
    Remove handler(s) for opening URLs
    @param handler Either the handler object itself or a handler type.
           Specifying a handler type will remove all handlers for which isinstance returns True.
    '''
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
    opener = ydl._opener
    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
    if isinstance(handler, (type, tuple)):
        find_cp = lambda x: isinstance(x, handler)
    else:
        find_cp = lambda x: x is handler

    removed = []
    for meth in dir(handler):
        if meth in ["redirect_request", "do_open", "proxy_open"]:
            # oops, coincidental match
            continue

        i = meth.find("_")
        protocol = meth[:i]
        condition = meth[i + 1:]

        if condition.startswith("error"):
            j = condition.find("_") + i + 1
            kind = meth[j + 1:]
            try:
                kind = int(kind)
            except ValueError:
                pass
            lookup = opener.handle_error.get(protocol, {})
            opener.handle_error[protocol] = lookup
        elif condition == "open":
            kind = protocol
            lookup = opener.handle_open
        elif condition == "response":
            kind = protocol
            lookup = opener.process_response
        elif condition == "request":
            kind = protocol
            lookup = opener.process_request
        else:
            continue

        handlers = lookup.setdefault(kind, [])
        if handlers:
            handlers[:] = [x for x in handlers if not find_cp(x)]

        removed.append(x for x in handlers if find_cp(x))

    if removed:
        for x in opener.handlers:
            if find_cp(x):
                x.add_parent(None)
        opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]

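
Hypothetical usage of the two helpers above (ydl and ie stand in for existing YoutubeDL and extractor instances):

add_opener(ydl, AbemaLicenseHandler(ie))  # install the custom abematv-license:// handler
remove_opener(ydl, AbemaLicenseHandler)   # passing the type removes every matching instance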
class AbemaLicenseHandler(compat_urllib_request.BaseHandler):
    handler_order = 499
    STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'

    def __init__(self, ie: 'AbemaTVIE'):
        # the protocol that this should really handle is 'abematv-license://'
        # abematv_license_open is just a placeholder for development purposes
        # ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open'))
        self.ie = ie

    def _get_videokey_from_ticket(self, ticket):
        to_show = self.ie._downloader.params.get('verbose', False)
        media_token = self.ie._get_media_token(to_show=to_show)

        license_response = self.ie._download_json(
            'https://license.abema.io/abematv-hls', None, note='Requesting playback license' if to_show else False,
            query={'t': media_token},
            data=json.dumps({
                'kv': 'a',
                'lt': ticket
            }).encode('utf-8'),
            headers={
                'Content-Type': 'application/json',
            })

        res = decode_base(license_response['k'], self.STRTABLE)
        encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))

        h = hmac.new(
            unhexlify(self.HKEY),
            (license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'),
            digestmod=hashlib.sha256)
        enckey = bytes_to_intlist(h.digest())

        return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey))

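
Restating the key derivation above as pseudocode (names taken from the code; the scheme is as implemented here, not publicly documented):

# encrypted_key = 16-byte big-endian packing of decode_base(response['k'], STRTABLE)
# aes_key       = HMAC-SHA256(key=unhexlify(HKEY), msg=cid + device_id)
# video_key     = AES-ECB-decrypt(encrypted_key, aes_key)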
    def abematv_license_open(self, url):
        url = request_to_url(url)
        ticket = compat_urllib_parse_urlparse(url).netloc
        response_data = self._get_videokey_from_ticket(ticket)
        return compat_urllib_response.addinfourl(io.BytesIO(response_data), headers={
            'Content-Length': len(response_data),
        }, url=url, code=200)


class AbemaTVBaseIE(InfoExtractor):
    def _extract_breadcrumb_list(self, webpage, video_id):
        for jld in re.finditer(
                r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
                webpage):
            jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
            if jsonld:
                if jsonld.get('@type') != 'BreadcrumbList':
                    continue
                trav = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
                if trav:
                    return trav
        return []


class AbemaTVIE(AbemaTVBaseIE):
    _VALID_URL = r'https?://abema\.tv/(?P<type>now-on-air|video/episode|channels/.+?/slots)/(?P<id>[^?/]+)'
    _NETRC_MACHINE = 'abematv'
    _TESTS = [{
        'url': 'https://abema.tv/video/episode/194-25_s2_p1',
        'info_dict': {
            'id': '194-25_s2_p1',
            'title': '第1話 「チーズケーキ」 「モーニング再び」',
            'series': '異世界食堂2',
            'series_number': 2,
            'episode': '第1話 「チーズケーキ」 「モーニング再び」',
            'episode_number': 1,
        },
        'skip': 'expired',
    }, {
        'url': 'https://abema.tv/channels/anime-live2/slots/E8tvAnMJ7a9a5d',
        'info_dict': {
            'id': 'E8tvAnMJ7a9a5d',
            'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
            'series': 'ゆるキャン△ SEASON2',
            'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
            'series_number': 2,
            'episode_number': 1,
            'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
        },
        'skip': 'expired',
    }, {
        'url': 'https://abema.tv/video/episode/87-877_s1282_p31047',
        'info_dict': {
            'id': 'E8tvAnMJ7a9a5d',
            'title': '第5話『光射す』',
            'description': 'md5:56d4fc1b4f7769ded5f923c55bb4695d',
            'thumbnail': r're:https://hayabusa\.io/.+',
            'series': '相棒',
            'episode': '第5話『光射す』',
        },
        'skip': 'expired',
    }, {
        'url': 'https://abema.tv/now-on-air/abema-anime',
        'info_dict': {
            'id': 'abema-anime',
            # this varies
            # 'title': '女子高生の無駄づかい 全話一挙【無料ビデオ72時間】',
            'description': 'md5:55f2e61f46a17e9230802d7bcc913d5f',
            'is_live': True,
        },
        'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server',
    }]
    _USERTOKEN = None
    _DEVICE_ID = None
    _TIMETABLE = None
    _MEDIATOKEN = None

    _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'

    def _generate_aks(self, deviceid):
        deviceid = deviceid.encode('utf-8')
        # add 1 hour and then drop minute and secs
        ts_1hour = int((time_seconds(hours=9) // 3600 + 1) * 3600)
        time_struct = time.gmtime(ts_1hour)
        ts_1hour_str = str(ts_1hour).encode('utf-8')

        tmp = None

        def mix_once(nonce):
            nonlocal tmp
            h = hmac.new(self._SECRETKEY, digestmod=hashlib.sha256)
            h.update(nonce)
            tmp = h.digest()

        def mix_tmp(count):
            nonlocal tmp
            for i in range(count):
                mix_once(tmp)

        def mix_twist(nonce):
            nonlocal tmp
            mix_once(urlsafe_b64encode(tmp).rstrip(b'=') + nonce)

        mix_once(self._SECRETKEY)
        mix_tmp(time_struct.tm_mon)
        mix_twist(deviceid)
        mix_tmp(time_struct.tm_mday % 5)
        mix_twist(ts_1hour_str)
        mix_tmp(time_struct.tm_hour % 5)

        return urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')

    def _get_device_token(self):
        if self._USERTOKEN:
            return self._USERTOKEN

        self._DEVICE_ID = random_uuidv4()
        aks = self._generate_aks(self._DEVICE_ID)
        user_data = self._download_json(
            'https://api.abema.io/v1/users', None, note='Authorizing',
            data=json.dumps({
                'deviceId': self._DEVICE_ID,
                'applicationKeySecret': aks,
            }).encode('utf-8'),
            headers={
                'Content-Type': 'application/json',
            })
        self._USERTOKEN = user_data['token']

        # don't allow adding it 2 times or more, though it's guarded
        remove_opener(self._downloader, AbemaLicenseHandler)
        add_opener(self._downloader, AbemaLicenseHandler(self))

        return self._USERTOKEN

    def _get_media_token(self, invalidate=False, to_show=True):
        if not invalidate and self._MEDIATOKEN:
            return self._MEDIATOKEN

        self._MEDIATOKEN = self._download_json(
            'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
            query={
                'osName': 'android',
                'osVersion': '6.0.1',
                'osLang': 'ja_JP',
                'osTimezone': 'Asia/Tokyo',
                'appId': 'tv.abema',
                'appVersion': '3.27.1'
            }, headers={
                'Authorization': 'bearer ' + self._get_device_token()
            })['token']

        return self._MEDIATOKEN

    def _real_initialize(self):
        self._login()

    def _login(self):
        username, password = self._get_login_info()
        # No authentication to be performed
        if not username:
            return True

        if '@' in username:  # don't strictly check if it's email address or not
            ep, method = 'user/email', 'email'
        else:
            ep, method = 'oneTimePassword', 'userId'

        login_response = self._download_json(
            f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
            data=json.dumps({
                method: username,
                'password': password
            }).encode('utf-8'), headers={
                'Authorization': 'bearer ' + self._get_device_token(),
                'Origin': 'https://abema.tv',
                'Referer': 'https://abema.tv/',
                'Content-Type': 'application/json',
            })

        self._USERTOKEN = login_response['token']
        self._get_media_token(True)

    def _real_extract(self, url):
        # starting a download using the infojson from this extractor is undefined behavior,
        # and will never be fixed; you must trigger downloads by directly specifying the URL
        # (unless there's a way to hook before downloading by extractor)
        video_id, video_type = self._match_valid_url(url).group('id', 'type')
        headers = {
            'Authorization': 'Bearer ' + self._get_device_token(),
        }
        video_type = video_type.split('/')[-1]

        webpage = self._download_webpage(url, video_id)
        canonical_url = self._search_regex(
            r'<link\s+rel="canonical"\s*href="(.+?)"', webpage, 'canonical URL',
            default=url)
        info = self._search_json_ld(webpage, video_id, default={})

        title = self._search_regex(
            r'<span\s*class=".+?EpisodeTitleBlock__title">(.+?)</span>', webpage, 'title', default=None)
        if not title:
            jsonld = None
            for jld in re.finditer(
                    r'(?is)<span\s*class="com-m-Thumbnail__image">(?:</span>)?<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
                    webpage):
                jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
                if jsonld:
                    break
            if jsonld:
                title = jsonld.get('caption')
        if not title and video_type == 'now-on-air':
            if not self._TIMETABLE:
                # cache the timetable because it goes to 5MiB in size (!!)
                self._TIMETABLE = self._download_json(
                    'https://api.abema.io/v1/timetable/dataSet?debug=false', video_id,
                    headers=headers)
            now = time_seconds(hours=9)
            for slot in self._TIMETABLE.get('slots', []):
                if slot.get('channelId') != video_id:
                    continue
                if slot['startAt'] <= now and now < slot['endAt']:
                    title = slot['title']
                    break

        # read breadcrumb on top of page
        breadcrumb = self._extract_breadcrumb_list(webpage, video_id)
        if breadcrumb:
            # breadcrumb list translates to: (example is 1st test for this IE)
            # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title)
            # hence this works
            info['series'] = breadcrumb[-2]
            info['episode'] = breadcrumb[-1]
            if not title:
                title = info['episode']

        description = self._html_search_regex(
            (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div',),
            webpage, 'description', default=None, group=1)
        if not description:
            og_desc = self._html_search_meta(
                ('description', 'og:description', 'twitter:description'), webpage)
            if og_desc:
                description = re.sub(r'''(?sx)
                    ^(.+?)(?:
                        アニメの動画を無料で見るならABEMA!|  # anime
                        等、.+  # applies for most of categories
                    )?
                ''', r'\1', og_desc)

        # canonical URL may contain series and episode number
        mobj = re.search(r's(\d+)_p(\d+)$', canonical_url)
        if mobj:
            seri = int_or_none(mobj.group(1), default=float('inf'))
            epis = int_or_none(mobj.group(2), default=float('inf'))
            info['series_number'] = seri if seri < 100 else None
            # some anime like Detective Conan (though not available in AbemaTV)
            # has more than 1000 episodes (1026 as of 2021/11/15)
            info['episode_number'] = epis if epis < 2000 else None

        is_live, m3u8_url = False, None
        if video_type == 'now-on-air':
            is_live = True
            channel_url = 'https://api.abema.io/v1/channels'
            if video_id == 'news-global':
                channel_url = update_url_query(channel_url, {'division': '1'})
            onair_channels = self._download_json(channel_url, video_id)
            for ch in onair_channels['channels']:
                if video_id == ch['id']:
                    m3u8_url = ch['playback']['hls']
                    break
            else:
                raise ExtractorError(f'Cannot find on-air {video_id} channel.', expected=True)
        elif video_type == 'episode':
            api_response = self._download_json(
                f'https://api.abema.io/v1/video/programs/{video_id}', video_id,
                note='Checking playability',
                headers=headers)
            ondemand_types = traverse_obj(api_response, ('terms', ..., 'onDemandType'), default=[])
            if 3 not in ondemand_types:
                # cannot acquire decryption key for these streams
                self.report_warning('This is a premium-only stream')

            m3u8_url = f'https://vod-abematv.akamaized.net/program/{video_id}/playlist.m3u8'
        elif video_type == 'slots':
            api_response = self._download_json(
                f'https://api.abema.io/v1/media/slots/{video_id}', video_id,
                note='Checking playability',
                headers=headers)
            if not traverse_obj(api_response, ('slot', 'flags', 'timeshiftFree'), default=False):
                self.report_warning('This is a premium-only stream')

            m3u8_url = f'https://vod-abematv.akamaized.net/slot/{video_id}/playlist.m3u8'
        else:
            raise ExtractorError('Unreachable')

        if is_live:
            self.report_warning("This is a livestream; yt-dlp doesn't support downloading natively, but FFmpeg cannot handle m3u8 manifests from AbemaTV")
            self.report_warning('Please consider using Streamlink to download these streams (https://github.com/streamlink/streamlink)')
        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, ext='mp4', live=is_live)

        info.update({
            'id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            'is_live': is_live,
        })
        return info

class AbemaTVTitleIE(AbemaTVBaseIE):
    _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'

    _TESTS = [{
        'url': 'https://abema.tv/video/title/90-1597',
        'info_dict': {
            'id': '90-1597',
            'title': 'シャッフルアイランド',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://abema.tv/video/title/193-132',
        'info_dict': {
            'id': '193-132',
            'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
        },
        'playlist_mincount': 16,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        playlist_title, breadcrumb = None, self._extract_breadcrumb_list(webpage, video_id)
        if breadcrumb:
            playlist_title = breadcrumb[-1]

        playlist = [
            self.url_result(urljoin('https://abema.tv/', mobj.group(1)))
            for mobj in re.finditer(r'<li\s*class=".+?EpisodeList.+?"><a\s*href="(/[^"]+?)"', webpage)]

        return self.playlist_result(playlist, playlist_title=playlist_title, playlist_id=video_id)
@@ -8,11 +8,10 @@ import os
import random

from .common import InfoExtractor
from ..aes import aes_cbc_decrypt
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import (
    compat_HTTPError,
    compat_b64decode,
    compat_ord,
)
from ..utils import (
    ass_subtitles_timecode,
@@ -84,14 +83,11 @@ class ADNIE(InfoExtractor):
            return None

        # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
        dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
            bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
            bytes_to_intlist(binascii.unhexlify(self._K + 'ab9f52f5baae7c72')),
            bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
        ))
        subtitles_json = self._parse_json(
            dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),
            None, fatal=False)
        dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
            compat_b64decode(enc_subtitles[24:]),
            binascii.unhexlify(self._K + 'ab9f52f5baae7c72'),
            compat_b64decode(enc_subtitles[:24])))
        subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
        if not subtitles_json:
            return None

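The ADN hunk above swaps the intlist-based AES helpers for aes_cbc_decrypt_bytes plus unpad_pkcs7; the removed slice dec_subtitles[:-compat_ord(dec_subtitles[-1])] and the new helper strip the same PKCS#7 padding. A tiny sketch of that equivalence:

def unpad_pkcs7(data: bytes) -> bytes:
    # In PKCS#7 the value of the last byte is the number of padding bytes appended,
    # so dropping that many bytes recovers the plaintext -- exactly what the removed
    # slice expression did by hand.
    return data[:-data[-1]]

assert unpad_pkcs7(b'subtitles.json\x02\x02') == b'subtitles.json'
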
@@ -31,7 +31,7 @@ class AdobeConnectIE(InfoExtractor):

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'title': title,
            'formats': formats,
            'is_live': is_live,
        }

@@ -1345,6 +1345,11 @@ MSO_INFO = {
        'username_field': 'username',
        'password_field': 'password',
    },
    'Suddenlink': {
        'name': 'Suddenlink',
        'username_field': 'username',
        'password_field': 'password',
    },
}

@@ -1635,6 +1640,52 @@ class AdobePassIE(InfoExtractor):
                urlh.geturl(), video_id, 'Sending final bookend',
                query=hidden_data)

            post_form(mvpd_confirm_page_res, 'Confirming Login')
        elif mso_id == 'Suddenlink':
            # Suddenlink is similar to SlingTV in using a tab history count and a meta refresh,
            # but they also do a dynamic redirect using JavaScript that has to be followed as well
            first_bookend_page, urlh = post_form(
                provider_redirect_page_res, 'Pressing Continue...')

            hidden_data = self._hidden_inputs(first_bookend_page)
            hidden_data['history_val'] = 1

            provider_login_redirect_page = self._download_webpage(
                urlh.geturl(), video_id, 'Sending First Bookend',
                query=hidden_data)

            provider_tryauth_url = self._html_search_regex(
                r'url:\s*[\'"]([^\'"]+)', provider_login_redirect_page, 'ajaxurl')

            provider_tryauth_page = self._download_webpage(
                provider_tryauth_url, video_id, 'Submitting TryAuth',
                query=hidden_data)

            provider_login_page_res = self._download_webpage_handle(
                f'https://authorize.suddenlink.net/saml/module.php/authSynacor/login.php?AuthState={provider_tryauth_page}',
                video_id, 'Getting Login Page',
                query=hidden_data)

            provider_association_redirect, urlh = post_form(
                provider_login_page_res, 'Logging in', {
                    mso_info['username_field']: username,
                    mso_info['password_field']: password
                })

            provider_refresh_redirect_url = extract_redirect_url(
                provider_association_redirect, url=urlh.geturl())

            last_bookend_page, urlh = self._download_webpage_handle(
                provider_refresh_redirect_url, video_id,
                'Downloading Auth Association Redirect Page')

            hidden_data = self._hidden_inputs(last_bookend_page)
            hidden_data['history_val'] = 3

            mvpd_confirm_page_res = self._download_webpage_handle(
                urlh.geturl(), video_id, 'Sending Final Bookend',
                query=hidden_data)

            post_form(mvpd_confirm_page_res, 'Confirming Login')
        else:
            # Some providers (e.g. DIRECTV NOW) have another meta refresh

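Each Suddenlink "bookend" above re-posts the hidden form fields that _hidden_inputs collects from the previous page. A rough standalone sketch of that scraping pattern (the regexes here are simplified stand-ins, not yt-dlp's actual implementation):

import re

def hidden_inputs(html):
    # Collect name/value pairs from <input type="hidden"> tags -- the state that
    # MVPD login flows expect to be echoed back on the next request.
    hidden = {}
    for mobj in re.finditer(r'<input[^>]+type=["\']hidden["\'][^>]*>', html):
        tag = mobj.group(0)
        name = re.search(r'name=["\']([^"\']+)', tag)
        value = re.search(r'value=["\']([^"\']*)', tag)
        if name:
            hidden[name.group(1)] = value.group(1) if value else ''
    return hidden

page = '<form><input type="hidden" name="history_val" value="1"></form>'
assert hidden_inputs(page) == {'history_val': '1'}
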
@@ -10,7 +10,11 @@ from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    qualities,
    traverse_obj,
    unified_strdate,
    unified_timestamp,
    update_url_query,
    url_or_none,
    urlencode_postdata,
    xpath_text,
@@ -380,3 +384,105 @@ class AfreecaTVIE(InfoExtractor):
        })

        return info


class AfreecaTVLiveIE(AfreecaTVIE):

    IE_NAME = 'afreecatv:live'
    _VALID_URL = r'https?://play\.afreeca(?:tv)?\.com/(?P<id>[^/]+)(?:/(?P<bno>\d+))?'
    _TESTS = [{
        'url': 'https://play.afreecatv.com/pyh3646/237852185',
        'info_dict': {
            'id': '237852185',
            'ext': 'mp4',
            'title': '【 우루과이 오늘은 무슨일이? 】',
            'uploader': '박진우[JINU]',
            'uploader_id': 'pyh3646',
            'timestamp': 1640661495,
            'is_live': True,
        },
        'skip': 'Livestream has ended',
    }, {
        'url': 'http://play.afreeca.com/pyh3646/237852185',
        'only_matching': True,
    }, {
        'url': 'http://play.afreeca.com/pyh3646',
        'only_matching': True,
    }]

    _LIVE_API_URL = 'https://live.afreecatv.com/afreeca/player_live_api.php'

    _QUALITIES = ('sd', 'hd', 'hd2k', 'original')

    def _real_extract(self, url):
        broadcaster_id, broadcast_no = self._match_valid_url(url).group('id', 'bno')
        password = self.get_param('videopassword')

        info = self._download_json(self._LIVE_API_URL, broadcaster_id, fatal=False,
                                   data=urlencode_postdata({'bid': broadcaster_id})) or {}
        channel_info = info.get('CHANNEL') or {}
        broadcaster_id = channel_info.get('BJID') or broadcaster_id
        broadcast_no = channel_info.get('BNO') or broadcast_no
        password_protected = channel_info.get('BPWD')
        if not broadcast_no:
            raise ExtractorError(f'Unable to extract broadcast number ({broadcaster_id} may not be live)', expected=True)
        if password_protected == 'Y' and password is None:
            raise ExtractorError(
                'This livestream is protected by a password, use the --video-password option',
                expected=True)

        formats = []
        quality_key = qualities(self._QUALITIES)
        for quality_str in self._QUALITIES:
            params = {
                'bno': broadcast_no,
                'stream_type': 'common',
                'type': 'aid',
                'quality': quality_str,
            }
            if password is not None:
                params['pwd'] = password
            aid_response = self._download_json(
                self._LIVE_API_URL, broadcast_no, fatal=False,
                data=urlencode_postdata(params),
                note=f'Downloading access token for {quality_str} stream',
                errnote=f'Unable to download access token for {quality_str} stream')
            aid = traverse_obj(aid_response, ('CHANNEL', 'AID'))
            if not aid:
                continue

            stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
            stream_info = self._download_json(
                f'{stream_base_url}/broad_stream_assign.html', broadcast_no, fatal=False,
                query={
                    'return_type': channel_info.get('CDN', 'gcp_cdn'),
                    'broad_key': f'{broadcast_no}-common-{quality_str}-hls',
                },
                note=f'Downloading metadata for {quality_str} stream',
                errnote=f'Unable to download metadata for {quality_str} stream') or {}

            if stream_info.get('view_url'):
                formats.append({
                    'format_id': quality_str,
                    'url': update_url_query(stream_info['view_url'], {'aid': aid}),
                    'ext': 'mp4',
                    'protocol': 'm3u8',
                    'quality': quality_key(quality_str),
                })

        self._sort_formats(formats)

        station_info = self._download_json(
            'https://st.afreecatv.com/api/get_station_status.php', broadcast_no,
            query={'szBjId': broadcaster_id}, fatal=False,
            note='Downloading channel metadata', errnote='Unable to download channel metadata') or {}

        return {
            'id': broadcast_no,
            'title': channel_info.get('TITLE') or station_info.get('station_title'),
            'uploader': channel_info.get('BJNICK') or station_info.get('station_name'),
            'uploader_id': broadcaster_id,
            'timestamp': unified_timestamp(station_info.get('broad_start')),
            'formats': formats,
            'is_live': True,
        }
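
quality_key in the AfreecaTV code comes from yt-dlp's qualities helper, which maps an ordered tuple of format names to numeric preferences so _sort_formats can rank them. A minimal sketch of the idea:

def qualities(order):
    # Later entries in `order` rank higher; unknown ids sort below everything.
    def q(fmt_id):
        try:
            return order.index(fmt_id)
        except ValueError:
            return -1
    return q

quality_key = qualities(('sd', 'hd', 'hd2k', 'original'))
assert quality_key('original') > quality_key('sd') > quality_key('unknown')
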
@@ -18,7 +18,7 @@ class AliExpressLiveIE(InfoExtractor):
            'id': '2800002704436634',
            'ext': 'mp4',
            'title': 'CASIMA7.22',
            'thumbnail': r're:http://.*\.jpg',
            'thumbnail': r're:https?://.*\.jpg',
            'uploader': 'CASIMA Official Store',
            'timestamp': 1500717600,
            'upload_date': '20170722',

@@ -1,55 +1,86 @@
# coding: utf-8
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    try_get,
)


class AlJazeeraIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?P<type>program/[^/]+|(?:feature|video)s)/\d{4}/\d{1,2}/\d{1,2}/(?P<id>[^/?&#]+)'
    _VALID_URL = r'https?://(?P<base>\w+\.aljazeera\.\w+)/(?P<type>programs?/[^/]+|(?:feature|video|new)s)?/\d{4}/\d{1,2}/\d{1,2}/(?P<id>[^/?&#]+)'

    _TESTS = [{
        'url': 'https://www.aljazeera.com/program/episode/2014/9/19/deliverance',
        'url': 'https://balkans.aljazeera.net/videos/2021/11/6/pojedini-domovi-u-sarajevu-jos-pod-vodom-mjestanima-se-dostavlja-hrana',
        'info_dict': {
            'id': '3792260579001',
            'id': '6280641530001',
            'ext': 'mp4',
            'title': 'The Slum - Episode 1: Deliverance',
            'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
            'uploader_id': '665003303001',
            'timestamp': 1411116829,
            'upload_date': '20140919',
            'title': 'Pojedini domovi u Sarajevu još pod vodom, mještanima se dostavlja hrana',
            'timestamp': 1636219149,
            'description': 'U sarajevskim naseljima Rajlovac i Reljevo stambeni objekti, ali i industrijska postrojenja i dalje su pod vodom.',
            'upload_date': '20211106',
        }
    }, {
        'url': 'https://balkans.aljazeera.net/videos/2021/11/6/djokovic-usao-u-finale-mastersa-u-parizu',
        'info_dict': {
            'id': '6280654936001',
            'ext': 'mp4',
            'title': 'Đoković ušao u finale Mastersa u Parizu',
            'timestamp': 1636221686,
            'description': 'Novak Đoković je u polufinalu Mastersa u Parizu nakon preokreta pobijedio Poljaka Huberta Hurkacza.',
            'upload_date': '20211106',
        },
        'add_ie': ['BrightcoveNew'],
        'skip': 'Not accessible from Travis CI server',
    }, {
        'url': 'https://www.aljazeera.com/videos/2017/5/11/sierra-leone-709-carat-diamond-to-be-auctioned-off',
        'only_matching': True,
    }, {
        'url': 'https://www.aljazeera.com/features/2017/8/21/transforming-pakistans-buses-into-art',
        'only_matching': True,
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
    BRIGHTCOVE_URL_RE = r'https?://players.brightcove.net/(?P<account>\d+)/(?P<player_id>[a-zA-Z0-9]+)_(?P<embed>[^/]+)/index.html\?videoId=(?P<id>\d+)'

    def _real_extract(self, url):
        post_type, name = self._match_valid_url(url).groups()
        base, post_type, id = self._match_valid_url(url).groups()
        wp = {
            'balkans.aljazeera.net': 'ajb',
            'chinese.aljazeera.net': 'chinese',
            'mubasher.aljazeera.net': 'ajm',
        }.get(base) or 'aje'
        post_type = {
            'features': 'post',
            'program': 'episode',
            'programs': 'episode',
            'videos': 'video',
            'news': 'news',
        }[post_type.split('/')[0]]
        video = self._download_json(
            'https://www.aljazeera.com/graphql', name, query={
            f'https://{base}/graphql', id, query={
                'wp-site': wp,
                'operationName': 'ArchipelagoSingleArticleQuery',
                'variables': json.dumps({
                    'name': name,
                    'name': id,
                    'postType': post_type,
                }),
            }, headers={
                'wp-site': 'aje',
            })['data']['article']['video']
        video_id = video['id']
        account_id = video.get('accountId') or '665003303001'
        player_id = video.get('playerId') or 'BkeSH5BDb'
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),
            'BrightcoveNew', video_id)
                'wp-site': wp,
            })
        video = try_get(video, lambda x: x['data']['article']['video']) or {}
        video_id = video.get('id')
        account = video.get('accountId') or '911432371001'
        player_id = video.get('playerId') or 'csvTfAlKW'
        embed = 'default'

        if video_id is None:
            webpage = self._download_webpage(url, id)

            account, player_id, embed, video_id = self._search_regex(self.BRIGHTCOVE_URL_RE, webpage, 'video id',
                                                                     group=(1, 2, 3, 4), default=(None, None, None, None))

        if video_id is None:
            return {
                '_type': 'url_transparent',
                'url': url,
                'ie_key': 'Generic'
            }

        return {
            '_type': 'url_transparent',
            'url': f'https://players.brightcove.net/{account}/{player_id}_{embed}/index.html?videoId={video_id}',
            'ie_key': 'BrightcoveNew'
        }
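
Whether the ids come from the GraphQL response or are regex-scraped out of the page, the rewritten Al Jazeera extractor always ends by handing BrightcoveNew a player URL assembled from an account id, a player id and an embed name. The template in isolation, using the fallback ids the diff itself supplies:

def brightcove_player_url(account, player_id, video_id, embed='default'):
    # Same template the extractor returns as a url_transparent result for BrightcoveNew.
    return (f'https://players.brightcove.net/{account}/{player_id}_{embed}'
            f'/index.html?videoId={video_id}')

# Fallback account/player ids from the diff; the video id is one of the test ids.
print(brightcove_player_url('911432371001', 'csvTfAlKW', '6280641530001'))
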
yt_dlp/extractor/alsace20tv.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    dict_get,
    get_element_by_class,
    int_or_none,
    unified_strdate,
    url_or_none,
)


class Alsace20TVBaseIE(InfoExtractor):
    def _extract_video(self, video_id, url=None):
        info = self._download_json(
            'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ),
            video_id) or {}
        title = info.get('titre')

        formats = []
        for res, fmt_url in (info.get('files') or {}).items():
            formats.extend(
                self._extract_smil_formats(fmt_url, video_id, fatal=False)
                if '/smil:_' in fmt_url
                else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False))
        self._sort_formats(formats)

        webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''
        thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage))
        upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
        upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': clean_html(get_element_by_class('wysiwyg', webpage)),
            'upload_date': upload_date,
            'thumbnail': thumbnail,
            'duration': int_or_none(self._og_search_property('video:duration', webpage) if webpage else None),
            'view_count': int_or_none(info.get('nb_vues')),
        }


class Alsace20TVIE(Alsace20TVBaseIE):
    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/(?:[\w-]+/)+[\w-]+-(?P<id>[\w]+)'
    _TESTS = [{
        'url': 'https://www.alsace20.tv/VOD/Actu/JT/Votre-JT-jeudi-3-fevrier-lyNHCXpYJh.html',
        'info_dict': {
            'id': 'lyNHCXpYJh',
            'ext': 'mp4',
            'description': 'md5:fc0bc4a0692d3d2dba4524053de4c7b7',
            'title': 'Votre JT du jeudi 3 février',
            'upload_date': '20220203',
            'thumbnail': r're:https?://.+\.jpg',
            'duration': 1073,
            'view_count': int,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._extract_video(video_id, url)


class Alsace20TVEmbedIE(Alsace20TVBaseIE):
    _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/emb/(?P<id>[\w]+)'
    _TESTS = [{
        'url': 'https://www.alsace20.tv/emb/lyNHCXpYJh',
        # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb',
        'info_dict': {
            'id': 'lyNHCXpYJh',
            'ext': 'mp4',
            'title': 'Votre JT du jeudi 3 février',
            'upload_date': '20220203',
            'thumbnail': r're:https?://.+\.jpg',
            'view_count': int,
        },
        'params': {
            'format': 'bestvideo',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._extract_video(video_id)
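
The new Alsace20TV extractor recovers upload_date from a six-digit YYMMDD prefix in the thumbnail filename (the r'/(\d{6})_' search above). The same reshaping in isolation, against a made-up thumbnail URL:

import re

def upload_date_from_thumbnail(thumbnail_url):
    # A path such as .../220203_JT.jpg encodes the date as YYMMDD;
    # rebuild it as the YYYYMMDD string yt-dlp uses for upload_date.
    mobj = re.search(r'/(\d{6})_', thumbnail_url)
    if not mobj:
        return None
    d = mobj.group(1)
    return '20%s%s%s' % (d[:2], d[2:4], d[4:])

assert upload_date_from_thumbnail('https://example.com/img/220203_JT.jpg') == '20220203'
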
@@ -4,7 +4,7 @@ from ..utils import int_or_none


class AmazonStoreIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/[^/]*/?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'
    _VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/(?:[^/]+/)?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'

    _TESTS = [{
        'url': 'https://www.amazon.co.uk/dp/B098XNCHLD/',

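The Amazon change anchors the scheme and replaces the sloppy [^/]*/? path segment with a single optional (?:[^/]+/)?. A quick sanity check of what the new pattern accepts (re.match stops at the id, which is all the extractor needs):

import re

VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/(?:[^/]+/)?(?:dp|gp/product)/(?P<id>[^/&#$?]+)'

for url in ('https://www.amazon.co.uk/dp/B098XNCHLD/',
            'https://www.amazon.in/gp/product/B098XNCHLD'):
    mobj = re.match(VALID_URL, url)
    print(url, '->', mobj and mobj.group('id'))
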
yt_dlp/extractor/ant1newsgr.py (new file, 143 lines)
@@ -0,0 +1,143 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    HEADRequest,
    ExtractorError,
    determine_ext,
    scale_thumbnails_to_max_format_width,
    unescapeHTML,
)


class Ant1NewsGrBaseIE(InfoExtractor):
    def _download_and_extract_api_data(self, video_id, netloc, cid=None):
        url = f'{self.http_scheme()}//{netloc}{self._API_PATH}'
        info = self._download_json(url, video_id, query={'cid': cid or video_id})
        try:
            source = info['url']
        except KeyError:
            raise ExtractorError('no source found for %s' % video_id)
        formats, subs = (self._extract_m3u8_formats_and_subtitles(source, video_id, 'mp4')
                         if determine_ext(source) == 'm3u8' else ([{'url': source}], {}))
        self._sort_formats(formats)
        thumbnails = scale_thumbnails_to_max_format_width(
            formats, [{'url': info['thumb']}], r'(?<=/imgHandler/)\d+')
        return {
            'id': video_id,
            'title': info.get('title'),
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subs,
        }


class Ant1NewsGrWatchIE(Ant1NewsGrBaseIE):
    IE_NAME = 'ant1newsgr:watch'
    IE_DESC = 'ant1news.gr videos'
    _VALID_URL = r'https?://(?P<netloc>(?:www\.)?ant1news\.gr)/watch/(?P<id>\d+)/'
    _API_PATH = '/templates/data/player'

    _TESTS = [{
        'url': 'https://www.ant1news.gr/watch/1506168/ant1-news-09112021-stis-18-45',
        'md5': '95925e6b32106754235f2417e0d2dfab',
        'info_dict': {
            'id': '1506168',
            'ext': 'mp4',
            'title': 'md5:0ad00fa66ecf8aa233d26ab0dba7514a',
            'description': 'md5:18665af715a6dcfeac1d6153a44f16b0',
            'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/26d46bf6-8158-4f02-b197-7096c714b2de.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id, netloc = self._match_valid_url(url).group('id', 'netloc')
        webpage = self._download_webpage(url, video_id)
        info = self._download_and_extract_api_data(video_id, netloc)
        info['description'] = self._og_search_description(webpage)
        return info


class Ant1NewsGrArticleIE(Ant1NewsGrBaseIE):
    IE_NAME = 'ant1newsgr:article'
    IE_DESC = 'ant1news.gr articles'
    _VALID_URL = r'https?://(?:www\.)?ant1news\.gr/[^/]+/article/(?P<id>\d+)/'

    _TESTS = [{
        'url': 'https://www.ant1news.gr/afieromata/article/549468/o-tzeims-mpont-sta-meteora-oi-apeiles-kai-o-xesikomos-ton-kalogeron',
        'md5': '294f18331bb516539d72d85a82887dcc',
        'info_dict': {
            'id': '_xvg/m_cmbatw=',
            'ext': 'mp4',
            'title': 'md5:a93e8ecf2e4073bfdffcb38f59945411',
            'timestamp': 1603092840,
            'upload_date': '20201019',
            'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/756206d2-d640-40e2-b201-3555abdfc0db.jpg',
        },
    }, {
        'url': 'https://ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn',
        'info_dict': {
            'id': '620286',
            'title': 'md5:91fe569e952e4d146485740ae927662b',
        },
        'playlist_mincount': 2,
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle')
        embed_urls = list(Ant1NewsGrEmbedIE._extract_urls(webpage))
        if not embed_urls:
            raise ExtractorError('no videos found for %s' % video_id, expected=True)
        return self.playlist_from_matches(
            embed_urls, video_id, info.get('title'), ie=Ant1NewsGrEmbedIE.ie_key(),
            video_kwargs={'url_transparent': True, 'timestamp': info.get('timestamp')})


class Ant1NewsGrEmbedIE(Ant1NewsGrBaseIE):
    IE_NAME = 'ant1newsgr:embed'
    IE_DESC = 'ant1news.gr embedded videos'
    _BASE_PLAYER_URL_RE = r'(?:https?:)?//(?:[a-zA-Z0-9\-]+\.)?(?:antenna|ant1news)\.gr/templates/pages/player'
    _VALID_URL = rf'{_BASE_PLAYER_URL_RE}\?([^#]+&)?cid=(?P<id>[^#&]+)'
    _API_PATH = '/news/templates/data/jsonPlayer'

    _TESTS = [{
        'url': 'https://www.antenna.gr/templates/pages/player?cid=3f_li_c_az_jw_y_u=&w=670&h=377',
        'md5': 'dfc58c3a11a5a9aad2ba316ed447def3',
        'info_dict': {
            'id': '3f_li_c_az_jw_y_u=',
            'ext': 'mp4',
            'title': 'md5:a30c93332455f53e1e84ae0724f0adf7',
            'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/bbe31201-3f09-4a4e-87f5-8ad2159fffe2.jpg',
        },
    }]

    @classmethod
    def _extract_urls(cls, webpage):
        _EMBED_URL_RE = rf'{cls._BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+'
        _EMBED_RE = rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>{_EMBED_URL_RE})(?P=_q1)'
        for mobj in re.finditer(_EMBED_RE, webpage):
            url = unescapeHTML(mobj.group('url'))
            if not cls.suitable(url):
                continue
            yield url

    def _real_extract(self, url):
        video_id = self._match_id(url)

        canonical_url = self._request_webpage(
            HEADRequest(url), video_id,
            note='Resolve canonical player URL',
            errnote='Could not resolve canonical player URL').geturl()
        _, netloc, _, _, query, _ = urllib.parse.urlparse(canonical_url)
        cid = urllib.parse.parse_qs(query)['cid'][0]

        return self._download_and_extract_api_data(video_id, netloc, cid=cid)
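
Ant1NewsGrEmbedIE._extract_urls above captures the quote character that opens the iframe src in a named group and requires the very same character to close it via a backreference. A cut-down sketch of that trick (the cid value is a placeholder):

import re

# (?P<q>["\']) captures whichever quote opens src=...; (?P=q) insists the same
# character closes it, so single- and double-quoted embeds both match.
EMBED_RE = r'<iframe[^>]+?src=(?P<q>["\'])(?P<url>(?:(?!(?P=q)).)+)(?P=q)'

html = '<iframe src="https://www.antenna.gr/templates/pages/player?cid=abc"></iframe>'
print(re.search(EMBED_RE, html).group('url'))
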
@@ -33,19 +33,22 @@ class AparatIE(InfoExtractor):
        'only_matching': True,
    }]

    def _parse_options(self, webpage, video_id, fatal=True):
        return self._parse_json(self._search_regex(
            r'options\s*=\s*({.+?})\s*;', webpage, 'options', default='{}'), video_id)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Provides more metadata
        # If available, provides more metadata
        webpage = self._download_webpage(url, video_id, fatal=False)
        options = self._parse_options(webpage, video_id, fatal=False)

        if not webpage:
        if not options:
            webpage = self._download_webpage(
                'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
                video_id)

            options = self._parse_json(self._search_regex(
                r'options\s*=\s*({.+?})\s*;', webpage, 'options'), video_id)
                video_id, 'Downloading embed webpage')
            options = self._parse_options(webpage, video_id)

        formats = []
        for sources in (options.get('multiSRC') or []):

@@ -3,7 +3,9 @@ from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    clean_podcast_url,
    get_element_by_class,
    int_or_none,
    parse_iso8601,
    try_get,
@@ -14,16 +16,17 @@ class ApplePodcastsIE(InfoExtractor):
    _VALID_URL = r'https?://podcasts\.apple\.com/(?:[^/]+/)?podcast(?:/[^/]+){1,2}.*?\bi=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://podcasts.apple.com/us/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
        'md5': 'df02e6acb11c10e844946a39e7222b08',
        'md5': '41dc31cd650143e530d9423b6b5a344f',
        'info_dict': {
            'id': '1000482637777',
            'ext': 'mp3',
            'title': '207 - Whitney Webb Returns',
            'description': 'md5:13a73bade02d2e43737751e3987e1399',
            'description': 'md5:75ef4316031df7b41ced4e7b987f79c6',
            'upload_date': '20200705',
            'timestamp': 1593921600,
            'duration': 6425,
            'timestamp': 1593932400,
            'duration': 6454,
            'series': 'The Tim Dillon Show',
            'thumbnail': 're:.+[.](png|jpe?g|webp)',
        }
    }, {
        'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
@@ -39,24 +42,47 @@ class ApplePodcastsIE(InfoExtractor):
    def _real_extract(self, url):
        episode_id = self._match_id(url)
        webpage = self._download_webpage(url, episode_id)
        episode_data = {}
        ember_data = {}
        # new page type 2021-11
        amp_data = self._parse_json(self._search_regex(
            r'(?s)id="shoebox-media-api-cache-amp-podcasts"[^>]*>\s*({.+?})\s*<',
            webpage, 'AMP data', default='{}'), episode_id, fatal=False) or {}
        amp_data = try_get(amp_data,
                           lambda a: self._parse_json(
                               next(a[x] for x in iter(a) if episode_id in x),
                               episode_id),
                           dict) or {}
        amp_data = amp_data.get('d') or []
        episode_data = try_get(
            amp_data,
            lambda a: next(x for x in a
                           if x['type'] == 'podcast-episodes' and x['id'] == episode_id),
            dict)
        if not episode_data:
            # try pre 2021-11 page type: TODO: consider deleting if no longer used
            ember_data = self._parse_json(self._search_regex(
                r'id="shoebox-ember-data-store"[^>]*>\s*({.+?})\s*<',
                webpage, 'ember data'), episode_id)
                r'(?s)id="shoebox-ember-data-store"[^>]*>\s*({.+?})\s*<',
                webpage, 'ember data'), episode_id) or {}
            ember_data = ember_data.get(episode_id) or ember_data
        episode = ember_data['data']['attributes']
            episode_data = try_get(ember_data, lambda x: x['data'], dict)
        episode = episode_data['attributes']
        description = episode.get('description') or {}

        series = None
        for inc in (ember_data.get('included') or []):
        for inc in (amp_data or ember_data.get('included') or []):
            if inc.get('type') == 'media/podcast':
                series = try_get(inc, lambda x: x['attributes']['name'])
        series = series or clean_html(get_element_by_class('podcast-header__identity', webpage))

        return {
            'id': episode_id,
            'title': episode['name'],
            'title': episode.get('name'),
            'url': clean_podcast_url(episode['assetUrl']),
            'description': description.get('standard') or description.get('short'),
            'timestamp': parse_iso8601(episode.get('releaseDateTime')),
            'duration': int_or_none(episode.get('durationInMilliseconds'), 1000),
            'series': series,
            'thumbnail': self._og_search_thumbnail(webpage),
            'vcodec': 'none',
        }
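
The new Apple Podcasts path reads the "shoebox" AMP cache: a dict whose keys are API-style URLs and whose values are themselves JSON strings, one of which holds a 'd' list containing the podcast-episodes record. Roughly, against a toy payload (the cache key below is invented):

import json

def find_episode(amp_cache, episode_id):
    # Pick the cache entry whose key mentions the episode id, decode its JSON
    # string, then pull the typed 'podcast-episodes' record out of the 'd' list.
    raw = next(v for k, v in amp_cache.items() if episode_id in k)
    records = json.loads(raw).get('d') or []
    return next((x for x in records
                 if x.get('type') == 'podcast-episodes' and x.get('id') == episode_id), None)

cache = {'/v1/catalog/episodes/1000482637777':
         json.dumps({'d': [{'type': 'podcast-episodes', 'id': '1000482637777',
                            'attributes': {'name': '207 - Whitney Webb Returns'}}]})}
episode = find_episode(cache, '1000482637777')
assert episode['attributes']['name'] == '207 - Whitney Webb Returns'
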
@@ -3,33 +3,37 @@ from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from .youtube import YoutubeIE
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_unquote_plus,
    compat_HTTPError
)
from ..utils import (
    bug_reports_message,
    clean_html,
    determine_ext,
    dict_get,
    extract_attributes,
    ExtractorError,
    get_element_by_id,
    HEADRequest,
    int_or_none,
    join_nonempty,
    KNOWN_EXTENSIONS,
    merge_dicts,
    mimetype2ext,
    orderedSet,
    parse_duration,
    parse_qs,
    RegexNotFoundError,
    str_to_int,
    str_or_none,
    traverse_obj,
    try_get,
    unified_strdate,
    unified_timestamp,
    urlhandle_detect_ext,
    url_or_none
)


@@ -61,7 +65,7 @@ class ArchiveOrgIE(InfoExtractor):
            'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
            'uploader': 'yorkmba99@hotmail.com',
            'timestamp': 1387699629,
            'upload_date': "20131222",
            'upload_date': '20131222',
        },
    }, {
        'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
@@ -147,8 +151,7 @@ class ArchiveOrgIE(InfoExtractor):

        # Archive.org metadata API doesn't clearly demarcate playlist entries
        # or subtitle tracks, so we get them from the embeddable player.
        embed_page = self._download_webpage(
            'https://archive.org/embed/' + identifier, identifier)
        embed_page = self._download_webpage(f'https://archive.org/embed/{identifier}', identifier)
        playlist = self._playlist_data(embed_page)

        entries = {}
@@ -163,17 +166,17 @@ class ArchiveOrgIE(InfoExtractor):
                'thumbnails': [],
                'artist': p.get('artist'),
                'track': p.get('title'),
                'subtitles': {}}
                'subtitles': {},
            }

            for track in p.get('tracks', []):
                if track['kind'] != 'subtitles':
                    continue

                entries[p['orig']][track['label']] = {
                    'url': 'https://archive.org/' + track['file'].lstrip('/')}
                    'url': 'https://archive.org/' + track['file'].lstrip('/')
                }

        metadata = self._download_json(
            'http://archive.org/metadata/' + identifier, identifier)
        metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier)
        m = metadata['metadata']
        identifier = m['identifier']

@@ -186,7 +189,7 @@ class ArchiveOrgIE(InfoExtractor):
            'license': m.get('licenseurl'),
            'release_date': unified_strdate(m.get('date')),
            'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
            'webpage_url': 'https://archive.org/details/' + identifier,
            'webpage_url': f'https://archive.org/details/{identifier}',
            'location': m.get('venue'),
            'release_year': int_or_none(m.get('year'))}

@@ -204,7 +207,7 @@ class ArchiveOrgIE(InfoExtractor):
                    'discnumber': int_or_none(f.get('disc')),
                    'release_year': int_or_none(f.get('year'))})
                entry = entries[f['name']]
            elif f.get('original') in entries:
            elif traverse_obj(f, 'original', expected_type=str) in entries:
                entry = entries[f['original']]
            else:
                continue
@@ -227,13 +230,12 @@ class ArchiveOrgIE(InfoExtractor):
                    'filesize': int_or_none(f.get('size')),
                    'protocol': 'https'})

        # Sort available formats by filesize
        for entry in entries.values():
            entry['formats'] = list(sorted(entry['formats'], key=lambda x: x.get('filesize', -1)))
            self._sort_formats(entry['formats'])

        if len(entries) == 1:
            # If there's only one item, use it as the main info dict
            only_video = entries[list(entries.keys())[0]]
            only_video = next(iter(entries.values()))
            if entry_id:
                info = merge_dicts(only_video, info)
            else:
@@ -258,19 +260,19 @@ class ArchiveOrgIE(InfoExtractor):

class YoutubeWebArchiveIE(InfoExtractor):
    IE_NAME = 'web.archive:youtube'
    IE_DESC = 'web.archive.org saved youtube videos'
    _VALID_URL = r"""(?x)^
    IE_DESC = 'web.archive.org saved youtube videos, "ytarchive:" prefix'
    _VALID_URL = r'''(?x)(?:(?P<prefix>ytarchive:)|
            (?:https?://)?web\.archive\.org/
            (?:web/)?
            (?:[0-9A-Za-z_*]+/)?  # /web and the version index is optional

            (?:https?(?::|%3[Aa])//)?
            (?:
                (?:\w+\.)?youtube\.com/watch(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD])  # Youtube URL
                |(wayback-fakeurl\.archive\.org/yt/)  # Or the internal fake url
            (?:web/)?(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)?  # /web and the version index is optional
            (?:https?(?::|%3[Aa])//)?(?:
                (?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD])  # Youtube URL
                |(?:wayback-fakeurl\.archive\.org/yt/)  # Or the internal fake url
            )
            (?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
            """
        )(?P<id>[0-9A-Za-z_-]{11})
        (?(prefix)
            (?::(?P<date2>[0-9]{14}))?$|
            (?:%26|[#&]|$)
        )'''

    _TESTS = [
        {
@@ -278,141 +280,394 @@ class YoutubeWebArchiveIE(InfoExtractor):
            'info_dict': {
                'id': 'aYAGB11YrSs',
                'ext': 'webm',
                'title': 'Team Fortress 2 - Sandviches!'
                'title': 'Team Fortress 2 - Sandviches!',
                'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
                'upload_date': '20110926',
                'uploader': 'Zeurel',
                'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
                'duration': 32,
                'uploader_id': 'Zeurel',
                'uploader_url': 'http://www.youtube.com/user/Zeurel'
            }
        },
        {
        }, {
            # Internal link
            'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
            'info_dict': {
                'id': '97t7Xj_iBv0',
                'ext': 'mp4',
                'title': 'How Flexible Machines Could Save The World'
                'title': 'Why Machines That Bend Are Better',
                'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
                'upload_date': '20190312',
                'uploader': 'Veritasium',
                'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
                'duration': 771,
                'uploader_id': '1veritasium',
                'uploader_url': 'http://www.youtube.com/user/1veritasium'
            }
        },
        {
            # Video from 2012, webm format itag 45.
        }, {
            # Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description.
            # Should use the date in the link. Title ends with '- Youtube'. Capture has description in eow-description
            'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
            'info_dict': {
                'id': 'AkhihxRKcrs',
                'ext': 'webm',
                'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)'
                'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
                'upload_date': '20120712',
                'duration': 398,
                'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
                'uploader_id': 'machinima',
                'uploader_url': 'http://www.youtube.com/user/machinima'
            }
        },
        {
            # Old flash-only video. Webpage title starts with "YouTube - ".
        }, {
            # FLV video. Video file URL does not provide itag information
            'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
            'info_dict': {
                'id': 'jNQXAC9IVRw',
                'ext': 'unknown_video',
                'title': 'Me at the zoo'
                'ext': 'flv',
                'title': 'Me at the zoo',
                'upload_date': '20050423',
                'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
                'duration': 19,
                'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
                'uploader_id': 'jawed',
                'uploader_url': 'http://www.youtube.com/user/jawed'
            }
        },
        {
            # Flash video with .flv extension (itag 34). Title has prefix "YouTube -"
            # Title has some weird unicode characters too.
        }, {
            'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
            'info_dict': {
                'id': 'lTx3G6h2xyA',
                'ext': 'flv',
                'title': 'Madeon - Pop Culture (live mashup)'
                'title': 'Madeon - Pop Culture (live mashup)',
                'upload_date': '20110711',
                'uploader': 'Madeon',
                'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
                'duration': 204,
                'description': 'md5:f7535343b6eda34a314eff8b85444680',
                'uploader_id': 'itsmadeon',
                'uploader_url': 'http://www.youtube.com/user/itsmadeon'
            }
        }, {
            # First capture is of dead video, second is the oldest from CDX response.
            'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
            'info_dict': {
                'id': '1JYutPM8O6E',
                'ext': 'mp4',
                'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
                'upload_date': '20160218',
                'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
                'duration': 1236,
                'description': 'md5:21032bae736421e89c2edf36d1936947',
                'uploader_id': 'MachinimaETC',
                'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
            }
        }, {
            # First capture of dead video, capture date in link links to dead capture.
            'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
            'info_dict': {
                'id': '6FPhZJGvf4E',
                'ext': 'mp4',
                'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
                'upload_date': '20160219',
                'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
                'duration': 798,
                'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
                'uploader_id': 'MachinimaETC',
                'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
            },
        {  # Some versions of Youtube have "YouTube" as page title in html (and later rewritten by js).
            'expected_warnings': [
                r'unable to download capture webpage \(it may not be archived\)'
            ]
        }, {  # Very old YouTube page, has - YouTube in title.
            'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
            'info_dict': {
                'id': '-06-KB9XTzg',
                'ext': 'flv',
                'title': 'New Coin Hack!! 100% Safe!!'
            }
        }, {
            'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
            'info_dict': {
                'id': 'dWW7qP423y8',
                'ext': 'mp4',
                'title': 'It\'s Bootleg AirPods Time.',
                'upload_date': '20211021',
                'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
                'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
                'duration': 810,
                'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
                'uploader': 'DankPods',
                'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
                'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug'
            }
        }, {
            # player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
            'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
            'info_dict': {
                'id': '6Dh-RL__uN4',
                'ext': 'mp4',
                'title': 'bitch lasagna',
                'upload_date': '20181005',
                'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
                'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
                'duration': 135,
                'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
                'uploader': 'PewDiePie',
                'uploader_id': 'PewDiePie',
                'uploader_url': 'http://www.youtube.com/user/PewDiePie'
            }
        }, {
            'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
            'info_dict': {
                'id': 'kH-G_aIBlFw',
                'ext': 'mp4',
                'title': 'kH-G_aIBlFw'
            },
            'expected_warnings': [
                'unable to extract title',
            ]
        },
        {
            # First capture is a 302 redirect intermediary page.
            'url': 'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=0altSZ96U4M',
            'info_dict': {
                'id': '0altSZ96U4M',
                'ext': 'mp4',
                'title': '0altSZ96U4M'
            },
            'expected_warnings': [
                'unable to extract title',
            ]
        },
        {
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
            'only_matching': True
        }, {
            # Video not archived, only capture is unavailable video page
            'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
            'only_matching': True,
        },
        {  # Encoded url
            'only_matching': True
        }, {  # Encoded url
            'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
            'only_matching': True,
        },
        {
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
            'only_matching': True,
        }
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&search=soccer',
            'only_matching': True
        }, {
            'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
            'only_matching': True
        }, {
            'url': 'ytarchive:BaW_jenozKc:20050214000000',
            'only_matching': True
        }, {
            'url': 'ytarchive:BaW_jenozKc',
            'only_matching': True
        },
    ]
    _YT_INITIAL_DATA_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
    _YT_INITIAL_PLAYER_RESPONSE_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*({.+?})[)\s]*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE
    _YT_INITIAL_BOUNDARY_RE = r'(?:(?:var\s+meta|</script|\n)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_BOUNDARY_RE

    def _real_extract(self, url):
        video_id = self._match_id(url)
        title = video_id  # if we are not able to get a title
    _YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com']  # thumbnails most likely archived on these servers
    _YT_ALL_THUMB_SERVERS = orderedSet(
        _YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])

        def _extract_title(webpage):
            page_title = self._html_search_regex(
                r'<title>([^<]*)</title>', webpage, 'title', fatal=False) or ''
            # YouTube video pages appear to always have either 'YouTube -' as suffix or '- YouTube' as prefix.
            try:
    _WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
    _OLDEST_CAPTURE_DATE = 20050214000000
    _NEWEST_CAPTURE_DATE = 20500101000000

    def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note='Downloading CDX API JSON'):
        # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
        query = {
            'url': url,
            'output': 'json',
            'fl': 'original,mimetype,length,timestamp',
            'limit': 500,
            'filter': ['statuscode:200'] + (filters or []),
            'collapse': collapse or [],
            **(query or {})
        }
        res = self._download_json('https://web.archive.org/cdx/search/cdx', item_id, note, query=query)
        if isinstance(res, list) and len(res) >= 2:
            # format response to make it easier to use
            return list(dict(zip(res[0], v)) for v in res[1:])
        elif not isinstance(res, list) or len(res) != 0:
            self.report_warning('Error while parsing CDX API response' + bug_reports_message())

    def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
        return self._parse_json(self._search_regex(
            (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
             regex), webpage, name, default='{}'), video_id, fatal=False)

    def _extract_webpage_title(self, webpage):
        page_title = self._html_search_regex(
            r'<title>([^<]*)</title>', webpage, 'title', default='')
        # YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
        return self._html_search_regex(
            r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
            page_title, 'title', default='')
            except RegexNotFoundError:
                page_title = None

            if not page_title:
                self.report_warning('unable to extract title', video_id=video_id)
                return
            return page_title
    def _extract_metadata(self, video_id, webpage):
        search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None))
        player_response = self._extract_yt_initial_variable(
            webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') or {}
        initial_data = self._extract_yt_initial_variable(
            webpage, self._YT_INITIAL_DATA_RE, video_id, 'initial data') or {}

        # If the video is no longer available, the oldest capture may be one before it was removed.
        # Setting the capture date in url to early date seems to redirect to earliest capture.
        webpage = self._download_webpage(
            'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=%s' % video_id,
            video_id=video_id, fatal=False, errnote='unable to download video webpage (probably not archived).')
        if webpage:
            title = _extract_title(webpage) or title
        initial_data_video = traverse_obj(
            initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'),
            expected_type=dict, get_all=False, default={})

        # Use link translator mentioned in https://github.com/ytdl-org/youtube-dl/issues/13655
        internal_fake_url = 'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id
        video_details = traverse_obj(
            player_response, 'videoDetails', expected_type=dict, get_all=False, default={})

        microformats = traverse_obj(
            player_response, ('microformat', 'playerMicroformatRenderer'), expected_type=dict, get_all=False, default={})

        video_title = (
            video_details.get('title')
            or YoutubeBaseInfoExtractor._get_text(microformats, 'title')
            or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title')
            or self._extract_webpage_title(webpage)
            or search_meta(['og:title', 'twitter:title', 'title']))

        channel_id = str_or_none(
            video_details.get('channelId')
            or microformats.get('externalChannelId')
            or search_meta('channelId')
            or self._search_regex(
                r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',  # @b45a9e6
                webpage, 'channel id', default=None, group='id'))
        channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None

        duration = int_or_none(
            video_details.get('lengthSeconds')
            or microformats.get('lengthSeconds')
            or parse_duration(search_meta('duration')))
        description = (
            video_details.get('shortDescription')
            or YoutubeBaseInfoExtractor._get_text(microformats, 'description')
            or clean_html(get_element_by_id('eow-description', webpage))  # @9e6dd23
            or search_meta(['description', 'og:description', 'twitter:description']))

        uploader = video_details.get('author')

        # Uploader ID and URL
        uploader_mobj = re.search(
            r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',  # @fd05024
            webpage)
        if uploader_mobj is not None:
            uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url')
        else:
            # @a6211d2
            uploader_url = url_or_none(microformats.get('ownerProfileUrl'))
            uploader_id = self._search_regex(
                r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None)

        upload_date = unified_strdate(
            dict_get(microformats, ('uploadDate', 'publishDate'))
            or search_meta(['uploadDate', 'datePublished'])
            or self._search_regex(
                [r'(?s)id="eow-date.*?>(.*?)</span>',
                 r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],  # @7998520
                webpage, 'upload date', default=None))

        return {
            'title': video_title,
            'description': description,
            'upload_date': upload_date,
            'uploader': uploader,
            'channel_id': channel_id,
            'channel_url': channel_url,
            'duration': duration,
            'uploader_url': uploader_url,
            'uploader_id': uploader_id,
        }

    def _extract_thumbnails(self, video_id):
        try_all = 'thumbnails' in self._configuration_arg('check_all')
        thumbnail_base_urls = ['http://{server}/vi{webp}/{video_id}'.format(
            webp='_webp' if ext == 'webp' else '', video_id=video_id, server=server)
            for server in (self._YT_ALL_THUMB_SERVERS if try_all else self._YT_DEFAULT_THUMB_SERVERS) for ext in (('jpg', 'webp') if try_all else ('jpg',))]

        thumbnails = []
        for url in thumbnail_base_urls:
            response = self._call_cdx_api(
                video_id, url, filters=['mimetype:image/(?:webp|jpeg)'],
                collapse=['urlkey'], query={'matchType': 'prefix'})
            if not response:
                continue
            thumbnails.extend(
                {
                    'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
                    'filesize': int_or_none(thumbnail_dict.get('length')),
                    'preference': int_or_none(thumbnail_dict.get('length'))
                } for thumbnail_dict in response)
            if not try_all:
                break

        self._remove_duplicate_formats(thumbnails)
        return thumbnails

    def _get_capture_dates(self, video_id, url_date):
        capture_dates = []
        # Note: CDX API will not find watch pages with extra params in the url.
        response = self._call_cdx_api(
            video_id, f'https://www.youtube.com/watch?v={video_id}',
            filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
        all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None])

        # Prefer the new polymer UI captures as we support extracting more metadata from them
        # WBM captures seem to all switch to this layout ~July 2020
        modern_captures = [x for x in all_captures if x >= 20200701000000]
        if modern_captures:
            capture_dates.append(modern_captures[0])
        capture_dates.append(url_date)
        if all_captures:
            capture_dates.append(all_captures[0])

        if 'captures' in self._configuration_arg('check_all'):
            capture_dates.extend(modern_captures + all_captures)

        # Fallbacks if any of the above fail
        capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE])
        return orderedSet(filter(None, capture_dates))

    def _real_extract(self, url):
        video_id, url_date, url_date_2 = self._match_valid_url(url).group('id', 'date', 'date2')
        url_date = url_date or url_date_2

        urlh = None
        try:
            video_file_webpage = self._request_webpage(
                HEADRequest(internal_fake_url), video_id,
                note='Fetching video file url', expected_status=True)
            urlh = self._request_webpage(
                HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
                video_id, note='Fetching archived video file url', expected_status=True)
        except ExtractorError as e:
            # HTTP Error 404 is expected if the video is not saved.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
                raise ExtractorError(
                    'HTTP Error %s. Most likely the video is not archived or issue with web.archive.org.' % e.cause.code,
                self.raise_no_formats(
                    'The requested video is not archived, indexed, or there is an issue with web.archive.org',
                    expected=True)
            else:
                raise
        video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
        video_file_url_qs = parse_qs(video_file_url)

        # Attempt to recover any ext & format info from playback url
        format = {'url': video_file_url}
        capture_dates = self._get_capture_dates(video_id, int_or_none(url_date))
        self.write_debug('Captures to try: ' + join_nonempty(*capture_dates, delim=', '))
        info = {'id': video_id}
        for capture in capture_dates:
            webpage = self._download_webpage(
                (self._WAYBACK_BASE_URL + 'http://www.youtube.com/watch?v=%s') % (capture, video_id),
                video_id=video_id, fatal=False, errnote='unable to download capture webpage (it may not be archived)',
                note='Downloading capture webpage')
            current_info = self._extract_metadata(video_id, webpage or '')
            # Try to avoid getting deleted video metadata
            if current_info.get('title'):
                info = merge_dicts(info, current_info)
                if 'captures' not in self._configuration_arg('check_all'):
                    break

        info['thumbnails'] = self._extract_thumbnails(video_id)

        if urlh:
            url = compat_urllib_parse_unquote(urlh.geturl())
            video_file_url_qs = parse_qs(url)
            # Attempt to recover any ext & format info from playback url & response headers
            format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
            itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
            if itag and itag in YoutubeIE._formats:  # Naughty access but it works
            if itag and itag in YoutubeIE._formats:
                format.update(YoutubeIE._formats[itag])
                format.update({'format_id': itag})
            else:
                mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
                ext = mimetype2ext(mime) or determine_ext(video_file_url)
                ext = (mimetype2ext(mime)
                       or urlhandle_detect_ext(urlh)
                       or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
                format.update({'ext': ext})
        return {
            'id': video_id,
            'title': title,
            'formats': [format],
            'duration': str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
        }
            info['formats'] = [format]
            if not info.get('duration'):
                info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))

        if not info.get('title'):
            info['title'] = video_id
        return info
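
_call_cdx_api above normalises the Wayback CDX response by zipping its header row into one dict per capture. A self-contained sketch of the same call without yt-dlp's helpers, trimmed to the fields the extractor requests (untested network code, for illustration):

import json
import urllib.parse
import urllib.request

def cdx_captures(url, limit=5):
    # The first row of the JSON response names the columns; remaining rows are captures.
    query = urllib.parse.urlencode({
        'url': url, 'output': 'json', 'limit': limit,
        'fl': 'original,mimetype,length,timestamp', 'filter': 'statuscode:200'})
    with urllib.request.urlopen(f'https://web.archive.org/cdx/search/cdx?{query}') as resp:
        rows = json.load(resp)
    return [dict(zip(rows[0], row)) for row in rows[1:]] if len(rows) >= 2 else []

# for capture in cdx_captures('https://www.youtube.com/watch?v=jNQXAC9IVRw'):
#     print(capture['timestamp'], capture['original'])
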
@@ -124,8 +124,7 @@ class ArcPublishingIE(InfoExtractor):
                formats.extend(smil_formats)
            elif stream_type in ('ts', 'hls'):
                m3u8_formats = self._extract_m3u8_formats(
                    s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
                    m3u8_id='hls', fatal=False)
                    s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False)
                if all([f.get('acodec') == 'none' for f in m3u8_formats]):
                    continue
                for f in m3u8_formats:
@@ -158,7 +157,7 @@ class ArcPublishingIE(InfoExtractor):

        return {
            'id': uuid,
            'title': self._live_title(title) if is_live else title,
            'title': title,
            'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
            'description': try_get(video, lambda x: x['subheadlines']['basic']),
            'formats': formats,

@@ -280,7 +280,7 @@ class ARDMediathekIE(ARDMediathekBaseIE):

        info.update({
            'id': video_id,
            'title': self._live_title(title) if info.get('is_live') else title,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        })
@@ -376,9 +376,24 @@ class ARDIE(InfoExtractor):
            formats.append(f)
        self._sort_formats(formats)

        _SUB_FORMATS = (
            ('./dataTimedText', 'ttml'),
            ('./dataTimedTextNoOffset', 'ttml'),
            ('./dataTimedTextVtt', 'vtt'),
        )

        subtitles = {}
        for subsel, subext in _SUB_FORMATS:
            for node in video_node.findall(subsel):
                subtitles.setdefault('de', []).append({
                    'url': node.attrib['url'],
                    'ext': subext,
                })

        return {
            'id': xpath_text(video_node, './videoId', default=display_id),
            'formats': formats,
            'subtitles': subtitles,
            'display_id': display_id,
            'title': video_node.find('./title').text,
            'duration': parse_duration(video_node.find('./duration').text),
@@ -388,7 +403,14 @@ class ARDIE(InfoExtractor):


class ARDBetaMediathekIE(ARDMediathekBaseIE):
    _VALID_URL = r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/(?P<mode>player|live|video|sendung|sammlung)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)'
    _VALID_URL = r'''(?x)https://
        (?:(?:beta|www)\.)?ardmediathek\.de/
        (?:(?P<client>[^/]+)/)?
        (?:player|live|video|(?P<playlist>sendung|sammlung))/
        (?:(?P<display_id>(?(playlist)[^?#]+?|[^?#]+))/)?
        (?P<id>(?(playlist)|Y3JpZDovL)[a-zA-Z0-9]+)
        (?(playlist)/(?P<season>\d+)?/?(?:[?#]|$))'''

    _TESTS = [{
        'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',
        'md5': 'a1dc75a39c61601b980648f7c9f9f71d',
@@ -403,6 +425,25 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
            'upload_date': '20200805',
            'ext': 'mp4',
        },
        'skip': 'Error',
    }, {
        'url': 'https://www.ardmediathek.de/video/tagesschau-oder-tagesschau-20-00-uhr/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
        'md5': 'f1837e563323b8a642a8ddeff0131f51',
        'info_dict': {
            'id': '10049223',
            'ext': 'mp4',
            'title': 'tagesschau, 20:00 Uhr',
            'timestamp': 1636398000,
            'description': 'md5:39578c7b96c9fe50afdf5674ad985e6b',
            'upload_date': '20211108',
        },
    }, {
        'url': 'https://www.ardmediathek.de/sendung/beforeigners/beforeigners/staffel-1/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JlZm9yZWlnbmVycw/1',
        'playlist_count': 6,
        'info_dict': {
            'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL2JlZm9yZWlnbmVycw',
            'title': 'beforeigners/beforeigners/staffel-1',
        },
    }, {
        'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
        'only_matching': True,
@@ -426,6 +467,12 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
        # playlist of type 'sammlung'
        'url': 'https://www.ardmediathek.de/ard/sammlung/team-muenster/5JpTzLSbWUAK8184IOvEir/',
        'only_matching': True,
    }, {
        'url': 'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/',
        'only_matching': True,
    }, {
        'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3dkci5kZS9CZWl0cmFnLWQ2NDJjYWEzLTMwZWYtNGI4NS1iMTI2LTU1N2UxYTcxOGIzOQ/tatort-duo-koeln-leipzig-ihr-kinderlein-kommet',
        'only_matching': True,
    }]

def _ARD_load_playlist_snipped(self, playlist_id, display_id, client, mode, pageNumber):
|
||||
@@ -522,23 +569,16 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
||||
break
|
||||
pageNumber = pageNumber + 1
|
||||
|
||||
return self.playlist_result(entries, playlist_title=display_id)
|
||||
return self.playlist_result(entries, playlist_id, playlist_title=display_id)
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = self._match_valid_url(url)
|
||||
video_id = mobj.group('video_id')
|
||||
display_id = mobj.group('display_id')
|
||||
if display_id:
|
||||
display_id = display_id.rstrip('/')
|
||||
if not display_id:
|
||||
display_id = video_id
|
||||
video_id, display_id, playlist_type, client, season_number = self._match_valid_url(url).group(
|
||||
'id', 'display_id', 'playlist', 'client', 'season')
|
||||
display_id, client = display_id or video_id, client or 'ard'
|
||||
|
||||
if mobj.group('mode') in ('sendung', 'sammlung'):
|
||||
# this is a playlist-URL
|
||||
return self._ARD_extract_playlist(
|
||||
url, video_id, display_id,
|
||||
mobj.group('client'),
|
||||
mobj.group('mode'))
|
||||
if playlist_type:
|
||||
# TODO: Extract only specified season
|
||||
return self._ARD_extract_playlist(url, video_id, display_id, client, playlist_type)
|
||||
|
||||
player_page = self._download_json(
|
||||
'https://api.ardmediathek.de/public-gateway',
|
||||
@@ -574,7 +614,7 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
||||
}
|
||||
}
|
||||
}
|
||||
}''' % (mobj.group('client'), video_id),
|
||||
}''' % (client, video_id),
|
||||
}).encode(), headers={
|
||||
'Content-Type': 'application/json'
|
||||
})['data']['playerPage']
|
||||
|
||||
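For reference, the consolidated pattern really does route playlist and video URLs through a single regex by way of conditional groups. A quick sketch, with the regex copied from the diff and a sample URL taken from the tests:

import re

VALID_URL = r'''(?x)https://
    (?:(?:beta|www)\.)?ardmediathek\.de/
    (?:(?P<client>[^/]+)/)?
    (?:player|live|video|(?P<playlist>sendung|sammlung))/
    (?:(?P<display_id>(?(playlist)[^?#]+?|[^?#]+))/)?
    (?P<id>(?(playlist)|Y3JpZDovL)[a-zA-Z0-9]+)
    (?(playlist)/(?P<season>\d+)?/?(?:[?#]|$))'''

m = re.match(VALID_URL, 'https://www.ardmediathek.de/sendung/beforeigners/beforeigners/staffel-1/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JlZm9yZWlnbmVycw/1')
# (?(playlist)...) branches only apply when the sendung/sammlung group matched
print(m.group('playlist'), m.group('season'))  # sendung 1
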
@@ -7,6 +7,7 @@ from ..compat import (
    compat_urllib_parse_urlparse,
)
from ..utils import (
    format_field,
    float_or_none,
    int_or_none,
    parse_iso8601,
@@ -92,7 +93,7 @@ class ArnesIE(InfoExtractor):
            'timestamp': parse_iso8601(video.get('creationTime')),
            'channel': channel.get('name'),
            'channel_id': channel_id,
            'channel_url': self._BASE_URL + '/?channel=' + channel_id if channel_id else None,
            'channel_url': format_field(channel_id, template=f'{self._BASE_URL}/?channel=%s'),
            'duration': float_or_none(video.get('duration'), 1000),
            'view_count': int_or_none(video.get('views')),
            'tags': video.get('hashtags'),

@@ -12,6 +12,7 @@ from ..utils import (
    int_or_none,
    parse_qs,
    qualities,
    strip_or_none,
    try_get,
    unified_strdate,
    url_or_none,
@@ -253,3 +254,44 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
        title = collection.get('title')
        description = collection.get('shortDescription') or collection.get('teaserText')
        return self.playlist_result(entries, playlist_id, title, description)


class ArteTVCategoryIE(ArteTVBaseIE):
    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>[\w-]+(?:/[\w-]+)*)/?\s*$' % ArteTVBaseIE._ARTE_LANGUAGES
    _TESTS = [{
        'url': 'https://www.arte.tv/en/videos/politics-and-society/',
        'info_dict': {
            'id': 'politics-and-society',
            'title': 'Politics and society',
            'description': 'Investigative documentary series, geopolitical analysis, and international commentary',
        },
        'playlist_mincount': 13,
    },
    ]

    @classmethod
    def suitable(cls, url):
        return (
            not any(ie.suitable(url) for ie in (ArteTVIE, ArteTVPlaylistIE, ))
            and super(ArteTVCategoryIE, cls).suitable(url))

    def _real_extract(self, url):
        lang, playlist_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, playlist_id)

        items = []
        for video in re.finditer(
                r'<a\b[^>]*?href\s*=\s*(?P<q>"|\'|\b)(?P<url>https?://www\.arte\.tv/%s/videos/[\w/-]+)(?P=q)' % lang,
                webpage):
            video = video.group('url')
            if video == url:
                continue
            if any(ie.suitable(video) for ie in (ArteTVIE, ArteTVPlaylistIE, )):
                items.append(video)

        title = (self._og_search_title(webpage, default=None)
                 or self._html_search_regex(r'<title\b[^>]*>([^<]+)</title>', default=None))
        title = strip_or_none(title.rsplit('|', 1)[0]) or self._generic_title(url)

        return self.playlist_from_matches(items, playlist_id=playlist_id, playlist_title=title,
                                          description=self._og_search_description(webpage, default=None))

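ArteTVCategoryIE's harvesting step is essentially a finditer over anchor tags. A simplified, self-contained sketch (the \b quote alternative from the original pattern is dropped here, and the toy markup is invented):

import re

def category_video_urls(page_url, webpage, lang='en'):
    # Collect /videos/ links on a category page, skipping self-links
    urls = []
    for m in re.finditer(
            r'<a\b[^>]*?href\s*=\s*(?P<q>"|\')(?P<url>https?://www\.arte\.tv/%s/videos/[\w/-]+)(?P=q)' % lang,
            webpage):
        if m.group('url') != page_url:
            urls.append(m.group('url'))
    return urls

html = '<a href="https://www.arte.tv/en/videos/politics-and-society/some-doc/">x</a>'
print(category_video_urls('https://www.arte.tv/en/videos/politics-and-society/', html))
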
@@ -8,6 +8,7 @@ from ..utils import (
    float_or_none,
    jwt_encode_hs256,
    try_get,
    ExtractorError,
)


@@ -94,6 +95,11 @@ class ATVAtIE(InfoExtractor):
        })

        video_id, videos_data = list(videos['data'].items())[0]
        error_msg = try_get(videos_data, lambda x: x['error']['title'])
        if error_msg == 'Geo check failed':
            self.raise_geo_restricted(error_msg)
        elif error_msg:
            raise ExtractorError(error_msg)
        entries = [
            self._extract_video_info(url, contentResource[video['id']], video)
            for video in videos_data]

@@ -14,7 +14,7 @@ from ..utils import (


class AudiomackIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:song/|(?=.+/song/))(?P<id>[\w/-]+)'
    IE_NAME = 'audiomack'
    _TESTS = [
        # hosted on audiomack
@@ -29,6 +29,7 @@ class AudiomackIE(InfoExtractor):
            }
        },
        # audiomack wrapper around soundcloud song
        # Needs new test URL.
        {
            'add_ie': ['Soundcloud'],
            'url': 'http://www.audiomack.com/song/hip-hop-daily/black-mamba-freestyle',
@@ -39,15 +40,16 @@ class AudiomackIE(InfoExtractor):
                'title': 'Black Mamba Freestyle [Prod. By Danny Wolf]',
                'uploader': 'ILOVEMAKONNEN',
                'upload_date': '20160414',
            }
        },
            },
            'skip': 'Song has been removed from the site',
        },
    ]

    def _real_extract(self, url):
        # URLs end with [uploader name]/[uploader title]
        # URLs end with [uploader name]/song/[uploader title]
        # this title is whatever the user types in, and is rarely
        # the proper song title. Real metadata is in the api response
        album_url_tag = self._match_id(url)
        album_url_tag = self._match_id(url).replace('/song/', '/')

        # Request the extended version of the api for extra fields like artist and title
        api_response = self._download_json(
@@ -73,13 +75,13 @@ class AudiomackIE(InfoExtractor):


class AudiomackAlbumIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/album/(?P<id>[\w/-]+)'
    _VALID_URL = r'https?://(?:www\.)?audiomack\.com/(?:album/|(?=.+/album/))(?P<id>[\w/-]+)'
    IE_NAME = 'audiomack:album'
    _TESTS = [
        # Standard album playlist
        {
            'url': 'http://www.audiomack.com/album/flytunezcom/tha-tour-part-2-mixtape',
            'playlist_count': 15,
            'playlist_count': 11,
            'info_dict':
            {
                'id': '812251',
@@ -95,24 +97,27 @@ class AudiomackAlbumIE(InfoExtractor):
            },
            'playlist': [{
                'info_dict': {
                    'title': 'PPP (Pistol P Project) - 9. Heaven or Hell (CHIMACA) ft Zuse (prod by DJ FU)',
                    'id': '837577',
                    'title': 'PPP (Pistol P Project) - 8. Real (prod by SYK SENSE )',
                    'id': '837576',
                    'ext': 'mp3',
                    'uploader': 'Lil Herb a.k.a. G Herbo',
                }
            }, {
                'info_dict': {
                    'title': 'PPP (Pistol P Project) - 10. 4 Minutes Of Hell Part 4 (prod by DY OF 808 MAFIA)',
                    'id': '837580',
                    'ext': 'mp3',
                    'uploader': 'Lil Herb a.k.a. G Herbo',
                }
            }],
            'params': {
                'playliststart': 9,
                'playlistend': 9,
            }
        }
    ]

    def _real_extract(self, url):
        # URLs end with [uploader name]/[uploader title]
        # URLs end with [uploader name]/album/[uploader title]
        # this title is whatever the user types in, and is rarely
        # the proper song title. Real metadata is in the api response
        album_url_tag = self._match_id(url)
        album_url_tag = self._match_id(url).replace('/album/', '/')
        result = {'_type': 'playlist', 'entries': []}
        # There is no one endpoint for album metadata - instead it is included/repeated in each song's metadata
        # Therefore we don't know how many songs the album has and must infi-loop until failure
@@ -134,7 +139,7 @@ class AudiomackAlbumIE(InfoExtractor):
        # Pull out the album metadata and add to result (if it exists)
        for resultkey, apikey in [('id', 'album_id'), ('title', 'album_title')]:
            if apikey in api_response and resultkey not in result:
                result[resultkey] = api_response[apikey]
                result[resultkey] = compat_str(api_response[apikey])
        song_id = url_basename(api_response['url']).rpartition('.')[0]
        result['entries'].append({
            'id': compat_str(api_response.get('id', song_id)),

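The id normalisation in both extractors is the same one-line trick. A runnable sketch using the song regex from the diff:

import re

def album_url_tag(url):
    # The matched id may contain 'uploader/song/title'; the API expects 'uploader/title'
    m = re.match(r'https?://(?:www\.)?audiomack\.com/(?:song/|(?=.+/song/))(?P<id>[\w/-]+)', url)
    return m.group('id').replace('/song/', '/')

print(album_url_tag('https://audiomack.com/hip-hop-daily/song/black-mamba-freestyle'))
# -> hip-hop-daily/black-mamba-freestyle
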
@@ -9,6 +9,7 @@ from ..compat import (
    compat_str,
)
from ..utils import (
    format_field,
    int_or_none,
    parse_iso8601,
    smuggle_url,
@@ -41,9 +42,9 @@ class AWAANBaseIE(InfoExtractor):

        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'title': title,
            'description': video_data.get('description_en') or video_data.get('description_ar'),
            'thumbnail': 'http://admin.mangomolo.com/analytics/%s' % img if img else None,
            'thumbnail': format_field(img, template='http://admin.mangomolo.com/analytics/%s'),
            'duration': int_or_none(video_data.get('duration')),
            'timestamp': parse_iso8601(video_data.get('create_time'), ' '),
            'is_live': is_live,

@@ -183,6 +183,7 @@ class BandcampIE(InfoExtractor):
                    'format_note': f.get('description'),
                    'filesize': parse_filesize(f.get('size_mb')),
                    'vcodec': 'none',
                    'acodec': format_id.split('-')[0],
                })

        self._sort_formats(formats)
@@ -212,7 +213,7 @@ class BandcampIE(InfoExtractor):

class BandcampAlbumIE(BandcampIE):
    IE_NAME = 'Bandcamp:album'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?!/music)(?:/album/(?P<id>[^/?#&]+))?'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com/album/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -257,14 +258,6 @@ class BandcampAlbumIE(BandcampIE):
            'id': 'hierophany-of-the-open-grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
            'id': 'dotscale',
            'uploader_id': 'dotscale',
        },
        'playlist_mincount': 7,
    }, {
        # with escaped quote in title
        'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
@@ -391,41 +384,63 @@ class BandcampWeeklyIE(BandcampIE):
    }


class BandcampMusicIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<id>[^/]+)\.bandcamp\.com/music'
class BandcampUserIE(InfoExtractor):
    IE_NAME = 'Bandcamp:user'
    _VALID_URL = r'https?://(?!www\.)(?P<id>[^.]+)\.bandcamp\.com(?:/music)?/?(?:[#?]|$)'

    _TESTS = [{
        # Type 1 Bandcamp user page.
        'url': 'https://adrianvonziegler.bandcamp.com',
        'info_dict': {
            'id': 'adrianvonziegler',
            'title': 'Discography of adrianvonziegler',
        },
        'playlist_mincount': 23,
    }, {
        # Bandcamp user page with only one album
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'id': 'dotscale',
            'title': 'Discography of dotscale'
        },
        'playlist_count': 1,
    }, {
        # Type 2 Bandcamp user page.
        'url': 'https://nightcallofficial.bandcamp.com',
        'info_dict': {
            'id': 'nightcallofficial',
            'title': 'Discography of nightcallofficial',
        },
        'playlist_count': 4,
    }, {
        'url': 'https://steviasphere.bandcamp.com/music',
        'playlist_mincount': 47,
        'info_dict': {
            'id': 'steviasphere',
            'title': 'Discography of steviasphere',
        },
    }, {
        'url': 'https://coldworldofficial.bandcamp.com/music',
        'playlist_mincount': 10,
        'info_dict': {
            'id': 'coldworldofficial',
            'title': 'Discography of coldworldofficial',
        },
    }, {
        'url': 'https://nuclearwarnowproductions.bandcamp.com/music',
        'playlist_mincount': 399,
        'info_dict': {
            'id': 'nuclearwarnowproductions',
            'title': 'Discography of nuclearwarnowproductions',
        },
    }
    ]

    _TYPE_IE_DICT = {
        'album': BandcampAlbumIE.ie_key(),
        'track': BandcampIE.ie_key()
    }
    }]

    def _real_extract(self, url):
        id = self._match_id(url)
        webpage = self._download_webpage(url, id)
        items = re.findall(r'href\=\"\/(?P<path>(?P<type>album|track)+/[^\"]+)', webpage)
        entries = [
            self.url_result(
                f'https://{id}.bandcamp.com/{item[0]}',
                ie=self._TYPE_IE_DICT[item[1]])
            for item in items]
        return self.playlist_result(entries, id)
        uploader = self._match_id(url)
        webpage = self._download_webpage(url, uploader)

        discography_data = (re.findall(r'<li data-item-id=["\'][^>]+>\s*<a href=["\']([^"\']+)', webpage)
                            or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage))

        return self.playlist_from_matches(
            discography_data, uploader, f'Discography of {uploader}', getter=lambda x: urljoin(url, x))

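The new discography scraping reduces to two regexes plus urljoin. A self-contained sketch (the toy markup is invented for illustration; the regexes are the ones from the diff):

import re
from urllib.parse import urljoin

def discography_urls(page_url, webpage):
    # Two page layouts, as in the diff: a data-item-id grid or a trackTitle list
    paths = (re.findall(r'<li data-item-id=["\'][^>]+>\s*<a href=["\']([^"\']+)', webpage)
             or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage))
    return [urljoin(page_url, p) for p in paths]

html = '<li data-item-id="album-1"> <a href="/album/loom">Loom</a></li>'
print(discography_urls('https://dotscale.bandcamp.com', html))
# -> ['https://dotscale.bandcamp.com/album/loom']
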
@@ -11,6 +11,7 @@ from ..compat import (
    compat_etree_Element,
    compat_HTTPError,
    compat_str,
    compat_urllib_error,
    compat_urlparse,
)
from ..utils import (
@@ -38,7 +39,7 @@ from ..utils import (
class BBCCoUkIE(InfoExtractor):
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    _ID_REGEX = r'(?:[pbm][\da-z]{7}|w[\da-z]{7,14})'
    _ID_REGEX = r'(?:[pbml][\da-z]{7}|w[\da-z]{7,14})'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?bbc\.co\.uk/
@@ -394,9 +395,17 @@ class BBCCoUkIE(InfoExtractor):
                    formats.extend(self._extract_mpd_formats(
                        href, programme_id, mpd_id=format_id, fatal=False))
                elif transfer_format == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                    # TODO: let expected_status be passed into _extract_xxx_formats() instead
                    try:
                        fmts = self._extract_m3u8_formats(
                            href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                        m3u8_id=format_id, fatal=False))
                            m3u8_id=format_id, fatal=False)
                    except ExtractorError as e:
                        if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
                                and e.exc_info[1].code in (403, 404)):
                            raise
                        fmts = []
                    formats.extend(fmts)
                elif transfer_format == 'hds':
                    formats.extend(self._extract_f4m_formats(
                        href, programme_id, f4m_id=format_id, fatal=False))
@@ -451,9 +460,10 @@ class BBCCoUkIE(InfoExtractor):
            playlist = self._download_json(
                'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                playlist_id, 'Downloading playlist JSON')
            formats = []
            subtitles = {}

            version = playlist.get('defaultAvailableVersion')
            if version:
            for version in playlist.get('allAvailableVersions', []):
                smp_config = version['smpConfig']
                title = smp_config['title']
                description = smp_config['summary']
@@ -463,7 +473,16 @@ class BBCCoUkIE(InfoExtractor):
                        continue
                    programme_id = item.get('vpid')
                    duration = int_or_none(item.get('duration'))
                    formats, subtitles = self._download_media_selector(programme_id)
                    version_formats, version_subtitles = self._download_media_selector(programme_id)
                    types = version['types']
                    for f in version_formats:
                        f['format_note'] = ', '.join(types)
                        if any('AudioDescribed' in x for x in types):
                            f['language_preference'] = -10
                    formats += version_formats
                    for tag, subformats in (version_subtitles or {}).items():
                        subtitles.setdefault(tag, []).extend(subformats)

            return programme_id, title, description, duration, formats, subtitles
        except ExtractorError as ee:
            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):

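The try/except above narrows a fatal HLS failure down to HTTP 403/404. A standalone sketch of the same guard, assuming an InfoExtractor-like object ie (the helper name extract_hls_tolerant is invented; the exc_info attribute is the one the diff itself relies on):

import urllib.error

def extract_hls_tolerant(ie, href, programme_id, format_id):
    # Some BBC media-selector HLS URLs 403/404; treat those as "no formats"
    # instead of aborting the whole extraction
    try:
        return ie._extract_m3u8_formats(
            href, programme_id, ext='mp4', entry_protocol='m3u8_native',
            m3u8_id=format_id, fatal=False)
    except Exception as e:
        cause = getattr(e, 'exc_info', (None, None))[1]
        if isinstance(cause, urllib.error.HTTPError) and cause.code in (403, 404):
            return []
        raise
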
@@ -774,21 +793,33 @@ class BBCIE(BBCCoUkIE):
            'timestamp': 1437785037,
            'upload_date': '20150725',
        },
    }, {
        # video with window.__INITIAL_DATA__ and value as JSON string
        'url': 'https://www.bbc.com/news/av/world-europe-59468682',
        'info_dict': {
            'id': 'p0b71qth',
            'ext': 'mp4',
            'title': 'Why France is making this woman a national hero',
            'description': 'md5:7affdfab80e9c3a1f976230a1ff4d5e4',
            'thumbnail': r're:https?://.+/.+\.jpg',
            'timestamp': 1638230731,
            'upload_date': '20211130',
        },
    }, {
        # single video article embedded with data-media-vpid
        'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
        'only_matching': True,
    }, {
        # bbcthreeConfig
        'url': 'https://www.bbc.co.uk/bbcthree/clip/73d0bbd0-abc3-4cea-b3c0-cdae21905eb1',
        'info_dict': {
            'id': 'p06556y7',
            'ext': 'mp4',
            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
            'description': 'md5:4b7dfd063d5a789a1512e99662be3ddd',
            'title': 'Things Not To Say to people that live on council estates',
            'description': "From being labelled a 'chav', to the presumption that they're 'scroungers', people who live on council estates encounter all kinds of prejudices and false assumptions about themselves, their families, and their lifestyles. Here, eight people discuss the common statements, misconceptions, and clichés that they're tired of hearing.",
            'duration': 360,
            'thumbnail': r're:https?://.+/.+\.jpg',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        # window.__PRELOADED_STATE__
        'url': 'https://www.bbc.co.uk/radio/play/b0b9z4yl',
@@ -1161,9 +1192,16 @@ class BBCIE(BBCCoUkIE):
            return self.playlist_result(
                entries, playlist_id, playlist_title, playlist_description)

        initial_data = self._parse_json(self._search_regex(
            r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage,
            'preload state', default='{}'), playlist_id, fatal=False)
        initial_data = self._search_regex(
            r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage,
            'quoted preload state', default=None)
        if initial_data is None:
            initial_data = self._search_regex(
                r'window\.__INITIAL_DATA__\s*=\s*({.+?})\s*;', webpage,
                'preload state', default={})
        else:
            initial_data = self._parse_json(initial_data or '"{}"', playlist_id, fatal=False)
        initial_data = self._parse_json(initial_data, playlist_id, fatal=False)
        if initial_data:
            def parse_media(media):
                if not media:
@@ -1204,7 +1242,10 @@ class BBCIE(BBCCoUkIE):
                if name == 'media-experience':
                    parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict))
                elif name == 'article':
                    for block in (try_get(resp, lambda x: x['data']['blocks'], list) or []):
                    for block in (try_get(resp,
                                          (lambda x: x['data']['blocks'],
                                           lambda x: x['data']['content']['model']['blocks'],),
                                          list) or []):
                        if block.get('type') != 'media':
                            continue
                        parse_media(block.get('model'))

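The double decode above is easy to get wrong: newer BBC pages serialise window.__INITIAL_DATA__ as a quoted JSON string, so it must be JSON-decoded twice. A minimal sketch of the quoted-then-plain fallback, using the regexes from the diff:

import json
import re

def load_initial_data(webpage):
    # Quoted variant first: the value itself is a JSON string
    m = re.search(r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage)
    if m:
        return json.loads(json.loads(m.group(1)))
    # Older pages embed the object directly
    m = re.search(r'window\.__INITIAL_DATA__\s*=\s*({.+?})\s*;', webpage)
    return json.loads(m.group(1)) if m else {}

print(load_initial_data('window.__INITIAL_DATA__="{\\"a\\": 1}";'))  # {'a': 1}
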
@@ -1,32 +1,45 @@
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_str,
)

from ..utils import (
    int_or_none,
    parse_qs,
    traverse_obj,
    try_get,
    unified_timestamp,
)


class BeegIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?beeg\.(?:com|porn(?:/video)?)/(?P<id>\d+)'
    _VALID_URL = r'https?://(?:www\.)?beeg\.(?:com(?:/video)?)/-?(?P<id>\d+)'
    _TESTS = [{
        # api/v6 v1
        'url': 'http://beeg.com/5416503',
        'md5': 'a1a1b1a8bc70a89e49ccfd113aed0820',
        'url': 'https://beeg.com/-0983946056129650',
        'md5': '51d235147c4627cfce884f844293ff88',
        'info_dict': {
            'id': '5416503',
            'id': '0983946056129650',
            'ext': 'mp4',
            'title': 'Sultry Striptease',
            'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
            'timestamp': 1391813355,
            'upload_date': '20140207',
            'duration': 383,
            'title': 'sucked cock and fucked in a private plane',
            'duration': 927,
            'tags': list,
            'age_limit': 18,
            'upload_date': '20220131',
            'timestamp': 1643656455,
            'display_id': 2540839,
        }
    }, {
        'url': 'https://beeg.com/-0599050563103750?t=4-861',
        'md5': 'bd8b5ea75134f7f07fad63008db2060e',
        'info_dict': {
            'id': '0599050563103750',
            'ext': 'mp4',
            'title': 'Bad Relatives',
            'duration': 2060,
            'tags': list,
            'age_limit': 18,
            'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
            'timestamp': 1643623200,
            'display_id': 2569965,
            'upload_date': '20220131',
        }
    }, {
        # api/v6 v2
@@ -36,12 +49,6 @@ class BeegIE(InfoExtractor):
        # api/v6 v2 w/o t
        'url': 'https://beeg.com/1277207756',
        'only_matching': True,
    }, {
        'url': 'https://beeg.porn/video/5416503',
        'only_matching': True,
    }, {
        'url': 'https://beeg.porn/5416503',
        'only_matching': True,
    }]

    def _real_extract(self, url):
@@ -49,68 +56,38 @@ class BeegIE(InfoExtractor):

        webpage = self._download_webpage(url, video_id)

        beeg_version = self._search_regex(
            r'beeg_version\s*=\s*([\da-zA-Z_-]+)', webpage, 'beeg version',
            default='1546225636701')

        if len(video_id) >= 10:
            query = {
                'v': 2,
            }
            qs = parse_qs(url)
            t = qs.get('t', [''])[0].split('-')
            if len(t) > 1:
                query.update({
                    's': t[0],
                    'e': t[1],
                })
        else:
            query = {'v': 1}

        for api_path in ('', 'api.'):
            video = self._download_json(
                'https://%sbeeg.com/api/v6/%s/video/%s'
                % (api_path, beeg_version, video_id), video_id,
                fatal=api_path == 'api.', query=query)
            if video:
                break
            'https://store.externulls.com/facts/file/%s' % video_id,
            video_id, 'Downloading JSON for %s' % video_id)

        fc_facts = video.get('fc_facts')
        first_fact = {}
        for fact in fc_facts:
            if not first_fact or try_get(fact, lambda x: x['id'] < first_fact['id']):
                first_fact = fact

        resources = traverse_obj(video, ('file', 'hls_resources')) or first_fact.get('hls_resources')

        formats = []
        for format_id, video_url in video.items():
            if not video_url:
        for format_id, video_uri in resources.items():
            if not video_uri:
                continue
            height = self._search_regex(
                r'^(\d+)[pP]$', format_id, 'height', default=None)
            if not height:
                continue
            formats.append({
                'url': self._proto_relative_url(
                    video_url.replace('{DATA_MARKERS}', 'data=pc_XX__%s_0' % beeg_version), 'https:'),
                'format_id': format_id,
                'height': int(height),
            })
            height = int_or_none(self._search_regex(r'fl_cdn_(\d+)', format_id, 'height', default=None))
            current_formats = self._extract_m3u8_formats(f'https://video.beeg.com/{video_uri}', video_id, ext='mp4', m3u8_id=str(height))
            for f in current_formats:
                f['height'] = height
            formats.extend(current_formats)

        self._sort_formats(formats)

        title = video['title']
        video_id = compat_str(video.get('id') or video_id)
        display_id = video.get('code')
        description = video.get('desc')
        series = video.get('ps_name')

        timestamp = unified_timestamp(video.get('date'))
        duration = int_or_none(video.get('duration'))

        tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'series': series,
            'timestamp': timestamp,
            'duration': duration,
            'tags': tags,
            'display_id': first_fact.get('id'),
            'title': traverse_obj(video, ('file', 'stuff', 'sf_name')),
            'description': traverse_obj(video, ('file', 'stuff', 'sf_story')),
            'timestamp': unified_timestamp(first_fact.get('fc_created')),
            'duration': int_or_none(traverse_obj(video, ('file', 'fl_duration'))),
            'tags': traverse_obj(video, ('tags', ..., 'tg_name')),
            'formats': formats,
            'age_limit': self._rta_search(webpage),
        }

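The ?t=start-end handling kept in the new code maps cleanly onto parse_qs. A sketch (clip_query is an invented helper; the 's'/'e' parameter names are the ones from the old v2 query in the diff):

from urllib.parse import parse_qs, urlparse

def clip_query(url):
    # beeg URLs can carry ?t=<start>-<end> (seconds), split into 's' and 'e'
    query = {'v': 2}
    t = parse_qs(urlparse(url).query).get('t', [''])[0].split('-')
    if len(t) > 1:
        query.update({'s': t[0], 'e': t[1]})
    return query

print(clip_query('https://beeg.com/-0599050563103750?t=4-861'))
# -> {'v': 2, 's': '4', 'e': '861'}
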
yt_dlp/extractor/bigo.py (new file, 59 lines)
@@ -0,0 +1,59 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import ExtractorError, urlencode_postdata


class BigoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'https://www.bigo.tv/ja/221338632',
        'info_dict': {
            'id': '6576287577575737440',
            'title': '土よ〜💁♂️ 休憩室/REST room',
            'thumbnail': r're:https?://.+',
            'uploader': '✨Shin💫',
            'uploader_id': '221338632',
            'is_live': True,
        },
        'skip': 'livestream',
    }, {
        'url': 'https://www.bigo.tv/th/Tarlerm1304',
        'only_matching': True,
    }, {
        'url': 'https://bigo.tv/115976881',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

        info_raw = self._download_json(
            'https://bigo.tv/studio/getInternalStudioInfo',
            user_id, data=urlencode_postdata({'siteId': user_id}))

        if not isinstance(info_raw, dict):
            raise ExtractorError('Received invalid JSON data')
        if info_raw.get('code'):
            raise ExtractorError(
                'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True)
        info = info_raw.get('data') or {}

        if not info.get('alive'):
            raise ExtractorError('This user is offline.', expected=True)

        return {
            'id': info.get('roomId') or user_id,
            'title': info.get('roomTopic') or info.get('nick_name') or user_id,
            'formats': [{
                'url': info.get('hls_src'),
                'ext': 'mp4',
                'protocol': 'm3u8',
            }],
            'thumbnail': info.get('snapshot'),
            'uploader': info.get('nick_name'),
            'uploader_id': user_id,
            'is_live': True,
        }

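The endpoint used above takes a plain form-encoded POST. A hedged, standard-library-only sketch of the same request; the endpoint and the siteId field come from the new file, but the response shape beyond that is assumed:

import json
import urllib.parse
import urllib.request

def bigo_studio_info(user_id):
    # POST siteId=<user_id> as application/x-www-form-urlencoded
    data = urllib.parse.urlencode({'siteId': user_id}).encode()
    req = urllib.request.Request('https://bigo.tv/studio/getInternalStudioInfo', data=data)
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)
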
@@ -1,5 +1,6 @@
# coding: utf-8

import base64
import hashlib
import itertools
import functools
@@ -16,17 +17,18 @@ from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
    mimetype2ext,
    parse_iso8601,
    traverse_obj,
    try_get,
    parse_count,
    smuggle_url,
    srt_subtitles_timecode,
    str_or_none,
    str_to_int,
    strip_jsonp,
    unified_timestamp,
    unsmuggle_url,
    urlencode_postdata,
    url_or_none,
    OnDemandPagedList
)

@@ -50,16 +52,14 @@ class BiliBiliIE(InfoExtractor):
        'url': 'http://www.bilibili.com/video/av1074402/',
        'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
        'info_dict': {
            'id': '1074402',
            'ext': 'flv',
            'id': '1074402_part1',
            'ext': 'mp4',
            'title': '【金坷垃】金泡沫',
            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
            'duration': 308.067,
            'timestamp': 1398012678,
            'upload_date': '20140420',
            'thumbnail': r're:^https?://.+\.jpg',
            'uploader': '菊子桑',
            'uploader_id': '156160',
            'uploader': '菊子桑',
            'upload_date': '20140420',
            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
            'timestamp': 1398012678,
        },
    }, {
        # Tested in BiliBiliBangumiIE
@@ -73,49 +73,27 @@ class BiliBiliIE(InfoExtractor):
        'url': 'http://bangumi.bilibili.com/anime/5802/play#100643',
        'md5': '3f721ad1e75030cc06faf73587cfec57',
        'info_dict': {
            'id': '100643',
            'id': '100643_part1',
            'ext': 'mp4',
            'title': 'CHAOS;CHILD',
            'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害"涩谷地震"之后复兴了的这个街区里新设立的私立高中...',
        },
        'skip': 'Geo-restricted to China',
    }, {
        # Title with double quotes
        'url': 'http://www.bilibili.com/video/av8903802/',
        'info_dict': {
            'id': '8903802',
            'title': '阿滴英文|英文歌分享#6 "Closer',
            'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
        },
        'playlist': [{
            'info_dict': {
                'id': '8903802_part1',
                'ext': 'flv',
                'ext': 'mp4',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
                'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
                'timestamp': 1488382634,
                'uploader_id': '65880958',
                'uploader': '阿滴英文',
            },
            'params': {
                'skip_download': True,
            },
        }, {
            'info_dict': {
                'id': '8903802_part2',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,
            },
        }]
    }, {
        # new BV video id format
        'url': 'https://www.bilibili.com/video/BV1JE411F741',
@@ -150,6 +128,7 @@ class BiliBiliIE(InfoExtractor):
        av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None)
        video_id = av_id

        info = {}
        anime_id = mobj.group('anime_id')
        page_id = mobj.group('page')
        webpage = self._download_webpage(url, video_id)

@@ -201,66 +180,95 @@ class BiliBiliIE(InfoExtractor):
        }
        headers.update(self.geo_verification_headers())

        video_info = self._parse_json(
            self._search_regex(r'window.__playinfo__\s*=\s*({.+?})</script>', webpage, 'video info', default=None) or '{}',
            video_id, fatal=False)
        video_info = video_info.get('data') or {}

        durl = traverse_obj(video_info, ('dash', 'video'))
        audios = traverse_obj(video_info, ('dash', 'audio')) or []
        entries = []

        RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
        for num, rendition in enumerate(RENDITIONS, start=1):
            payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
            sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()

            if not video_info:
                video_info = self._download_json(
                    'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
                    video_id, note='Downloading video info page',
                    headers=headers, fatal=num == len(RENDITIONS))

            if not video_info:
                continue

            if 'durl' not in video_info:
            if not durl and 'durl' not in video_info:
                if num < len(RENDITIONS):
                    continue
                self._report_error(video_info)

            for idx, durl in enumerate(video_info['durl']):
                formats = [{
                    'url': durl['url'],
                    'filesize': int_or_none(durl['size']),
                }]
                for backup_url in durl.get('backup_url', []):
            formats = []
            for idx, durl in enumerate(durl or video_info['durl']):
                formats.append({
                    'url': durl.get('baseUrl') or durl.get('base_url') or durl.get('url'),
                    'ext': mimetype2ext(durl.get('mimeType') or durl.get('mime_type')),
                    'fps': int_or_none(durl.get('frameRate') or durl.get('frame_rate')),
                    'width': int_or_none(durl.get('width')),
                    'height': int_or_none(durl.get('height')),
                    'vcodec': durl.get('codecs'),
                    'acodec': 'none' if audios else None,
                    'tbr': float_or_none(durl.get('bandwidth'), scale=1000),
                    'filesize': int_or_none(durl.get('size')),
                })
                for backup_url in traverse_obj(durl, 'backup_url', expected_type=list) or []:
                    formats.append({
                        'url': backup_url,
                        # backup URLs have lower priorities
                        'quality': -2 if 'hd.mp4' in backup_url else -3,
                    })

            for a_format in formats:
                a_format.setdefault('http_headers', {}).update({
                    'Referer': url,
            for audio in audios:
                formats.append({
                    'url': audio.get('baseUrl') or audio.get('base_url') or audio.get('url'),
                    'ext': mimetype2ext(audio.get('mimeType') or audio.get('mime_type')),
                    'fps': int_or_none(audio.get('frameRate') or audio.get('frame_rate')),
                    'width': int_or_none(audio.get('width')),
                    'height': int_or_none(audio.get('height')),
                    'acodec': audio.get('codecs'),
                    'vcodec': 'none',
                    'tbr': float_or_none(audio.get('bandwidth'), scale=1000),
                    'filesize': int_or_none(audio.get('size'))
                })
                for backup_url in traverse_obj(audio, 'backup_url', expected_type=list) or []:
                    formats.append({
                        'url': backup_url,
                        # backup URLs have lower priorities
                        'quality': -3,
                    })

            self._sort_formats(formats)

            entries.append({
                'id': '%s_part%s' % (video_id, idx),
            info.update({
                'id': video_id,
                'duration': float_or_none(durl.get('length'), 1000),
                'formats': formats,
                'http_headers': {
                    'Referer': url,
                },
            })
            break

        title = self._html_search_regex(
            (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
            group='title')
        self._sort_formats(formats)

        title = self._html_search_regex((
            r'<h1[^>]+title=(["\'])(?P<content>[^"\']+)',
            r'(?s)<h1[^>]*>(?P<content>.+?)</h1>',
            self._meta_regex('title')
        ), webpage, 'title', group='content', fatal=False)

        # Get part title for anthologies
        if page_id is not None:
            # TODO: The json is already downloaded by _extract_anthology_entries. Don't redownload for each video
            part_title = try_get(
                self._download_json(
            # TODO: The json is already downloaded by _extract_anthology_entries. Don't redownload for each video.
            part_info = traverse_obj(self._download_json(
                f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp',
                video_id, note='Extracting videos in anthology'),
                lambda x: x['data'][int(page_id) - 1]['part'])
            title = part_title or title
                video_id, note='Extracting videos in anthology'), 'data', expected_type=list)
            title = title if len(part_info) == 1 else traverse_obj(part_info, (int(page_id) - 1, 'part')) or title

        description = self._html_search_meta('description', webpage)
        timestamp = unified_timestamp(self._html_search_regex(
@@ -270,15 +278,15 @@ class BiliBiliIE(InfoExtractor):
        thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)

        # TODO 'view_count' requires deobfuscating Javascript
        info = {
            'id': str(video_id) if page_id is None else '%s_part%s' % (video_id, page_id),
        info.update({
            'id': f'{video_id}_part{page_id or 1}',
            'cid': cid,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnail': thumbnail,
            'duration': float_or_none(video_info.get('timelength'), scale=1000),
        }
        })

        uploader_mobj = re.search(
            r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>\s*(?P<name>[^<]+?)\s*<',
@@ -299,7 +307,7 @@ class BiliBiliIE(InfoExtractor):
                video_id, fatal=False, note='Downloading tags'), ('data', ..., 'tag_name')),
        }

        entries[0]['subtitles'] = {
        info['subtitles'] = {
            'danmaku': [{
                'ext': 'xml',
                'url': f'https://comment.bilibili.com/{cid}.xml',
@@ -334,19 +342,18 @@ class BiliBiliIE(InfoExtractor):
            entry['id'] = '%s_part%d' % (video_id, (idx + 1))

        return {
            '_type': 'multi_video',
            'id': str(video_id),
            'bv_id': bv_id,
            'title': title,
            'description': description,
            'entries': entries,
            **info, **top_level_info
        }

    def _extract_anthology_entries(self, bv_id, video_id, webpage):
        title = self._html_search_regex(
            (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>',
             r'<title>(?P<title>.+?)</title>'), webpage, 'title',
            group='title')
        json_data = self._download_json(
            f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp',

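Condensed, the new DASH branch above boils down to tagging split streams so the downloader can merge them. A sketch, under the assumption that video_info follows the window.__playinfo__ shape shown in the diff:

def dash_formats(video_info):
    # Video entries get acodec='none' when separate audio renditions exist;
    # audio entries always get vcodec='none'
    dash = video_info.get('dash') or {}
    videos, audios = dash.get('video') or [], dash.get('audio') or []
    formats = []
    for v in videos:
        formats.append({'url': v.get('baseUrl') or v.get('base_url'),
                        'vcodec': v.get('codecs'),
                        'acodec': 'none' if audios else None})
    for a in audios:
        formats.append({'url': a.get('baseUrl') or a.get('base_url'),
                        'acodec': a.get('codecs'), 'vcodec': 'none'})
    return formats
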
@@ -479,9 +486,9 @@ class BilibiliChannelIE(InfoExtractor):
            data = self._download_json(
                self._API_URL % (list_id, page_num), list_id, note=f'Downloading page {page_num}')['data']

            max_count = max_count or try_get(data, lambda x: x['page']['count'])
            max_count = max_count or traverse_obj(data, ('page', 'count'))

            entries = try_get(data, lambda x: x['list']['vlist'])
            entries = traverse_obj(data, ('list', 'vlist'))
            if not entries:
                return
            for entry in entries:
@@ -519,7 +526,7 @@ class BilibiliCategoryIE(InfoExtractor):
            api_url, query, query={'Search_key': query, 'pn': page_num},
            note='Extracting results from page %s of %s' % (page_num, num_pages))

        video_list = try_get(parsed_json, lambda x: x['data']['archives'], list)
        video_list = traverse_obj(parsed_json, ('data', 'archives'), expected_type=list)
        if not video_list:
            raise ExtractorError('Failed to retrieve video list for page %d' % page_num)

@@ -549,7 +556,7 @@ class BilibiliCategoryIE(InfoExtractor):

        api_url = 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
        page_json = self._download_json(api_url, query, query={'Search_key': query, 'pn': '1'})
        page_data = try_get(page_json, lambda x: x['data']['page'], dict)
        page_data = traverse_obj(page_json, ('data', 'page'), expected_type=dict)
        count, size = int_or_none(page_data.get('count')), int_or_none(page_data.get('size'))
        if count is None or not size:
            raise ExtractorError('Failed to calculate either page count or size')

@@ -721,40 +728,57 @@ class BiliBiliPlayerIE(InfoExtractor):


class BiliIntlBaseIE(InfoExtractor):
    _API_URL = 'https://api.bili{}/intl/gateway{}'
    _API_URL = 'https://api.bilibili.tv/intl/gateway'
    _NETRC_MACHINE = 'biliintl'

    def _call_api(self, type, endpoint, id):
        return self._download_json(self._API_URL.format(type, endpoint), id)['data']
    def _call_api(self, endpoint, *args, **kwargs):
        json = self._download_json(self._API_URL + endpoint, *args, **kwargs)
        if json.get('code'):
            if json['code'] in (10004004, 10004005, 10023006):
                self.raise_login_required()
            elif json['code'] == 10004001:
                self.raise_geo_restricted()
            else:
                if json.get('message') and str(json['code']) != json['message']:
                    errmsg = f'{kwargs.get("errnote", "Unable to download JSON metadata")}: {self.IE_NAME} said: {json["message"]}'
                else:
                    errmsg = kwargs.get('errnote', 'Unable to download JSON metadata')
                if kwargs.get('fatal'):
                    raise ExtractorError(errmsg)
                else:
                    self.report_warning(errmsg)
        return json.get('data')

    def json2srt(self, json):
        data = '\n\n'.join(
            f'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
            for i, line in enumerate(json['body']))
            for i, line in enumerate(json['body']) if line.get('content'))
        return data

    def _get_subtitles(self, type, ep_id):
        sub_json = self._call_api(type, f'/m/subtitle?ep_id={ep_id}&platform=web', ep_id)
    def _get_subtitles(self, ep_id):
        sub_json = self._call_api(f'/web/v2/subtitle?episode_id={ep_id}&platform=web', ep_id)
        subtitles = {}
        for sub in sub_json.get('subtitles', []):
        for sub in sub_json.get('subtitles') or []:
            sub_url = sub.get('url')
            if not sub_url:
                continue
            sub_data = self._download_json(sub_url, ep_id, fatal=False)
            sub_data = self._download_json(
                sub_url, ep_id, errnote='Unable to download subtitles', fatal=False,
                note='Downloading subtitles%s' % f' for {sub["lang"]}' if sub.get('lang') else '')
            if not sub_data:
                continue
            subtitles.setdefault(sub.get('key', 'en'), []).append({
            subtitles.setdefault(sub.get('lang_key', 'en'), []).append({
                'ext': 'srt',
                'data': self.json2srt(sub_data)
            })
        return subtitles

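The json2srt change only skips cues with empty content. A self-contained sketch, with srt_timecode standing in for yt-dlp's srt_subtitles_timecode helper:

def srt_timecode(seconds):
    msec = int(seconds * 1000)
    h, rem = divmod(msec, 3600000)
    m, rem = divmod(rem, 60000)
    s, ms = divmod(rem, 1000)
    return '%02d:%02d:%02d,%03d' % (h, m, s, ms)

def json2srt(sub_json):
    # Mirror the diff: drop cue objects whose 'content' is empty
    return '\n\n'.join(
        f'{i + 1}\n{srt_timecode(line["from"])} --> {srt_timecode(line["to"])}\n{line["content"]}'
        for i, line in enumerate(sub_json['body']) if line.get('content'))

print(json2srt({'body': [{'from': 1.5, 'to': 3.0, 'content': 'Hi'},
                         {'from': 3, 'to': 4, 'content': ''}]}))
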
    def _get_formats(self, type, ep_id):
        video_json = self._call_api(type, f'/web/playurl?ep_id={ep_id}&platform=web', ep_id)
        if not video_json:
            self.raise_login_required(method='cookies')
    def _get_formats(self, ep_id):
        video_json = self._call_api(f'/web/playurl?ep_id={ep_id}&platform=web', ep_id,
                                    note='Downloading video formats', errnote='Unable to download video formats')
        video_json = video_json['playurl']
        formats = []
        for vid in video_json.get('video', []):
        for vid in video_json.get('video') or []:
            video_res = vid.get('video_resource') or {}
            video_info = vid.get('stream_info') or {}
            if not video_res.get('url'):
@@ -770,7 +794,7 @@ class BiliIntlBaseIE(InfoExtractor):
                'vcodec': video_res.get('codecs'),
                'filesize': video_res.get('size'),
            })
        for aud in video_json.get('audio_resource', []):
        for aud in video_json.get('audio_resource') or []:
            if not aud.get('url'):
                continue
            formats.append({
@@ -785,85 +809,144 @@ class BiliIntlBaseIE(InfoExtractor):
        self._sort_formats(formats)
        return formats

    def _extract_ep_info(self, type, episode_data, ep_id):
    def _extract_ep_info(self, episode_data, ep_id):
        return {
            'id': ep_id,
            'title': episode_data.get('long_title') or episode_data['title'],
            'title': episode_data.get('title_display') or episode_data['title'],
            'thumbnail': episode_data.get('cover'),
            'episode_number': str_to_int(episode_data.get('title')),
            'formats': self._get_formats(type, ep_id),
            'subtitles': self._get_subtitles(type, ep_id),
            'episode_number': int_or_none(self._search_regex(
                r'^E(\d+)(?:$| - )', episode_data.get('title_display'), 'episode number', default=None)),
            'formats': self._get_formats(ep_id),
            'subtitles': self._get_subtitles(ep_id),
            'extractor_key': BiliIntlIE.ie_key(),
        }

    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        try:
            from Cryptodome.PublicKey import RSA
            from Cryptodome.Cipher import PKCS1_v1_5
        except ImportError:
            try:
                from Crypto.PublicKey import RSA
                from Crypto.Cipher import PKCS1_v1_5
            except ImportError:
                raise ExtractorError('pycryptodomex not found. Please install', expected=True)

        key_data = self._download_json(
            'https://passport.bilibili.tv/x/intl/passport-login/web/key?lang=en-US', None,
            note='Downloading login key', errnote='Unable to download login key')['data']

        public_key = RSA.importKey(key_data['key'])
        password_hash = PKCS1_v1_5.new(public_key).encrypt((key_data['hash'] + password).encode('utf-8'))
        login_post = self._download_json(
            'https://passport.bilibili.tv/x/intl/passport-login/web/login/password?lang=en-US', None, data=urlencode_postdata({
                'username': username,
                'password': base64.b64encode(password_hash).decode('ascii'),
                'keep_me': 'true',
                's_locale': 'en_US',
                'isTrusted': 'true'
            }), note='Logging in', errnote='Unable to log in')
        if login_post.get('code'):
            if login_post.get('message'):
                raise ExtractorError(f'Unable to log in: {self.IE_NAME} said: {login_post["message"]}', expected=True)
            else:
                raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()


class BiliIntlIE(BiliIntlBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bili(?P<type>bili\.tv|intl.com)/(?:[a-z]{2}/)?play/(?P<season_id>\d+)/(?P<id>\d+)'
    _VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-z]{2}/)?play/(?P<season_id>\d+)/(?P<id>\d+)'
    _TESTS = [{
        # Bstation page
        'url': 'https://www.bilibili.tv/en/play/34613/341736',
        'info_dict': {
            'id': '341736',
            'ext': 'mp4',
            'title': 'The First Night',
            'thumbnail': 'https://i0.hdslb.com/bfs/intl/management/91e30e5521235d9b163339a26a0b030ebda54310.png',
            'title': 'E2 - The First Night',
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
            'episode_number': 2,
        }
    }, {
        # Non-Bstation page
        'url': 'https://www.bilibili.tv/en/play/1033760/11005006',
        'info_dict': {
            'id': '11005006',
            'ext': 'mp4',
            'title': 'E3 - Who?',
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
            'episode_number': 3,
        }
    }, {
        # Subtitle with empty content
        'url': 'https://www.bilibili.tv/en/play/1005144/10131790',
        'info_dict': {
            'id': '10131790',
            'ext': 'mp4',
            'title': 'E140 - Two Heartbeats: Kabuto\'s Trap',
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
            'episode_number': 140,
        },
        'params': {
            'format': 'bv',
        },
        'skip': 'According to the copyright owner\'s request, you may only watch the video after you log in.'
    }, {
        'url': 'https://www.biliintl.com/en/play/34613/341736',
        'info_dict': {
            'id': '341736',
            'ext': 'mp4',
            'title': 'The First Night',
            'thumbnail': 'https://i0.hdslb.com/bfs/intl/management/91e30e5521235d9b163339a26a0b030ebda54310.png',
            'episode_number': 2,
        },
        'params': {
            'format': 'bv',
        },
        'only_matching': True,
    }]

    def _real_extract(self, url):
        type, season_id, id = self._match_valid_url(url).groups()
        data_json = self._call_api(type, f'/web/view/ogv_collection?season_id={season_id}', id)
        season_id, video_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, video_id)
        # Bstation layout
        initial_data = self._parse_json(self._search_regex(
            r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage,
            'preload state', default='{}'), video_id, fatal=False) or {}
        episode_data = traverse_obj(initial_data, ('OgvVideo', 'epDetail'), expected_type=dict)

        if not episode_data:
            # Non-Bstation layout, read through episode list
            season_json = self._call_api(f'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id)
            episode_data = next(
                episode for episode in data_json.get('episodes', [])
                if str(episode.get('ep_id')) == id)
        return self._extract_ep_info(type, episode_data, id)
                episode for episode in traverse_obj(season_json, ('sections', ..., 'episodes', ...), expected_type=dict)
                if str(episode.get('episode_id')) == video_id)
        return self._extract_ep_info(episode_data, video_id)


class BiliIntlSeriesIE(BiliIntlBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bili(?P<type>bili\.tv|intl.com)/(?:[a-z]{2}/)?play/(?P<id>\d+)$'
    _VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-z]{2}/)?play/(?P<id>\d+)$'
    _TESTS = [{
        'url': 'https://www.bilibili.tv/en/play/34613',
        'playlist_mincount': 15,
        'info_dict': {
            'id': '34613',
            'title': 'Fly Me to the Moon',
            'description': 'md5:a861ee1c4dc0acfad85f557cc42ac627',
            'categories': ['Romance', 'Comedy', 'Slice of life'],
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
            'view_count': int,
        },
        'params': {
            'skip_download': True,
            'format': 'bv',
        },
    }, {
        'url': 'https://www.biliintl.com/en/play/34613',
        'playlist_mincount': 15,
        'info_dict': {
            'id': '34613',
        },
        'params': {
            'skip_download': True,
            'format': 'bv',
        },
        'only_matching': True,
    }]

    def _entries(self, id, type):
        data_json = self._call_api(type, f'/web/view/ogv_collection?season_id={id}', id)
        for episode in data_json.get('episodes', []):
            episode_id = str(episode.get('ep_id'))
            yield self._extract_ep_info(type, episode, episode_id)
    def _entries(self, series_id):
        series_json = self._call_api(f'/web/v2/ogv/play/episodes?season_id={series_id}&platform=web', series_id)
        for episode in traverse_obj(series_json, ('sections', ..., 'episodes', ...), expected_type=dict, default=[]):
            episode_id = str(episode.get('episode_id'))
            yield self._extract_ep_info(episode, episode_id)

    def _real_extract(self, url):
        type, id = self._match_valid_url(url).groups()
        return self.playlist_result(self._entries(id, type), playlist_id=id)
        series_id = self._match_id(url)
        series_info = self._call_api(f'/web/v2/ogv/play/season_info?season_id={series_id}&platform=web', series_id).get('season') or {}
        return self.playlist_result(
            self._entries(series_id), series_id, series_info.get('title'), series_info.get('description'),
            categories=traverse_obj(series_info, ('styles', ..., 'title'), expected_type=str_or_none),
            thumbnail=url_or_none(series_info.get('horizontal_cover')), view_count=parse_count(series_info.get('view')))

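The new episode_number derivation parses the E-prefix out of title_display. A sketch using the regex from the diff (episode_number is an invented wrapper):

import re

def episode_number(title_display):
    # Titles look like 'E2 - The First Night' or just 'E2'
    m = re.search(r'^E(\d+)(?:$| - )', title_display or '')
    return int(m.group(1)) if m else None

print(episode_number("E140 - Two Heartbeats: Kabuto's Trap"))  # 140
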
@@ -3,27 +3,28 @@ from __future__ import unicode_literals

 from .common import InfoExtractor
 from .vk import VKIE
-from ..compat import (
-    compat_b64decode,
-    compat_urllib_parse_unquote,
+from ..compat import compat_b64decode
+from ..utils import (
+    int_or_none,
+    js_to_json,
+    traverse_obj,
+    unified_timestamp,
 )
-from ..utils import int_or_none


 class BIQLEIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?biqle\.(?:com|org|ru)/watch/(?P<id>-?\d+_\d+)'
     _TESTS = [{
-        # Youtube embed
-        'url': 'https://biqle.ru/watch/-115995369_456239081',
-        'md5': '97af5a06ee4c29bbf9c001bdb1cf5c06',
+        'url': 'https://biqle.ru/watch/-2000421746_85421746',
+        'md5': 'ae6ef4f04d19ac84e4658046d02c151c',
         'info_dict': {
-            'id': '8v4f-avW-VI',
+            'id': '-2000421746_85421746',
             'ext': 'mp4',
-            'title': "PASSE-PARTOUT - L'ete c'est fait pour jouer",
-            'description': 'Passe-Partout',
-            'uploader_id': 'mrsimpsonstef3',
-            'uploader': 'Phanolito',
-            'upload_date': '20120822',
+            'title': 'Forsaken By Hope Studio Clip',
+            'description': 'Forsaken By Hope Studio Clip — Смотреть онлайн',
+            'upload_date': '19700101',
+            'thumbnail': r're:https://[^/]+/impf/7vN3ACwSTgChP96OdOfzFjUCzFR6ZglDQgWsIw/KPaACiVJJxM\.jpg\?size=800x450&quality=96&keep_aspect_ratio=1&background=000000&sign=b48ea459c4d33dbcba5e26d63574b1cb&type=video_thumb',
+            'timestamp': 0,
         },
     }, {
         'url': 'http://biqle.org/watch/-44781847_168547604',
@@ -32,50 +33,59 @@ class BIQLEIE(InfoExtractor):
         'info_dict': {
             'id': '-44781847_168547604',
             'ext': 'mp4',
             'title': 'Ребенок в шоке от автоматической мойки',
+            'description': 'Ребенок в шоке от автоматической мойки — Смотреть онлайн',
             'timestamp': 1396633454,
             'uploader': 'Dmitry Kotov',
             'upload_date': '20140404',
             'uploader_id': '47850140',
+            'thumbnail': r're:https://[^/]+/c535507/u190034692/video/l_b84df002\.jpg',
         },
     }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        embed_url = self._proto_relative_url(self._search_regex(
-            r'<iframe.+?src="((?:https?:)?//(?:daxab\.com|dxb\.to|[^/]+/player)/[^"]+)".*?></iframe>',
-            webpage, 'embed url'))
+
+        title = self._html_search_meta('name', webpage, 'Title', fatal=False)
+        timestamp = unified_timestamp(self._html_search_meta('uploadDate', webpage, 'Upload Date', default=None))
+        description = self._html_search_meta('description', webpage, 'Description', default=None)
+
+        global_embed_url = self._search_regex(
+            r'<script[^<]+?window.globEmbedUrl\s*=\s*\'((?:https?:)?//(?:daxab\.com|dxb\.to|[^/]+/player)/[^\']+)\'',
+            webpage, 'global Embed url')
+        hash = self._search_regex(
+            r'<script id="data-embed-video[^<]+?hash: "([^"]+)"[^<]*</script>', webpage, 'Hash')
+
+        embed_url = global_embed_url + hash

         if VKIE.suitable(embed_url):
             return self.url_result(embed_url, VKIE.ie_key(), video_id)

         embed_page = self._download_webpage(
-            embed_url, video_id, headers={'Referer': url})
-        video_ext = self._get_cookies(embed_url).get('video_ext')
-        if video_ext:
-            video_ext = compat_urllib_parse_unquote(video_ext.value)
-        if not video_ext:
-            video_ext = compat_b64decode(self._search_regex(
-                r'video_ext\s*:\s*[\'"]([A-Za-z0-9+/=]+)',
-                embed_page, 'video_ext')).decode()
-        video_id, sig, _, access_token = video_ext.split(':')
+            embed_url, video_id, 'Downloading embed webpage', headers={'Referer': url})
+
+        glob_params = self._parse_json(self._search_regex(
+            r'<script id="globParams">[^<]*window.globParams = ([^;]+);[^<]+</script>',
+            embed_page, 'Global Parameters'), video_id, transform_source=js_to_json)
+        host_name = compat_b64decode(glob_params['server'][::-1]).decode()
+
         item = self._download_json(
-            'https://api.vk.com/method/video.get', video_id,
-            headers={'User-Agent': 'okhttp/3.4.1'}, query={
-                'access_token': access_token,
-                'sig': sig,
-                'v': 5.44,
+            f'https://{host_name}/method/video.get/{video_id}', video_id,
+            headers={'Referer': url}, query={
+                'token': glob_params['video']['access_token'],
                 'videos': video_id,
+                'ckey': glob_params['c_key'],
+                'credentials': glob_params['video']['credentials'],
             })['response']['items'][0]
-        title = item['title']

         formats = []
         for f_id, f_url in item.get('files', {}).items():
             if f_id == 'external':
                 return self.url_result(f_url)
             ext, height = f_id.split('_')
+            height_extra_key = traverse_obj(glob_params, ('video', 'partial', 'quality', height))
+            if height_extra_key:
                 formats.append({
-                    'format_id': height + 'p',
-                    'url': f_url,
+                    'format_id': f'{height}p',
+                    'url': f'https://{host_name}/{f_url[8:]}&videos={video_id}&extra_key={height_extra_key}',
                     'height': int_or_none(height),
                     'ext': ext,
                 })
@@ -96,10 +106,9 @@ class BIQLEIE(InfoExtractor):
             'title': title,
             'formats': formats,
             'comment_count': int_or_none(item.get('comments')),
-            'description': item.get('description'),
+            'description': description,
             'duration': int_or_none(item.get('duration')),
             'thumbnails': thumbnails,
-            'timestamp': int_or_none(item.get('date')),
-            'uploader': item.get('owner_id'),
+            'timestamp': timestamp,
             'view_count': int_or_none(item.get('views')),
         }
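The new BIQLE flow derives its API host from `globParams['server']`, which is stored as reversed base64; a small sketch with a constructed blob (the string below was encoded for this example, not taken from the site):

from base64 import b64decode

server_blob = 't92YuIWY4FGZ'  # base64 of 'daxab.com', reversed
host_name = b64decode(server_blob[::-1]).decode()
assert host_name == 'daxab.com'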
@@ -51,7 +51,7 @@ class BitwaveStreamIE(InfoExtractor):

         return {
             'id': username,
-            'title': self._live_title(channel['data']['title']),
+            'title': channel['data']['title'],
             'uploader': username,
             'uploader_id': username,
             'formats': formats,
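This hunk, like the BongaCams, CAM4 and CamModels hunks below, drops the `_live_title` wrapper. A sketch of what the removed helper did (youtube-dl-era behavior; the timestamp suffix is now applied by the core for live streams instead of by each extractor):

import datetime

def _live_title(name):
    # Append the current time so recordings of a live stream get distinct titles
    return name + ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')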
54  yt_dlp/extractor/blogger.py  (new file)
@@ -0,0 +1,54 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from ..utils import (
    mimetype2ext,
    parse_duration,
    parse_qs,
    str_or_none,
    traverse_obj,
)
from .common import InfoExtractor


class BloggerIE(InfoExtractor):
    IE_NAME = 'blogger.com'
    _VALID_URL = r'https?://(?:www\.)?blogger\.com/video\.g\?token=(?P<id>.+)'
    _VALID_EMBED = r'''<iframe[^>]+src=["']((?:https?:)?//(?:www\.)?blogger\.com/video\.g\?token=[^"']+)["']'''
    _TESTS = [{
        'url': 'https://www.blogger.com/video.g?token=AD6v5dzEe9hfcARr5Hlq1WTkYy6t-fXH3BBahVhGvVHe5szdEUBEloSEDSTA8-b111089KbfWuBvTN7fnbxMtymsHhXAXwVvyzHH4Qch2cfLQdGxKQrrEuFpC1amSl_9GuLWODjPgw',
        'md5': 'f1bc19b6ea1b0fd1d81e84ca9ec467ac',
        'info_dict': {
            'id': 'BLOGGER-video-3c740e3a49197e16-796',
            'title': 'BLOGGER-video-3c740e3a49197e16-796',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*',
            'duration': 76.068,
        }
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(BloggerIE._VALID_EMBED, webpage)

    def _real_extract(self, url):
        token_id = self._match_id(url)
        webpage = self._download_webpage(url, token_id)
        data_json = self._search_regex(r'var\s+VIDEO_CONFIG\s*=\s*(\{.*)', webpage, 'JSON data')
        data = self._parse_json(data_json.encode('utf-8').decode('unicode_escape'), token_id)
        streams = data['streams']
        formats = [{
            'ext': mimetype2ext(traverse_obj(parse_qs(stream['play_url']), ('mime', 0))),
            'url': stream['play_url'],
            'format_id': str_or_none(stream.get('format_id')),
        } for stream in streams]

        return {
            'id': data.get('iframe_id', token_id),
            'title': data.get('iframe_id', token_id),
            'formats': formats,
            'thumbnail': data.get('thumbnail'),
            'duration': parse_duration(traverse_obj(parse_qs(streams[0]['play_url']), ('dur', 0))),
        }
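BloggerIE's `unicode_escape` round-trip exists because the inline player config escapes characters such as '&' as '\u0026'; a self-contained sketch with an invented page snippet:

import json
import re

webpage = '''<script type="text/javascript">
var VIDEO_CONFIG = {"thumbnail":"https://example.invalid/t.jpg","iframe_id":"BLOGGER-video-abc-123","streams":[{"play_url":"https://example.invalid/v.mp4?mime=video%2Fmp4\\u0026dur=76.068","format_id":18}]}
</script>'''
raw = re.search(r'var\s+VIDEO_CONFIG\s*=\s*(\{.*)', webpage).group(1)
# Decode the \u0026 escapes before handing the blob to the JSON parser:
data = json.loads(raw.encode('utf-8').decode('unicode_escape'))
assert '&dur=76.068' in data['streams'][0]['play_url']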
@@ -49,7 +49,7 @@ class BongaCamsIE(InfoExtractor):

         return {
             'id': channel_id,
-            'title': self._live_title(uploader or uploader_id),
+            'title': uploader or uploader_id,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'like_count': like_count,
39  yt_dlp/extractor/breitbart.py  (new file)
@@ -0,0 +1,39 @@
from __future__ import unicode_literals

from .common import InfoExtractor


class BreitBartIE(InfoExtractor):
    _VALID_URL = r'https?:\/\/(?:www\.)breitbart.com/videos/v/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://www.breitbart.com/videos/v/5cOz1yup/?pl=Ij6NDOji',
        'md5': '0aa6d1d6e183ac5ca09207fe49f17ade',
        'info_dict': {
            'id': '5cOz1yup',
            'ext': 'mp4',
            'title': 'Watch \u2013 Clyburn: Statues in Congress Have to Go Because they Are Honoring Slavery',
            'description': 'md5:bac35eb0256d1cb17f517f54c79404d5',
            'thumbnail': 'https://cdn.jwplayer.com/thumbs/5cOz1yup-1920.jpg',
            'age_limit': 0,
        }
    }, {
        'url': 'https://www.breitbart.com/videos/v/eaiZjVOn/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        formats = self._extract_m3u8_formats(f'https://cdn.jwplayer.com/manifests/{video_id}.m3u8', video_id, ext='mp4')
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': self._og_search_title(
                webpage, default=None) or self._html_search_regex(
                r'(?s)<title>(.*?)</title>', webpage, 'video title'),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'age_limit': self._rta_search(webpage),
            'formats': formats
        }
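The Breitbart extractor above is mostly a thin wrapper over JW Player's public CDN layout, where the HLS manifest and the poster image are both derived from the 8-character media id (pattern taken from the code and test data above):

media_id = '5cOz1yup'
manifest_url = f'https://cdn.jwplayer.com/manifests/{media_id}.m3u8'    # HLS master playlist
thumbnail_url = f'https://cdn.jwplayer.com/thumbs/{media_id}-1920.jpg'  # poster, 1920px wide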
@@ -16,6 +16,7 @@ from ..compat import (
 )
 from ..utils import (
     clean_html,
+    dict_get,
     extract_attributes,
     ExtractorError,
     find_xpath_attr,
@@ -471,32 +472,22 @@ class BrightcoveNewIE(AdobePassIE):
     def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
         title = json_data['name'].strip()

-        num_drm_sources = 0
         formats, subtitles = [], {}
         sources = json_data.get('sources') or []
         for source in sources:
             container = source.get('container')
             ext = mimetype2ext(source.get('type'))
             src = source.get('src')
-            skip_unplayable = not self.get_param('allow_unplayable_formats')
-            # https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
-            if skip_unplayable and (container == 'WVM' or source.get('key_systems')):
-                num_drm_sources += 1
-                continue
-            elif ext == 'ism' and skip_unplayable:
-                continue
-            elif ext == 'm3u8' or container == 'M2TS':
+            if ext == 'm3u8' or container == 'M2TS':
                 if not src:
                     continue
-                f, subs = self._extract_m3u8_formats_and_subtitles(
+                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                     src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
-                formats.extend(f)
                 subtitles = self._merge_subtitles(subtitles, subs)
             elif ext == 'mpd':
                 if not src:
                     continue
-                f, subs = self._extract_mpd_formats_and_subtitles(src, video_id, 'dash', fatal=False)
-                formats.extend(f)
+                fmts, subs = self._extract_mpd_formats_and_subtitles(src, video_id, 'dash', fatal=False)
                 subtitles = self._merge_subtitles(subtitles, subs)
             else:
                 streaming_src = source.get('streaming_src')
@@ -543,7 +534,13 @@ class BrightcoveNewIE(AdobePassIE):
                     'play_path': stream_name,
                     'format_id': build_format_id('rtmp'),
                 })
-            formats.append(f)
+            fmts = [f]
+
+            # https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
+            if container == 'WVM' or source.get('key_systems') or ext == 'ism':
+                for f in fmts:
+                    f['has_drm'] = True
+            formats.extend(fmts)

         if not formats:
             errors = json_data.get('errors')
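The Brightcove refactor above stops skipping DRM sources up front and instead marks each extracted format with `has_drm`, so a DRM error is raised only when nothing is playable; a condensed sketch of the new control flow (the `source` dicts are invented):

sources = [
    {'container': 'MP4', 'src': 'https://cdn.example/clear.mp4'},
    {'container': 'WVM', 'src': 'https://cdn.example/drm.wvm'},
    {'key_systems': {'com.widevine.alpha': {}}, 'src': 'https://cdn.example/drm.mpd'},
]
formats = []
for source in sources:
    fmts = [{'url': source['src']}]  # stand-in for the real format extraction
    if source.get('container') == 'WVM' or source.get('key_systems'):
        for f in fmts:
            f['has_drm'] = True  # keep the format, just mark it
    formats.extend(fmts)
# Downstream code reports DRM only if every format is marked:
assert any(not f.get('has_drm') for f in formats)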
@@ -551,9 +548,6 @@ class BrightcoveNewIE(AdobePassIE):
             error = errors[0]
             self.raise_no_formats(
                 error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
-        elif (not self.get_param('allow_unplayable_formats')
-                and sources and num_drm_sources == len(sources)):
-            self.report_drm(video_id)

         self._sort_formats(formats)

@@ -577,11 +571,19 @@ class BrightcoveNewIE(AdobePassIE):
         if duration is not None and duration <= 0:
             is_live = True

+        common_res = [(160, 90), (320, 180), (480, 720), (640, 360), (768, 432), (1024, 576), (1280, 720), (1366, 768), (1920, 1080)]
+        thumb_base_url = dict_get(json_data, ('poster', 'thumbnail'))
+        thumbnails = [{
+            'url': re.sub(r'\d+x\d+', f'{w}x{h}', thumb_base_url),
+            'width': w,
+            'height': h,
+        } for w, h in common_res] if thumb_base_url else None
+
         return {
             'id': video_id,
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'description': clean_html(json_data.get('description')),
-            'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
+            'thumbnails': thumbnails,
             'duration': duration,
             'timestamp': parse_iso8601(json_data.get('published_at')),
             'uploader_id': json_data.get('account_id'),
34  yt_dlp/extractor/cableav.py  (new file)
@@ -0,0 +1,34 @@
# coding: utf-8
from .common import InfoExtractor


class CableAVIE(InfoExtractor):
    _VALID_URL = r'https://cableav\.tv/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'https://cableav.tv/lS4iR9lWjN8/',
        'md5': '7e3fe5e49d61c4233b7f5b0f69b15e18',
        'info_dict': {
            'id': 'lS4iR9lWjN8',
            'ext': 'mp4',
            'title': '國產麻豆AV 叮叮映畫 DDF001 情欲小說家 - CableAV',
            'description': '國產AV 480p, 720p 国产麻豆AV 叮叮映画 DDF001 情欲小说家',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        video_url = self._og_search_video_url(webpage, secure=False)

        formats = self._extract_m3u8_formats(video_url, video_id, 'mp4')
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
        }
114  yt_dlp/extractor/callin.py  (new file)
@@ -0,0 +1,114 @@
# coding: utf-8
from .common import InfoExtractor
from ..utils import (
    traverse_obj,
    float_or_none,
    int_or_none
)


class CallinIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?callin\.com/(episode)/(?P<id>[-a-zA-Z]+)'
    _TESTS = [{
        'url': 'https://www.callin.com/episode/the-title-ix-regime-and-the-long-march-through-EBfXYSrsjc',
        'info_dict': {
            'id': '218b979630a35ead12c6fd096f2996c56c37e4d0dc1f6dc0feada32dcf7b31cd',
            'title': 'The Title IX Regime and the Long March Through and Beyond the Institutions',
            'ext': 'ts',
            'display_id': 'the-title-ix-regime-and-the-long-march-through-EBfXYSrsjc',
            'thumbnail': 're:https://.+\\.png',
            'description': 'First episode',
            'uploader': 'Wesley Yang',
            'timestamp': 1639404128.65,
            'upload_date': '20211213',
            'uploader_id': 'wesyang',
            'uploader_url': 'http://wesleyyang.substack.com',
            'channel': 'Conversations in Year Zero',
            'channel_id': '436d1f82ddeb30cd2306ea9156044d8d2cfdc3f1f1552d245117a42173e78553',
            'channel_url': 'https://callin.com/show/conversations-in-year-zero-oJNllRFSfx',
            'duration': 9951.936,
            'view_count': int,
            'categories': ['News & Politics', 'History', 'Technology'],
            'cast': ['Wesley Yang', 'KC Johnson', 'Gabi Abramovich'],
            'series': 'Conversations in Year Zero',
            'series_id': '436d1f82ddeb30cd2306ea9156044d8d2cfdc3f1f1552d245117a42173e78553',
            'episode': 'The Title IX Regime and the Long March Through and Beyond the Institutions',
            'episode_number': 1,
            'episode_id': '218b979630a35ead12c6fd096f2996c56c37e4d0dc1f6dc0feada32dcf7b31cd'
        }
    }]

    def try_get_user_name(self, d):
        names = [d.get(n) for n in ('first', 'last')]
        if None in names:
            return next((n for n in names if n), None)
        return ' '.join(names)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        next_data = self._search_nextjs_data(webpage, display_id)
        episode = next_data['props']['pageProps']['episode']

        id = episode['id']
        title = (episode.get('title')
                 or self._og_search_title(webpage, fatal=False)
                 or self._html_search_regex('<title>(.*?)</title>', webpage, 'title'))
        url = episode['m3u8']
        formats = self._extract_m3u8_formats(url, display_id, ext='ts')
        self._sort_formats(formats)

        show = traverse_obj(episode, ('show', 'title'))
        show_id = traverse_obj(episode, ('show', 'id'))

        show_json = None
        app_slug = (self._html_search_regex(
            '<script\\s+src=["\']/_next/static/([-_a-zA-Z0-9]+)/_',
            webpage, 'app slug', fatal=False) or next_data.get('buildId'))
        show_slug = traverse_obj(episode, ('show', 'linkObj', 'resourceUrl'))
        if app_slug and show_slug and '/' in show_slug:
            show_slug = show_slug.rsplit('/', 1)[1]
            show_json_url = f'https://www.callin.com/_next/data/{app_slug}/show/{show_slug}.json'
            show_json = self._download_json(show_json_url, display_id, fatal=False)

        host = (traverse_obj(show_json, ('pageProps', 'show', 'hosts', 0))
                or traverse_obj(episode, ('speakers', 0)))

        host_nick = traverse_obj(host, ('linkObj', 'resourceUrl'))
        host_nick = host_nick.rsplit('/', 1)[1] if (host_nick and '/' in host_nick) else None

        cast = list(filter(None, [
            self.try_get_user_name(u) for u in
            traverse_obj(episode, (('speakers', 'callerTags'), ...)) or []
        ]))
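        # Assumption: the show's episode list comes back newest-first, so the
        # 1-based episode number below is counted from the end of the list.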
        episode_list = traverse_obj(show_json, ('pageProps', 'show', 'episodes')) or []
        episode_number = next(
            (len(episode_list) - i for (i, e) in enumerate(episode_list) if e.get('id') == id),
            None)

        return {
            'id': id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'thumbnail': traverse_obj(episode, ('show', 'photo')),
            'description': episode.get('description'),
            'uploader': self.try_get_user_name(host) if host else None,
            'timestamp': episode.get('publishedAt'),
            'uploader_id': host_nick,
            'uploader_url': traverse_obj(show_json, ('pageProps', 'show', 'url')),
            'channel': show,
            'channel_id': show_id,
            'channel_url': traverse_obj(episode, ('show', 'linkObj', 'resourceUrl')),
            'duration': float_or_none(episode.get('runtime')),
            'view_count': int_or_none(episode.get('plays')),
            'categories': traverse_obj(episode, ('show', 'categorizations', ..., 'name')),
            'cast': cast if cast else None,
            'series': show,
            'series_id': show_id,
            'episode': title,
            'episode_number': episode_number,
            'episode_id': id
        }
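CallinIE fetches show metadata through Next.js data routes instead of scraping a second HTML page; a sketch of the URL shape it reconstructs (the build id below is invented, the slug comes from the test data above):

build_id = 'k0sY1BZ0rpgNwtxFqG-e_'  # hypothetical; taken from /_next/static/<buildId>/ or __NEXT_DATA__'s buildId
show_slug = 'conversations-in-year-zero-oJNllRFSfx'
show_json_url = f'https://www.callin.com/_next/data/{build_id}/show/{show_slug}.json'
# This endpoint returns the same pageProps JSON the server would embed
# in __NEXT_DATA__ when rendering /show/<slug>.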
41  yt_dlp/extractor/caltrans.py  (new file)
@@ -0,0 +1,41 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class CaltransIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^/]+\.)?ca\.gov/vm/loc/[^/]+/(?P<id>[a-z0-9_]+)\.htm'
    _TEST = {
        'url': 'https://cwwp2.dot.ca.gov/vm/loc/d3/hwy50at24th.htm',
        'info_dict': {
            'id': 'hwy50at24th',
            'ext': 'ts',
            'title': 'US-50 : Sacramento : Hwy 50 at 24th',
            'live_status': 'is_live',
            'thumbnail': 'https://cwwp2.dot.ca.gov/data/d3/cctv/image/hwy50at24th/hwy50at24th.jpg',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        global_vars = self._search_regex(
            r'<script[^<]+?([^<]+\.m3u8[^<]+)</script>',
            webpage, 'Global Vars')
        route_place = self._search_regex(r'routePlace\s*=\s*"([^"]+)"', global_vars, 'Route Place', fatal=False)
        location_name = self._search_regex(r'locationName\s*=\s*"([^"]+)"', global_vars, 'Location Name', fatal=False)
        poster_url = self._search_regex(r'posterURL\s*=\s*"([^"]+)"', global_vars, 'Poster Url', fatal=False)
        video_stream = self._search_regex(r'videoStreamURL\s*=\s*"([^"]+)"', global_vars, 'Video Stream URL', fatal=False)

        formats = self._extract_m3u8_formats(video_stream, video_id, 'ts', live=True)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': f'{route_place} : {location_name}',
            'is_live': True,
            'formats': formats,
            'thumbnail': poster_url,
        }
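CaltransIE scrapes JS globals in two steps: first grab the whole <script> body that mentions the .m3u8, then pull individual variables out of it; a minimal illustration (the script body below is invented):

import re

script = '''var routePlace = "US-50 : Sacramento";
var locationName = "Hwy 50 at 24th";
var videoStreamURL = "https://example.invalid/d3/hwy50at24th/playlist.m3u8";'''
route_place = re.search(r'routePlace\s*=\s*"([^"]+)"', script).group(1)
stream_url = re.search(r'videoStreamURL\s*=\s*"([^"]+)"', script).group(1)
assert stream_url.endswith('.m3u8')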
@@ -13,6 +13,8 @@ class CAM4IE(InfoExtractor):
             'ext': 'mp4',
             'title': 're:^foxynesss [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
             'age_limit': 18,
+            'live_status': 'is_live',
+            'thumbnail': 'https://snapshots.xcdnpro.com/thumbnails/foxynesss',
         }
     }

@@ -25,8 +27,9 @@ class CAM4IE(InfoExtractor):

         return {
             'id': channel_id,
-            'title': self._live_title(channel_id),
+            'title': channel_id,
             'is_live': True,
             'age_limit': 18,
             'formats': formats,
+            'thumbnail': f'https://snapshots.xcdnpro.com/thumbnails/{channel_id}',
         }

@@ -91,7 +91,7 @@ class CamModelsIE(InfoExtractor):

         return {
             'id': user_id,
-            'title': self._live_title(user_id),
+            'title': user_id,
             'is_live': True,
             'formats': formats,
             'age_limit': 18
98  yt_dlp/extractor/canalalpha.py  (new file)
@@ -0,0 +1,98 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    clean_html,
    dict_get,
    try_get,
    unified_strdate,
)


class CanalAlphaIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?canalalpha\.ch/play/[^/]+/[^/]+/(?P<id>\d+)/?.*'

    _TESTS = [{
        'url': 'https://www.canalalpha.ch/play/le-journal/episode/24520/jeudi-28-octobre-2021',
        'info_dict': {
            'id': '24520',
            'ext': 'mp4',
            'title': 'Jeudi 28 octobre 2021',
            'description': 'md5:d30c6c3e53f8ad40d405379601973b30',
            'thumbnail': 'https://static.canalalpha.ch/poster/journal/journal_20211028.jpg',
            'upload_date': '20211028',
            'duration': 1125,
        },
        'params': {'skip_download': True}
    }, {
        'url': 'https://www.canalalpha.ch/play/le-journal/topic/24512/la-poste-fait-de-neuchatel-un-pole-cryptographique',
        'info_dict': {
            'id': '24512',
            'ext': 'mp4',
            'title': 'La Poste fait de Neuchâtel un pôle cryptographique',
            'description': 'md5:4ba63ae78a0974d1a53d6703b6e1dedf',
            'thumbnail': 'https://static.canalalpha.ch/poster/news/news_39712.jpg',
            'upload_date': '20211028',
            'duration': 138,
        },
        'params': {'skip_download': True}
    }, {
        'url': 'https://www.canalalpha.ch/play/eureka/episode/24484/ces-innovations-qui-veulent-rendre-lagriculture-plus-durable',
        'info_dict': {
            'id': '24484',
            'ext': 'mp4',
            'title': 'Ces innovations qui veulent rendre l’agriculture plus durable',
            'description': 'md5:3de3f151180684621e85be7c10e4e613',
            'thumbnail': 'https://static.canalalpha.ch/poster/magazine/magazine_10236.jpg',
            'upload_date': '20211026',
            'duration': 360,
        },
        'params': {'skip_download': True}
    }, {
        'url': 'https://www.canalalpha.ch/play/avec-le-temps/episode/23516/redonner-de-leclat-grace-au-polissage',
        'info_dict': {
            'id': '23516',
            'ext': 'mp4',
            'title': 'Redonner de l\'éclat grâce au polissage',
            'description': 'md5:0d8fbcda1a5a4d6f6daa3165402177e1',
            'thumbnail': 'https://static.canalalpha.ch/poster/magazine/magazine_9990.png',
            'upload_date': '20210726',
            'duration': 360,
        },
        'params': {'skip_download': True}
    }]

    def _real_extract(self, url):
        id = self._match_id(url)
        webpage = self._download_webpage(url, id)
        data_json = self._parse_json(self._search_regex(
            r'window\.__SERVER_STATE__\s?=\s?({(?:(?!};)[^"]|"([^"]|\\")*")+})\s?;',
            webpage, 'data_json'), id)['1']['data']['data']
        manifests = try_get(data_json, lambda x: x['video']['manifests'], expected_type=dict) or {}
        subtitles = {}
        formats = [{
            'url': video['$url'],
            'ext': 'mp4',
            'width': try_get(video, lambda x: x['res']['width'], expected_type=int),
            'height': try_get(video, lambda x: x['res']['height'], expected_type=int),
        } for video in try_get(data_json, lambda x: x['video']['mp4'], expected_type=list) or [] if video.get('$url')]
        if manifests.get('hls'):
            m3u8_frmts, m3u8_subs = self._parse_m3u8_formats_and_subtitles(manifests['hls'], video_id=id)
            formats.extend(m3u8_frmts)
            subtitles = self._merge_subtitles(subtitles, m3u8_subs)
        if manifests.get('dash'):
            dash_frmts, dash_subs = self._parse_mpd_formats_and_subtitles(manifests['dash'])
            formats.extend(dash_frmts)
            subtitles = self._merge_subtitles(subtitles, dash_subs)
        self._sort_formats(formats)
        return {
            'id': id,
            'title': data_json.get('title').strip(),
            'description': clean_html(dict_get(data_json, ('longDesc', 'shortDesc'))),
            'thumbnail': data_json.get('poster'),
            'upload_date': unified_strdate(dict_get(data_json, ('webPublishAt', 'featuredAt', 'diffusionDate'))),
            'duration': try_get(data_json, lambda x: x['video']['duration'], expected_type=int),
            'formats': formats,
            'subtitles': subtitles,
        }
@@ -76,7 +76,7 @@ class CanvasIE(InfoExtractor):
                 'vrtPlayerToken': vrtPlayerToken,
                 'client': 'null',
             }, expected_status=400)
-        if not data.get('title'):
+        if 'title' not in data:
             code = data.get('code')
             if code == 'AUTHENTICATION_REQUIRED':
                 self.raise_login_required()
@@ -84,7 +84,8 @@ class CanvasIE(InfoExtractor):
                 self.raise_geo_restricted(countries=['BE'])
             raise ExtractorError(data.get('message') or code, expected=True)

-        title = data['title']
+        # Note: The title may be an empty string
+        title = data['title'] or f'{site_id} {video_id}'
         description = data.get('description')

         formats = []
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
+    format_field,
     float_or_none,
     int_or_none,
     try_get,
@@ -43,7 +44,7 @@ class CarambaTVIE(InfoExtractor):
         formats = [{
             'url': base_url + f['fn'],
             'height': int_or_none(f.get('height')),
-            'format_id': '%sp' % f['height'] if f.get('height') else None,
+            'format_id': format_field(f, 'height', '%sp'),
         } for f in video['qualities'] if f.get('fn')]
         self._sort_formats(formats)
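The CarambaTV hunk swaps a manual ternary for `format_field`, which applies the template when the field is present and falls back to a default otherwise; a quick check of the helper (assuming the `(obj, field, template)` signature used in the hunk):

from yt_dlp.utils import format_field

print(format_field({'height': 1080}, 'height', '%sp'))  # -> '1080p'
print(format_field({}, 'height', '%sp'))                # -> the default for a missing field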