Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2025-12-14 01:52:43 +01:00)
Compare commits
28 Commits
2014.01.06...2014.01.07
| SHA1 |
|---|
| 0cdad20c75 |
| 50144133c5 |
| 089cb705e8 |
| 525e1076ad |
| 282962bd36 |
| c93c2ab1c3 |
| 7b09a4d847 |
| 73a25b30ea |
| ac260dd81e |
| 48a2034671 |
| a9ce0c631e |
| afc7bc33cb |
| 168da92b9a |
| d70ad093af |
| 2a2e2770cc |
| 42cc71e80b |
| 496c19234c |
| 4f81667d76 |
| 56327689a2 |
| ad84831537 |
| 5f263296ea |
| 89650ea3a6 |
| 79f8295303 |
| 400e58103d |
| fcee8ee784 |
| 9148eb002b |
| 559e370f44 |
| e63fc1bed4 |
README.md

@@ -193,7 +193,9 @@ which means you can modify it, redistribute it or use it however you like.
                                      processed files are overwritten by default
     --embed-subs                     embed subtitles in the video (only for mp4
                                      videos)
-    --add-metadata                   add metadata to the files
+    --add-metadata                   write metadata to the video file
+    --xattrs                         write metadata to the video file's xattrs (using
+                                     dublin core and xdg standards)

 # CONFIGURATION

devscripts/release.sh

@@ -24,6 +24,8 @@ if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.0
 version="$1"
 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
 if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
+useless_files=$(find youtube_dl -type f -not -name '*.py')
+if [ ! -z "$useless_files" ]; then echo "ERROR: Non-.py files in youtube_dl: $useless_files"; exit 1; fi
 if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi

 /bin/echo -e "\n### First of all, testing..."
5 setup.py

@@ -71,7 +71,10 @@ setup(
     author_email='ytdl@yt-dl.org',
     maintainer='Philipp Hagemeister',
     maintainer_email='phihag@phihag.de',
-    packages=['youtube_dl', 'youtube_dl.extractor', 'youtube_dl.downloader'],
+    packages=[
+        'youtube_dl',
+        'youtube_dl.extractor', 'youtube_dl.downloader',
+        'youtube_dl.postprocessor'],

     # Provokes warning on most systems (why?!)
     # test_suite = 'nose.collector',
test/test_all_urls.py

@@ -113,6 +113,8 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_vimeo_matching(self):
         self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
+        self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
+        self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user'])
         self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])

     # https://github.com/rg3/youtube-dl/issues/1930
     def test_soundcloud_not_matching_sets(self):
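The two new assertions cover the `vimeo:user` pattern, which this same compare widens in youtube_dl/extractor/vimeo.py so that `.../videos` listing pages also resolve to that extractor. A rough standalone equivalent of what `assertMatch` checks for these URLs — a sketch that assumes a youtube-dl checkout from this period is importable, not test code taken from the diff:

```python
from youtube_dl.extractor import gen_extractors

def matching_ies(url):
    # names of all non-generic extractors whose suitable() accepts the URL
    return [ie.IE_NAME for ie in gen_extractors()
            if ie.suitable(url) and ie.IE_NAME != 'generic']

assert matching_ies('http://vimeo.com/user7108434/videos') == ['vimeo:user']
```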
youtube_dl/YoutubeDL.py

@@ -55,7 +55,7 @@ from .utils import (
 )
 from .extractor import get_info_extractor, gen_extractors
 from .downloader import get_suitable_downloader
-from .PostProcessor import FFmpegMergerPP
+from .postprocessor import FFmpegMergerPP
 from .version import __version__


@@ -1062,7 +1062,9 @@ class YoutubeDL(object):
             res += '%4dk ' % fdict['tbr']
         if (fdict.get('vcodec') is not None and
                 fdict.get('vcodec') != 'none'):
-            res += '%-5s@' % fdict['vcodec']
+            res += '%-5s' % fdict['vcodec']
+            if fdict.get('vbr') is not None:
+                res += '@'
         elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
             res += 'video@'
         if fdict.get('vbr') is not None:
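The second hunk changes how the format listing renders codec and bitrate: the `@` separator that used to be glued to the codec name is now only emitted when a video bitrate (`vbr`) is actually known, so formats without one no longer show a dangling `h264 @`. A simplified, self-contained illustration of the new behaviour (not the actual YoutubeDL method, which handles more fields):

```python
def codec_note(fdict):
    # condensed version of the logic in the hunk above
    res = ''
    if fdict.get('vcodec') is not None and fdict.get('vcodec') != 'none':
        res += '%-5s' % fdict['vcodec']
        if fdict.get('vbr') is not None:
            res += '@'
    if fdict.get('vbr') is not None:
        res += '%4dk' % fdict['vbr']
    return res

print(codec_note({'vcodec': 'h264', 'vbr': 1500}))  # -> 'h264 @1500k'
print(codec_note({'vcodec': 'h264'}))               # -> 'h264 ' (no trailing '@')
```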
youtube_dl/__init__.py

@@ -38,6 +38,7 @@ __authors__ = (
     'Takuya Tsuchida',
     'Sergey M.',
     'Michael Orlitzky',
+    'Chris Gahan',
 )

 __license__ = 'Public Domain'
@@ -74,11 +75,12 @@ from .FileDownloader import (
 from .extractor import gen_extractors
 from .version import __version__
 from .YoutubeDL import YoutubeDL
-from .PostProcessor import (
+from .postprocessor import (
     FFmpegMetadataPP,
     FFmpegVideoConvertor,
     FFmpegExtractAudioPP,
     FFmpegEmbedSubtitlePP,
+    XAttrMetadataPP,
 )


@@ -415,7 +417,9 @@ def parseOpts(overrideArguments=None):
     postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
             help='embed subtitles in the video (only for mp4 videos)')
     postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
-            help='add metadata to the files')
+            help='write metadata to the video file')
+    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
+            help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')


     parser.add_option_group(general)
@@ -717,6 +721,8 @@ def _real_main(argv=None):
            ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
        if opts.embedsubtitles:
            ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
+        if opts.xattrs:
+            ydl.add_post_processor(XAttrMetadataPP())

        # Update version
        if opts.update_self:
youtube_dl/extractor/__init__.py

@@ -112,6 +112,7 @@ from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
+from .mpora import MporaIE
 from .mofosex import MofosexIE
 from .mtv import MTVIE
 from .muzu import MuzuTVIE
youtube_dl/extractor/brightcove.py

@@ -1,4 +1,5 @@
 # encoding: utf-8
+from __future__ import unicode_literals

 import re
 import json
@@ -13,6 +14,7 @@ from ..utils import (
     compat_urllib_request,

     ExtractorError,
+    unsmuggle_url,
 )


@@ -24,47 +26,47 @@ class BrightcoveIE(InfoExtractor):
     _TESTS = [
         {
             # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
-            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
-            u'file': u'2371591881001.mp4',
-            u'md5': u'5423e113865d26e40624dce2e4b45d95',
-            u'note': u'Test Brightcove downloads and detection in GenericIE',
-            u'info_dict': {
-                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
-                u'uploader': u'8TV',
-                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
+            'file': '2371591881001.mp4',
+            'md5': '5423e113865d26e40624dce2e4b45d95',
+            'note': 'Test Brightcove downloads and detection in GenericIE',
+            'info_dict': {
+                'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
+                'uploader': '8TV',
+                'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
             }
         },
         {
             # From http://medianetwork.oracle.com/video/player/1785452137001
-            u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
-            u'file': u'1785452137001.flv',
-            u'info_dict': {
-                u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
-                u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.',
-                u'uploader': u'Oracle',
+            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
+            'file': '1785452137001.flv',
+            'info_dict': {
+                'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
+                'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
+                'uploader': 'Oracle',
             },
         },
         {
             # From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
-            u'url': u'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
-            u'info_dict': {
-                u'id': u'2750934548001',
-                u'ext': u'mp4',
-                u'title': u'This Bracelet Acts as a Personal Thermostat',
-                u'description': u'md5:547b78c64f4112766ccf4e151c20b6a0',
-                u'uploader': u'Mashable',
+            'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
+            'info_dict': {
+                'id': '2750934548001',
+                'ext': 'mp4',
+                'title': 'This Bracelet Acts as a Personal Thermostat',
+                'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
+                'uploader': 'Mashable',
             },
         },
         {
             # test that the default referer works
             # from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
-            u'url': u'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
-            u'info_dict': {
-                u'id': u'2878862109001',
-                u'ext': u'mp4',
-                u'title': u'Lost in Motion II',
-                u'description': u'md5:363109c02998fee92ec02211bd8000df',
-                u'uploader': u'National Ballet of Canada',
+            'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
+            'info_dict': {
+                'id': '2878862109001',
+                'ext': 'mp4',
+                'title': 'Lost in Motion II',
+                'description': 'md5:363109c02998fee92ec02211bd8000df',
+                'uploader': 'National Ballet of Canada',
             },
         },
     ]
@@ -80,10 +82,10 @@ class BrightcoveIE(InfoExtractor):
         object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',
                             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
-        object_str = object_str.replace(u'<--', u'<!--')
+        object_str = object_str.replace('<--', '<!--')

         object_doc = xml.etree.ElementTree.fromstring(object_str)
-        assert u'BrightcoveExperience' in object_doc.attrib['class']
+        assert 'BrightcoveExperience' in object_doc.attrib['class']
         params = {'flashID': object_doc.attrib['id'],
                   'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
                   }
@@ -120,6 +122,8 @@ class BrightcoveIE(InfoExtractor):
             return None

     def _real_extract(self, url):
+        url, smuggled_data = unsmuggle_url(url, {})
+
         # Change the 'videoId' and others field to '@videoPlayer'
         url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
         # Change bckey (used by bcove.me urls) to playerKey
@@ -130,9 +134,10 @@ class BrightcoveIE(InfoExtractor):

         videoPlayer = query.get('@videoPlayer')
         if videoPlayer:
-            return self._get_video_info(videoPlayer[0], query_str, query,
-                # We set the original url as the default 'Referer' header
-                referer=url)
+            # We set the original url as the default 'Referer' header
+            referer = smuggled_data.get('Referer', url)
+            return self._get_video_info(
+                videoPlayer[0], query_str, query, referer=referer)
         else:
             player_key = query['playerKey']
             return self._get_playlist_info(player_key[0])
@@ -156,11 +161,11 @@ class BrightcoveIE(InfoExtractor):

     def _get_playlist_info(self, player_key):
         playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
-                                               player_key, u'Downloading playlist information')
+                                               player_key, 'Downloading playlist information')

         json_data = json.loads(playlist_info)
         if 'videoList' not in json_data:
-            raise ExtractorError(u'Empty playlist')
+            raise ExtractorError('Empty playlist')
         playlist_info = json_data['videoList']
         videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]

@@ -189,5 +194,5 @@ class BrightcoveIE(InfoExtractor):
                 'url': video_info['FLVFullLengthURL'],
             })
         else:
-            raise ExtractorError(u'Unable to extract video url for %s' % info['id'])
+            raise ExtractorError('Unable to extract video url for %s' % info['id'])
         return info
youtube_dl/extractor/generic.py

@@ -225,7 +225,8 @@ class GenericIE(InfoExtractor):
         bc_url = BrightcoveIE._extract_brightcove_url(webpage)
         if bc_url is not None:
             self.to_screen('Brightcove video detected.')
-            return self.url_result(bc_url, 'Brightcove')
+            surl = smuggle_url(bc_url, {'Referer': url})
+            return self.url_result(surl, 'Brightcove')

         # Look for embedded (iframe) Vimeo player
         mobj = re.search(
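Instead of handing the bare Brightcove URL to the `Brightcove` extractor, GenericIE now smuggles the embedding page's URL along with it; `BrightcoveIE._real_extract` (see the brightcove.py hunks above) unsmuggles it and uses it as the default `Referer` header. A round-trip sketch of that hand-off using the two helpers from the youtube_dl/utils.py hunk further down in this compare — the URLs are illustrative:

```python
from youtube_dl.utils import smuggle_url, unsmuggle_url

page_url = 'http://example.com/article-embedding-a-brightcove-player/'
bc_url = 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=123&%40videoPlayer=456'

carrier = smuggle_url(bc_url, {'Referer': page_url})  # extra data rides in the #fragment
plain_url, data = unsmuggle_url(carrier, {})          # what BrightcoveIE does first
referer = data.get('Referer', plain_url)              # falls back to the URL itself

assert plain_url == bc_url and referer == page_url
```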
@@ -306,6 +307,11 @@ class GenericIE(InfoExtractor):
         if mobj is not None:
             return self.url_result(mobj.group(1), 'Aparat')

+        # Look for MPORA videos
+        mobj = re.search(r'<iframe .*?src="(http://mpora.com/videos/[^"]+)"', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group(1), 'Mpora')
+
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
         if mobj is None:
youtube_dl/extractor/lynda.py

@@ -17,7 +17,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
         'file': '114408.mp4',
         'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
-        u"info_dict": {
+        'info_dict': {
             'title': 'Using the exercise files',
             'duration': 68
         }
@@ -65,9 +65,12 @@ class LyndaIE(SubtitlesInfoExtractor):
             'formats': formats
         }

-    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
-
+    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
+
     def _fix_subtitles(self, subtitles):
         if subtitles is None:
             return subtitles # subtitles not requested

         fixed_subtitles = {}
         for k, v in subtitles.items():
             subs = json.loads(v)
@@ -75,14 +78,14 @@ class LyndaIE(SubtitlesInfoExtractor):
                 continue
             srt = ''
             for pos in range(0, len(subs) - 1):
-                seq_current = subs[pos]
+                seq_current = subs[pos]
                 m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
                 if m_current is None:
-                    continue
-                seq_next = subs[pos+1]
+                    continue
+                seq_next = subs[pos + 1]
                 m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
                 if m_next is None:
-                    continue
+                    continue
                 appear_time = m_current.group('timecode')
                 disappear_time = m_next.group('timecode')
                 text = seq_current['Caption']
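`_fix_subtitles` turns Lynda's JSON caption list into SRT: `_TIMECODE_REGEX` pulls the start timecode of each caption and of the one after it, which become the cue's appear/disappear times. A self-contained sketch of that pairing step — the regex is copied from the diff, while the sample data and the exact SRT string are illustrative:

```python
import re

TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

subs = [  # caption objects in the shape the code above expects (made-up content)
    {'Timecode': '[00:00:01.500]', 'Caption': 'Welcome to the course.'},
    {'Timecode': '[00:00:04.200]', 'Caption': 'Download the exercise files.'},
    {'Timecode': '[00:00:08.000]', 'Caption': ''},
]

srt = ''
for pos in range(0, len(subs) - 1):
    m_current = re.match(TIMECODE_REGEX, subs[pos]['Timecode'])
    m_next = re.match(TIMECODE_REGEX, subs[pos + 1]['Timecode'])
    if m_current is None or m_next is None:
        continue
    srt += '%d\n%s --> %s\n%s\n\n' % (
        pos + 1, m_current.group('timecode'), m_next.group('timecode'),
        subs[pos]['Caption'])

print(srt)
```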
66 youtube_dl/extractor/mpora.py (new file)

@@ -0,0 +1,66 @@
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+)
+
+
+class MporaIE(InfoExtractor):
+    _VALID_URL = r'^https?://(www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
+    _IE_NAME = 'MPORA'
+
+    _TEST = {
+        'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de',
+        'file': 'AAdo8okx4wiz.mp4',
+        'md5': 'a7a228473eedd3be741397cf452932eb',
+        'info_dict': {
+            'title': 'Katy Curd - Winter in the Forest',
+            'duration': 416,
+            'uploader': 'petenewman',
+        },
+    }
+
+    def _real_extract(self, url):
+        m = re.match(self._VALID_URL, url)
+        video_id = m.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        data_json = self._search_regex(
+            r"new FM\.Player\('[^']+',\s*(\{.*?)\);\n", webpage, 'json')
+
+        data = json.loads(data_json)
+
+        uploader = data['info_overlay']['name']
+        duration = data['video']['duration'] // 1000
+        thumbnail = data['video']['encodings']['sd']['poster']
+        title = data['info_overlay']['title']
+
+        formats = []
+        for encoding_id, edata in data['video']['encodings'].items():
+            for src in edata['sources']:
+                width_str = self._search_regex(
+                    r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'],
+                    False, default=None)
+                vcodec = src['type'].partition('/')[2]
+
+                formats.append({
+                    'format_id': encoding_id + '-' + vcodec,
+                    'url': src['src'],
+                    'vcodec': vcodec,
+                    'width': int_or_none(width_str),
+                })
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'uploader': uploader,
+            'duration': duration,
+            'thumbnail': thumbnail,
+        }
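The new extractor parses the JSON blob that MPORA pages pass to their `FM.Player` and emits one format per encoding/source, sorted via `_sort_formats`. A usage sketch through the public API, using the test URL from the file above — it needs network access and a checkout of this period, and the site may no longer serve this page:

```python
from youtube_dl import YoutubeDL

with YoutubeDL({'quiet': True, 'skip_download': True}) as ydl:
    info = ydl.extract_info(
        'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de', download=False)
    print(info['id'], info['title'], info['duration'])
```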
youtube_dl/extractor/orf.py

@@ -68,7 +68,7 @@ class ORFIE(InfoExtractor):
                 pass
             else:
                 req = HEADRequest(http_url)
-                response = self._request_webpage(
+                self._request_webpage(
                     req, video_id,
                     note='Testing for geoblocking',
                     errnote=((
youtube_dl/extractor/vimeo.py

@@ -1,4 +1,6 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import json
 import re
 import itertools
@@ -31,54 +33,55 @@ class VimeoIE(InfoExtractor):
                          (?P<id>[0-9]+)
                          /?(?:[?&].*)?(?:[#].*)?$'''
     _NETRC_MACHINE = 'vimeo'
-    IE_NAME = u'vimeo'
+    IE_NAME = 'vimeo'
     _TESTS = [
         {
-            u'url': u'http://vimeo.com/56015672#at=0',
-            u'file': u'56015672.mp4',
-            u'md5': u'8879b6cc097e987f02484baf890129e5',
-            u'info_dict': {
-                u"upload_date": u"20121220",
-                u"description": u"This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
-                u"uploader_id": u"user7108434",
-                u"uploader": u"Filippo Valsorda",
-                u"title": u"youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
+            'url': 'http://vimeo.com/56015672#at=0',
+            'file': '56015672.mp4',
+            'md5': '8879b6cc097e987f02484baf890129e5',
+            'info_dict': {
+                "upload_date": "20121220",
+                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
+                "uploader_id": "user7108434",
+                "uploader": "Filippo Valsorda",
+                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
             },
         },
         {
-            u'url': u'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
-            u'file': u'68093876.mp4',
-            u'md5': u'3b5ca6aa22b60dfeeadf50b72e44ed82',
-            u'note': u'Vimeo Pro video (#1197)',
-            u'info_dict': {
-                u'uploader_id': u'openstreetmapus',
-                u'uploader': u'OpenStreetMap US',
-                u'title': u'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
+            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
+            'file': '68093876.mp4',
+            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
+            'note': 'Vimeo Pro video (#1197)',
+            'info_dict': {
+                'uploader_id': 'openstreetmapus',
+                'uploader': 'OpenStreetMap US',
+                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
             },
         },
         {
-            u'url': u'http://player.vimeo.com/video/54469442',
-            u'file': u'54469442.mp4',
-            u'md5': u'619b811a4417aa4abe78dc653becf511',
-            u'note': u'Videos that embed the url in the player page',
-            u'info_dict': {
-                u'title': u'Kathy Sierra: Building the minimum Badass User, Business of Software',
-                u'uploader': u'The BLN & Business of Software',
+            'url': 'http://player.vimeo.com/video/54469442',
+            'file': '54469442.mp4',
+            'md5': '619b811a4417aa4abe78dc653becf511',
+            'note': 'Videos that embed the url in the player page',
+            'info_dict': {
+                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software',
+                'uploader': 'The BLN & Business of Software',
+                'uploader_id': 'theblnbusinessofsoftware',
             },
         },
         {
-            u'url': u'http://vimeo.com/68375962',
-            u'file': u'68375962.mp4',
-            u'md5': u'aaf896bdb7ddd6476df50007a0ac0ae7',
-            u'note': u'Video protected with password',
-            u'info_dict': {
-                u'title': u'youtube-dl password protected test video',
-                u'upload_date': u'20130614',
-                u'uploader_id': u'user18948128',
-                u'uploader': u'Jaime Marquínez Ferrándiz',
+            'url': 'http://vimeo.com/68375962',
+            'file': '68375962.mp4',
+            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
+            'note': 'Video protected with password',
+            'info_dict': {
+                'title': 'youtube-dl password protected test video',
+                'upload_date': '20130614',
+                'uploader_id': 'user18948128',
+                'uploader': 'Jaime Marquínez Ferrándiz',
             },
-            u'params': {
-                u'videopassword': u'youtube-dl',
+            'params': {
+                'videopassword': 'youtube-dl',
             },
         },
     ]
@@ -90,7 +93,7 @@ class VimeoIE(InfoExtractor):
         self.report_login()
         login_url = 'https://vimeo.com/log_in'
         webpage = self._download_webpage(login_url, None, False)
-        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
+        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
         data = compat_urllib_parse.urlencode({'email': username,
                                               'password': password,
                                               'action': 'login',
@@ -100,13 +103,13 @@ class VimeoIE(InfoExtractor):
         login_request = compat_urllib_request.Request(login_url, data)
         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_request.add_header('Cookie', 'xsrft=%s' % token)
-        self._download_webpage(login_request, None, False, u'Wrong login info')
+        self._download_webpage(login_request, None, False, 'Wrong login info')

     def _verify_video_password(self, url, video_id, webpage):
         password = self._downloader.params.get('videopassword', None)
         if password is None:
-            raise ExtractorError(u'This video is protected by a password, use the --video-password option')
-        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
+            raise ExtractorError('This video is protected by a password, use the --video-password option')
+        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
         data = compat_urllib_parse.urlencode({'password': password,
                                               'token': token})
         # I didn't manage to use the password with https
@@ -118,8 +121,8 @@ class VimeoIE(InfoExtractor):
         password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         password_request.add_header('Cookie', 'xsrft=%s' % token)
         self._download_webpage(password_request, video_id,
-                               u'Verifying the password',
-                               u'Wrong password')
+                               'Verifying the password',
+                               'Wrong password')

     def _real_initialize(self):
         self._login()
@@ -134,7 +137,7 @@ class VimeoIE(InfoExtractor):
         # Extract ID from URL
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)
+            raise ExtractorError('Invalid URL: %s' % url)

         video_id = mobj.group('id')
         if mobj.group('pro') or mobj.group('player'):
@@ -155,7 +158,7 @@ class VimeoIE(InfoExtractor):
         try:
             try:
                 config_url = self._html_search_regex(
-                    r' data-config-url="(.+?)"', webpage, u'config URL')
+                    r' data-config-url="(.+?)"', webpage, 'config URL')
                 config_json = self._download_webpage(config_url, video_id)
                 config = json.loads(config_json)
             except RegexNotFoundError:
@@ -166,19 +169,23 @@ class VimeoIE(InfoExtractor):
                     config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
                 else:
                     config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
-                config = self._search_regex(config_re, webpage, u'info section',
+                config = self._search_regex(config_re, webpage, 'info section',
                     flags=re.DOTALL)
                 config = json.loads(config)
         except Exception as e:
             if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
-                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')
+                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

             if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                 self._verify_video_password(url, video_id, webpage)
                 return self._real_extract(url)
             else:
-                raise ExtractorError(u'Unable to extract info section',
+                raise ExtractorError('Unable to extract info section',
                                      cause=e)
+        else:
+            if config.get('view') == 4:
+                self._verify_video_password(url, video_id, webpage)
+                return self._real_extract(url)

         # Extract title
         video_title = config["video"]["title"]
@@ -212,9 +219,9 @@ class VimeoIE(InfoExtractor):
             video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

         try:
-            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, u'view count'))
-            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, u'like count'))
-            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, u'comment count'))
+            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
+            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
+            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
         except RegexNotFoundError:
             # This info is only available in vimeo.com/{id} urls
             view_count = None
@@ -255,7 +262,7 @@ class VimeoIE(InfoExtractor):
             for key in ('other', 'sd', 'hd'):
                 formats += files[key]
         if len(formats) == 0:
-            raise ExtractorError(u'No known codec found')
+            raise ExtractorError('No known codec found')

         return {
             'id': video_id,
@@ -274,7 +281,7 @@ class VimeoIE(InfoExtractor):


 class VimeoChannelIE(InfoExtractor):
-    IE_NAME = u'vimeo:channel'
+    IE_NAME = 'vimeo:channel'
     _VALID_URL = r'(?:https?://)?vimeo.\com/channels/(?P<id>[^/]+)'
     _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
     _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
@@ -283,14 +290,14 @@ class VimeoChannelIE(InfoExtractor):
         return '%s/videos/page:%d/' % (base_url, pagenum)

     def _extract_list_title(self, webpage):
-        return self._html_search_regex(self._TITLE_RE, webpage, u'list title')
+        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

     def _extract_videos(self, list_id, base_url):
         video_ids = []
         for pagenum in itertools.count(1):
             webpage = self._download_webpage(
                 self._page_url(base_url, pagenum) ,list_id,
-                u'Downloading page %s' % pagenum)
+                'Downloading page %s' % pagenum)
             video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
             if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                 break
@@ -310,8 +317,8 @@ class VimeoChannelIE(InfoExtractor):


 class VimeoUserIE(VimeoChannelIE):
-    IE_NAME = u'vimeo:user'
-    _VALID_URL = r'(?:https?://)?vimeo.\com/(?P<name>[^/]+)(?:[#?]|$)'
+    IE_NAME = 'vimeo:user'
+    _VALID_URL = r'(?:https?://)?vimeo.\com/(?P<name>[^/]+)(?:/videos|[#?]|$)'
     _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'

     @classmethod
@@ -327,7 +334,7 @@ class VimeoUserIE(VimeoChannelIE):


 class VimeoAlbumIE(VimeoChannelIE):
-    IE_NAME = u'vimeo:album'
+    IE_NAME = 'vimeo:album'
     _VALID_URL = r'(?:https?://)?vimeo.\com/album/(?P<id>\d+)'
     _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'

@@ -341,7 +348,7 @@ class VimeoAlbumIE(VimeoChannelIE):


 class VimeoGroupsIE(VimeoAlbumIE):
-    IE_NAME = u'vimeo:group'
+    IE_NAME = 'vimeo:group'
     _VALID_URL = r'(?:https?://)?vimeo.\com/groups/(?P<name>[^/]+)'

     def _extract_list_title(self, webpage):
@@ -354,8 +361,8 @@ class VimeoGroupsIE(VimeoAlbumIE):


 class VimeoReviewIE(InfoExtractor):
-    IE_NAME = u'vimeo:review'
-    IE_DESC = u'Review pages on vimeo'
+    IE_NAME = 'vimeo:review'
+    IE_DESC = 'Review pages on vimeo'
     _VALID_URL = r'(?:https?://)?vimeo.\com/[^/]+/review/(?P<id>[^/]+)'
     _TEST = {
         'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
18 youtube_dl/postprocessor/__init__.py (new file)

@@ -0,0 +1,18 @@
+
+from .ffmpeg import (
+    FFmpegMergerPP,
+    FFmpegMetadataPP,
+    FFmpegVideoConvertor,
+    FFmpegExtractAudioPP,
+    FFmpegEmbedSubtitlePP,
+)
+from .xattrpp import XAttrMetadataPP
+
+__all__ = [
+    'FFmpegMergerPP',
+    'FFmpegMetadataPP',
+    'FFmpegVideoConvertor',
+    'FFmpegExtractAudioPP',
+    'FFmpegEmbedSubtitlePP',
+    'XAttrMetadataPP',
+]
49 youtube_dl/postprocessor/common.py (new file)

@@ -0,0 +1,49 @@
+from ..utils import PostProcessingError
+
+
+class PostProcessor(object):
+    """Post Processor class.
+
+    PostProcessor objects can be added to downloaders with their
+    add_post_processor() method. When the downloader has finished a
+    successful download, it will take its internal chain of PostProcessors
+    and start calling the run() method on each one of them, first with
+    an initial argument and then with the returned value of the previous
+    PostProcessor.
+
+    The chain will be stopped if one of them ever returns None or the end
+    of the chain is reached.
+
+    PostProcessor objects follow a "mutual registration" process similar
+    to InfoExtractor objects.
+    """
+
+    _downloader = None
+
+    def __init__(self, downloader=None):
+        self._downloader = downloader
+
+    def set_downloader(self, downloader):
+        """Sets the downloader for this PP."""
+        self._downloader = downloader
+
+    def run(self, information):
+        """Run the PostProcessor.
+
+        The "information" argument is a dictionary like the ones
+        composed by InfoExtractors. The only difference is that this
+        one has an extra field called "filepath" that points to the
+        downloaded file.
+
+        This method returns a tuple, the first element of which describes
+        whether the original file should be kept (i.e. not deleted - None for
+        no preference), and the second of which is the updated information.
+
+        In addition, this method may raise a PostProcessingError
+        exception if post processing fails.
+        """
+        return None, information # by default, keep file and do nothing
+
+
+class AudioConversionError(PostProcessingError):
+    pass
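The docstring above is the whole contract: a post processor gets the info dict (including the extra `filepath` key) and returns a `(keep_original, info)` pair, with `None` meaning no preference. A minimal custom post processor following that contract — the class is hypothetical and not part of this change set; `add_post_processor()` is what wires in the downloader that `self._downloader` relies on:

```python
from youtube_dl import YoutubeDL
from youtube_dl.postprocessor.common import PostProcessor


class PrintFilepathPP(PostProcessor):
    def run(self, information):
        # 'filepath' points at the file the downloader just finished
        self._downloader.to_screen('[printpath] %s' % information['filepath'])
        return None, information  # no preference about keeping the original


ydl = YoutubeDL({'quiet': True})
ydl.add_post_processor(PrintFilepathPP())  # set_downloader() is called for us
```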
youtube_dl/PostProcessor.py → youtube_dl/postprocessor/ffmpeg.py

@@ -4,65 +4,23 @@ import sys
 import time


-from .utils import (
+from .common import AudioConversionError, PostProcessor
+
+from ..utils import (
+    check_executable,
     compat_subprocess_get_DEVNULL,
     encodeFilename,
-    PostProcessingError,
+    prepend_extension,
     shell_quote,
     subtitles_filename,
-    prepend_extension,
 )


-class PostProcessor(object):
-    """Post Processor class.
-
-    PostProcessor objects can be added to downloaders with their
-    add_post_processor() method. When the downloader has finished a
-    successful download, it will take its internal chain of PostProcessors
-    and start calling the run() method on each one of them, first with
-    an initial argument and then with the returned value of the previous
-    PostProcessor.
-
-    The chain will be stopped if one of them ever returns None or the end
-    of the chain is reached.
-
-    PostProcessor objects follow a "mutual registration" process similar
-    to InfoExtractor objects.
-    """
-
-    _downloader = None
-
-    def __init__(self, downloader=None):
-        self._downloader = downloader
-
-    def set_downloader(self, downloader):
-        """Sets the downloader for this PP."""
-        self._downloader = downloader
-
-    def run(self, information):
-        """Run the PostProcessor.
-
-        The "information" argument is a dictionary like the ones
-        composed by InfoExtractors. The only difference is that this
-        one has an extra field called "filepath" that points to the
-        downloaded file.
-
-        This method returns a tuple, the first element of which describes
-        whether the original file should be kept (i.e. not deleted - None for
-        no preference), and the second of which is the updated information.
-
-        In addition, this method may raise a PostProcessingError
-        exception if post processing fails.
-        """
-        return None, information # by default, keep file and do nothing
-
 class FFmpegPostProcessorError(PostProcessingError):
     pass

-class AudioConversionError(PostProcessingError):
-    pass
-
 class FFmpegPostProcessor(PostProcessor):
     def __init__(self,downloader=None):
         PostProcessor.__init__(self, downloader)
@@ -70,14 +28,8 @@ class FFmpegPostProcessor(PostProcessor):

     @staticmethod
     def detect_executables():
-        def executable(exe):
-            try:
-                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
-            except OSError:
-                return False
-            return exe
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-        return dict((program, executable(program)) for program in programs)
+        return dict((program, check_executable(program, ['-version'])) for program in programs)

     def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
         if not self._exes['ffmpeg'] and not self._exes['avconv']:
@@ -108,6 +60,7 @@ class FFmpegPostProcessor(PostProcessor):
             return u'./' + fn
         return fn

+
 class FFmpegExtractAudioPP(FFmpegPostProcessor):
     def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
         FFmpegPostProcessor.__init__(self, downloader)
@@ -236,6 +189,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         information['filepath'] = new_path
         return self._nopostoverwrites,information

+
 class FFmpegVideoConvertor(FFmpegPostProcessor):
     def __init__(self, downloader=None,preferedformat=None):
         super(FFmpegVideoConvertor, self).__init__(downloader)
@@ -519,3 +473,4 @@ class FFmpegMergerPP(FFmpegPostProcessor):
         args = ['-c', 'copy']
         self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
         return True, info
+
109 youtube_dl/postprocessor/xattrpp.py (new file)

@@ -0,0 +1,109 @@
+import os
+import subprocess
+import sys
+
+from .common import PostProcessor
+from ..utils import (
+    check_executable,
+    hyphenate_date,
+    preferredencoding,
+)
+
+
+class XAttrMetadataPP(PostProcessor):
+
+    #
+    # More info about extended attributes for media:
+    # http://freedesktop.org/wiki/CommonExtendedAttributes/
+    # http://www.freedesktop.org/wiki/PhreedomDraft/
+    # http://dublincore.org/documents/usageguide/elements.shtml
+    #
+    # TODO:
+    # * capture youtube keywords and put them in 'user.dublincore.subject' (comma-separated)
+    # * figure out which xattrs can be used for 'duration', 'thumbnail', 'resolution'
+    #
+
+    def run(self, info):
+        """ Set extended attributes on downloaded file (if xattr support is found). """
+
+        # This mess below finds the best xattr tool for the job and creates a
+        # "write_xattr" function.
+        try:
+            # try the pyxattr module...
+            import xattr
+
+            def write_xattr(path, key, value):
+                return xattr.setxattr(path, key, value)
+
+        except ImportError:
+            if os.name == 'nt':
+                # Write xattrs to NTFS Alternate Data Streams:
+                # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
+                def write_xattr(path, key, value):
+                    assert ':' not in key
+                    assert os.path.exists(path)
+
+                    ads_fn = path + ":" + key
+                    with open(ads_fn, "wb") as f:
+                        f.write(value)
+            else:
+                user_has_setfattr = check_executable("setfattr", ['--version'])
+                user_has_xattr = check_executable("xattr", ['-h'])
+
+                if user_has_setfattr or user_has_xattr:
+
+                    def write_xattr(path, key, value):
+                        if user_has_setfattr:
+                            cmd = ['setfattr', '-n', key, '-v', value, path]
+                        elif user_has_xattr:
+                            cmd = ['xattr', '-w', key, value, path]
+
+                        subprocess.check_output(cmd)
+
+                else:
+                    # On Unix, and can't find pyxattr, setfattr, or xattr.
+                    if sys.platform.startswith('linux'):
+                        self._downloader.report_error(
+                            "Couldn't find a tool to set the xattrs. "
+                            "Install either the python 'pyxattr' or 'xattr' "
+                            "modules, or the GNU 'attr' package "
+                            "(which contains the 'setfattr' tool).")
+                    else:
+                        self._downloader.report_error(
+                            "Couldn't find a tool to set the xattrs. "
+                            "Install either the python 'xattr' module, "
+                            "or the 'xattr' binary.")
+
+        # Write the metadata to the file's xattrs
+        self._downloader.to_screen('[metadata] Writing metadata to file\'s xattrs')
+
+        filename = info['filepath']
+
+        try:
+            xattr_mapping = {
+                'user.xdg.referrer.url': 'webpage_url',
+                # 'user.xdg.comment': 'description',
+                'user.dublincore.title': 'title',
+                'user.dublincore.date': 'upload_date',
+                'user.dublincore.description': 'description',
+                'user.dublincore.contributor': 'uploader',
+                'user.dublincore.format': 'format',
+            }
+
+            for xattrname, infoname in xattr_mapping.items():
+
+                value = info.get(infoname)
+
+                if value:
+                    if infoname == "upload_date":
+                        value = hyphenate_date(value)
+
+                    byte_value = value.encode('utf-8')
+                    write_xattr(filename, xattrname, byte_value)
+
+            return True, info
+
+        except (subprocess.CalledProcessError, OSError):
+            self._downloader.report_error("This filesystem doesn't support extended attributes. (You may have to enable them in your /etc/fstab)")
+            return False, info
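`XAttrMetadataPP` picks the first backend it can find (pyxattr, NTFS alternate data streams, `setfattr`, or `xattr`) and then writes a fixed mapping of info-dict fields to `user.*` attributes. A small inspection sketch that reads those attributes back with the pyxattr module the code itself prefers — the filename is hypothetical, and it assumes pyxattr plus a filesystem with xattrs enabled:

```python
import xattr  # pyxattr

path = 'some-downloaded-video.mp4'  # hypothetical file written with --xattrs
for key in ('user.dublincore.title', 'user.dublincore.date', 'user.xdg.referrer.url'):
    try:
        print(key, xattr.getxattr(path, key).decode('utf-8'))
    except (IOError, OSError):
        print(key, '<not set>')
```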
youtube_dl/utils.py

@@ -818,6 +818,15 @@ def date_from_str(date_str):
         return today + delta
     return datetime.datetime.strptime(date_str, "%Y%m%d").date()

+def hyphenate_date(date_str):
+    """
+    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
+    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
+    if match is not None:
+        return '-'.join(match.groups())
+    else:
+        return date_str
+
 class DateRange(object):
     """Represents a time interval between two dates"""
     def __init__(self, start=None, end=None):
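`hyphenate_date` normalizes the `upload_date` field (`YYYYMMDD`) into the dashed form written to the `user.dublincore.date` attribute by the new xattr post processor, and passes anything else through unchanged. For instance:

```python
import re

def hyphenate_date(date_str):  # same logic as the helper added above
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    return '-'.join(match.groups()) if match is not None else date_str

assert hyphenate_date('20140107') == '2014-01-07'
assert hyphenate_date('unknown') == 'unknown'
```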
@@ -1027,9 +1036,9 @@ def smuggle_url(url, data):
     return url + u'#' + sdata


-def unsmuggle_url(smug_url):
+def unsmuggle_url(smug_url, default=None):
     if not '#__youtubedl_smuggle' in smug_url:
-        return smug_url, None
+        return smug_url, default
     url, _, sdata = smug_url.rpartition(u'#')
     jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
     data = json.loads(jsond)
@@ -1142,3 +1151,13 @@ def parse_duration(s):
 def prepend_extension(filename, ext):
     name, real_ext = os.path.splitext(filename)
     return u'{0}.{1}{2}'.format(name, ext, real_ext)
+
+
+def check_executable(exe, args=[]):
+    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
+    args can be a list of arguments for a short output (like -version) """
+    try:
+        subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+    except OSError:
+        return False
+    return exe
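`check_executable` is the probe now shared by `FFmpegPostProcessor.detect_executables` and `XAttrMetadataPP`: it runs the binary once with a cheap argument list and returns its name if that worked, `False` otherwise. A usage sketch (assumes youtube_dl.utils from this checkout is importable):

```python
from youtube_dl.utils import check_executable

print(check_executable('ffmpeg', ['-version']))      # 'ffmpeg' if it is on PATH
print(check_executable('no-such-binary-hopefully'))  # False
```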
youtube_dl/version.py

@@ -1,2 +1,2 @@

-__version__ = '2014.01.06.1'
+__version__ = '2014.01.07.3'